repo_name
stringlengths 4
116
| path
stringlengths 4
379
| size
stringlengths 1
7
| content
stringlengths 3
1.05M
| license
stringclasses 15
values |
---|---|---|---|---|
ksa46/work | library/Bisna/Application/Resource/Doctrine.php | 3109 | <?php
/**
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* This software consists of voluntary contributions made by many individuals
* and is licensed under the LGPL. For more information, see
* <http://www.doctrine-project.org>.
*/
// Zend Framework cannot deal with Resources using namespaces
//namespace Bisna\Application\Resource;
use Bisna\Application\Container;
/**
* Zend Application Resource Doctrine class
*
* @license http://www.opensource.org/licenses/lgpl-license.php LGPL
* @link www.doctrine-project.org
*
* @author Guilherme Blanco <[email protected]>
*/
class Bisna_Application_Resource_Doctrine extends \Zend_Application_Resource_ResourceAbstract
{
    /**
     * Initializes the Doctrine context.
     *
     * Boots the Doctrine class loaders, builds a DoctrineContainer from the
     * resource options, and publishes the container, its EntityManager and its
     * cache instance in the Zend Registry under the keys 'doctrine', 'em' and
     * 'cache' respectively.
     *
     * @return \Bisna\Application\Container\DoctrineContainer
     */
    public function init()
    {
        $config = $this->getOptions();

        // Bootstrapping Doctrine autoloaders
        $this->registerAutoloaders($config);

        // Starting Doctrine container
        $container = new Container\DoctrineContainer($config);

        // Add to Zend Registry
        \Zend_Registry::set('doctrine', $container);
        \Zend_Registry::set('em', $container->getEntityManager());
        \Zend_Registry::set('cache', $container->getCacheInstance());

        return $container;
    }

    /**
     * Register Doctrine autoloaders on the Zend autoloader stack.
     *
     * Loads Doctrine's ClassLoader from the 'includePath' option (falling back
     * to APPLICATION_PATH . '/../library/Doctrine') and pushes loaders for the
     * 'Symfony', 'DoctrineExtensions' and 'Doctrine' class-name prefixes.
     *
     * @param array $config Doctrine global configuration
     */
    private function registerAutoloaders(array $config = array())
    {
        $autoloader = \Zend_Loader_Autoloader::getInstance();

        $doctrineIncludePath = isset($config['includePath'])
            ? $config['includePath'] : APPLICATION_PATH . '/../library/Doctrine';

        require_once $doctrineIncludePath . '/Common/ClassLoader.php';

        $symfonyAutoloader = new \Doctrine\Common\ClassLoader('Symfony');
        $autoloader->pushAutoloader(array($symfonyAutoloader, 'loadClass'), 'Symfony');

        $doctrineExtensionsAutoloader = new \Doctrine\Common\ClassLoader('DoctrineExtensions');
        $autoloader->pushAutoloader(array($doctrineExtensionsAutoloader, 'loadClass'), 'DoctrineExtensions');

        $doctrineAutoloader = new \Doctrine\Common\ClassLoader('Doctrine');
        $autoloader->pushAutoloader(array($doctrineAutoloader, 'loadClass'), 'Doctrine');
    }
}
| unlicense |
enj/origin | vendor/github.com/Azure/azure-sdk-for-go/profiles/2017-03-09/resources/mgmt/features/models.go | 2807 | // +build go1.9
// Copyright 2019 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// This code was auto-generated by:
// github.com/Azure/azure-sdk-for-go/tools/profileBuilder
package features
import (
"context"
original "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2015-12-01/features"
)
const (
	// DefaultBaseURI is re-exported from the versioned 2015-12-01
	// features package pinned by this profile.
	DefaultBaseURI = original.DefaultBaseURI
)
// Type aliases forwarding the public types of the versioned 2015-12-01
// features package so that profile consumers can reference them under
// this package path.
type BaseClient = original.BaseClient
type Client = original.Client
type Operation = original.Operation
type OperationDisplay = original.OperationDisplay
type OperationListResult = original.OperationListResult
type OperationListResultIterator = original.OperationListResultIterator
type OperationListResultPage = original.OperationListResultPage
type OperationsListResult = original.OperationsListResult
type OperationsListResultIterator = original.OperationsListResultIterator
type OperationsListResultPage = original.OperationsListResultPage
type Properties = original.Properties
type Result = original.Result
// New creates a BaseClient for the given subscription, delegating to the
// versioned features package.
func New(subscriptionID string) BaseClient {
	return original.New(subscriptionID)
}
// NewClient creates a features Client for the given subscription,
// delegating to the versioned features package.
func NewClient(subscriptionID string) Client {
	return original.NewClient(subscriptionID)
}
// NewClientWithBaseURI creates a features Client against a custom endpoint
// (e.g. a sovereign cloud), delegating to the versioned features package.
func NewClientWithBaseURI(baseURI string, subscriptionID string) Client {
	return original.NewClientWithBaseURI(baseURI, subscriptionID)
}
// NewOperationListResultIterator wraps a page in an iterator, delegating to
// the versioned features package.
func NewOperationListResultIterator(page OperationListResultPage) OperationListResultIterator {
	return original.NewOperationListResultIterator(page)
}
// NewOperationListResultPage builds a page from a next-page fetcher,
// delegating to the versioned features package.
func NewOperationListResultPage(getNextPage func(context.Context, OperationListResult) (OperationListResult, error)) OperationListResultPage {
	return original.NewOperationListResultPage(getNextPage)
}
// NewOperationsListResultIterator wraps a page in an iterator, delegating
// to the versioned features package.
func NewOperationsListResultIterator(page OperationsListResultPage) OperationsListResultIterator {
	return original.NewOperationsListResultIterator(page)
}
// NewOperationsListResultPage builds a page from a next-page fetcher,
// delegating to the versioned features package.
func NewOperationsListResultPage(getNextPage func(context.Context, OperationsListResult) (OperationsListResult, error)) OperationsListResultPage {
	return original.NewOperationsListResultPage(getNextPage)
}
// NewWithBaseURI creates a BaseClient against a custom endpoint, delegating
// to the versioned features package.
func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient {
	return original.NewWithBaseURI(baseURI, subscriptionID)
}
// UserAgent returns the versioned package's user-agent string with this
// profile's identifier appended.
func UserAgent() string {
	return original.UserAgent() + " profiles/2017-03-09"
}
// Version returns the SDK version of the underlying versioned package.
func Version() string {
	return original.Version()
}
| apache-2.0 |
joelschuster/fuse | components/examples/camel-example-sap/src/main/java/org/fusesource/sap/example/bean/FlightCustomerInfo.java | 3065 | /**
* Copyright 2013 Red Hat, Inc.
*
* Red Hat licenses this file to you under the Apache License, version
* 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package org.fusesource.sap.example.bean;
/**
* Bean containing Flight Customer Information
*
* @author William Collins <[email protected]>
*
*/
public class FlightCustomerInfo {

    // Customer address and contact fields, all optional (null when unset).
    private String customerNumber;
    private String formOfAddress;
    private String name;
    private String street;
    private String poBox;
    private String city;
    private String region;
    private String postalCode;
    private String country;
    private String countryIso;
    private String phone;
    private String email;

    /** @return the customer number, or null if unset */
    public String getCustomerNumber() {
        return this.customerNumber;
    }

    /** @param customerNumber the customer number to set */
    public void setCustomerNumber(String customerNumber) {
        this.customerNumber = customerNumber;
    }

    /** @return the form of address, or null if unset */
    public String getFormOfAddress() {
        return this.formOfAddress;
    }

    /** @param formOfAddress the form of address to set */
    public void setFormOfAddress(String formOfAddress) {
        this.formOfAddress = formOfAddress;
    }

    /** @return the customer name, or null if unset */
    public String getName() {
        return this.name;
    }

    /** @param name the customer name to set */
    public void setName(String name) {
        this.name = name;
    }

    /** @return the street, or null if unset */
    public String getStreet() {
        return this.street;
    }

    /** @param street the street to set */
    public void setStreet(String street) {
        this.street = street;
    }

    /** @return the PO box, or null if unset */
    public String getPoBox() {
        return this.poBox;
    }

    /** @param poBox the PO box to set */
    public void setPoBox(String poBox) {
        this.poBox = poBox;
    }

    /** @return the city, or null if unset */
    public String getCity() {
        return this.city;
    }

    /** @param city the city to set */
    public void setCity(String city) {
        this.city = city;
    }

    /** @return the region, or null if unset */
    public String getRegion() {
        return this.region;
    }

    /** @param region the region to set */
    public void setRegion(String region) {
        this.region = region;
    }

    /** @return the postal code, or null if unset */
    public String getPostalCode() {
        return this.postalCode;
    }

    /** @param postalCode the postal code to set */
    public void setPostalCode(String postalCode) {
        this.postalCode = postalCode;
    }

    /** @return the country, or null if unset */
    public String getCountry() {
        return this.country;
    }

    /** @param country the country to set */
    public void setCountry(String country) {
        this.country = country;
    }

    /** @return the ISO country code, or null if unset */
    public String getCountryIso() {
        return this.countryIso;
    }

    /** @param countryIso the ISO country code to set */
    public void setCountryIso(String countryIso) {
        this.countryIso = countryIso;
    }

    /** @return the phone number, or null if unset */
    public String getPhone() {
        return this.phone;
    }

    /** @param phone the phone number to set */
    public void setPhone(String phone) {
        this.phone = phone;
    }

    /** @return the email address, or null if unset */
    public String getEmail() {
        return this.email;
    }

    /** @param email the email address to set */
    public void setEmail(String email) {
        this.email = email;
    }

    /**
     * Renders all fields as "FlightCustomerInfo [field=value, ...]";
     * null fields appear as the literal "null".
     */
    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("FlightCustomerInfo [");
        sb.append("customerNumber=").append(customerNumber);
        sb.append(", formOfAddress=").append(formOfAddress);
        sb.append(", name=").append(name);
        sb.append(", street=").append(street);
        sb.append(", poBox=").append(poBox);
        sb.append(", city=").append(city);
        sb.append(", region=").append(region);
        sb.append(", postalCode=").append(postalCode);
        sb.append(", country=").append(country);
        sb.append(", countryIso=").append(countryIso);
        sb.append(", phone=").append(phone);
        sb.append(", email=").append(email);
        return sb.append("]").toString();
    }
}
| apache-2.0 |
christophd/camel | components/camel-xmpp/src/test/java/org/apache/camel/component/xmpp/XmppTestUtil.java | 2359 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.xmpp;
import java.net.InetAddress;
import java.security.KeyStore;
import javax.net.ssl.SSLContext;
import javax.net.ssl.TrustManagerFactory;
import org.apache.camel.spi.Registry;
import org.apache.camel.util.ObjectHelper;
import org.jivesoftware.smack.ConnectionConfiguration;
import org.jivesoftware.smack.tcp.XMPPTCPConnectionConfiguration;
import org.jxmpp.jid.impl.JidCreate;
public final class XmppTestUtil {

    // Utility class: not instantiable.
    private XmppTestUtil() {
    }

    /**
     * Builds an XMPP TCP connection configuration that trusts the test
     * certificate and binds it into the given registry under the name
     * "customConnectionConfig".
     *
     * The SSL context is initialized from the "bogus_mina_tls.cert" JKS
     * keystore on the classpath (password "boguspw"), and hostname
     * verification is disabled (always-true verifier) since tests connect
     * by raw address.
     *
     * @param hostAddress host the XMPP server listens on
     * @param port        port the XMPP server listens on
     * @throws Exception  if the keystore cannot be loaded or the SSL/XMPP
     *                    configuration cannot be created
     */
    public static void bindSSLContextTo(Registry registry, String hostAddress, int port) throws Exception {
        KeyStore keyStore = KeyStore.getInstance("JKS");
        keyStore.load(ObjectHelper.loadResourceAsStream("bogus_mina_tls.cert"), "boguspw".toCharArray());

        TrustManagerFactory trustManagerFactory = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
        trustManagerFactory.init(keyStore);

        SSLContext sslContext = SSLContext.getInstance("TLS");
        sslContext.init(null, trustManagerFactory.getTrustManagers(), null);

        // XMPP domain "apache.camel" must match what the test server announces.
        ConnectionConfiguration connectionConfig = XMPPTCPConnectionConfiguration.builder()
                .setXmppDomain(JidCreate.domainBareFrom("apache.camel"))
                .setHostAddress(InetAddress.getByName(hostAddress))
                .setPort(port)
                .setCustomSSLContext(sslContext)
                .setHostnameVerifier((hostname, session) -> true)
                .build();

        registry.bind("customConnectionConfig", connectionConfig);
    }
}
| apache-2.0 |
dlwh/breeze | math/src/main/scala/breeze/linalg/NumericOps.scala | 13690 | package breeze.linalg
/*
Copyright 2012 David Hall
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import operators._
import support.{CanTranspose, CanSlice2}
import breeze.generic.UFunc
import breeze.storage.Zero
import scala.reflect.ClassTag
/**
 * Operator support for immutable tensor-like types. Every operator farms out
 * to an implicit UFunc implementation for the concrete operand types.
 */
trait ImmutableNumericOps[+This] extends Any {
  /** The concrete value the operators act on. */
  def repr: This

  // Immutable
  /** Element-wise sum of this and b. */
  final def :+[TT >: This, B, That](b: B)(implicit op: OpAdd.Impl2[TT, B, That]) = op(repr, b)

  /** Element-wise product of this and b. */
  final def :*[TT >: This, B, That](b: B)(implicit op: OpMulScalar.Impl2[TT, B, That]) = op(repr, b)

  /** Element-wise equality comparator of this and b. */
  final def :==[TT >: This, B, That](b: B)(implicit op: OpEq.Impl2[TT, B, That]) = op(repr, b)

  /** Element-wise inequality comparator of this and b. */
  final def :!=[TT >: This, B, That](b: B)(implicit op: OpNe.Impl2[TT, B, That]) = op(repr, b)

  /*
   * Ring Element Ops
   */

  // Immutable
  /** Element-wise numeric negation of this. */
  final def unary_-[TT >: This, That](implicit op: OpNeg.Impl[TT, That]) = op(repr)

  /** Element-wise difference of this and b. */
  final def :-[TT >: This, B, That](b: B)(implicit op: OpSub.Impl2[TT, B, That]) = op(repr, b)

  /** Alias for :-(b) for all b. */
  final def -[TT >: This, B, That](b: B)(implicit op: OpSub.Impl2[TT, B, That]) = {
    op(repr, b)
  }

  /** Element-wise modulo of this and b. */
  final def :%[TT >: This, B, That](b: B)(implicit op: OpMod.Impl2[TT, B, That]) = op(repr, b)

  /** Alias for :%(b) when b is a scalar. */
  final def %[TT >: This, B, That](b: B)(implicit op: OpMod.Impl2[TT, B, That]) = {
    op(repr, b)
  }

  /*
   * Field Element Ops
   */

  // Immutable
  /** Element-wise quotient of this and b. */
  final def :/[TT >: This, B, That](b: B)(implicit op: OpDiv.Impl2[TT, B, That]) = op(repr, b)

  /** Alias for :/(b) when b is a scalar. */
  final def /[TT >: This, B, That](b: B)(implicit op: OpDiv.Impl2[TT, B, That]) = {
    op(repr, b)
  }

  /** Element-wise exponentiation of this and b. */
  final def :^[TT >: This, B, That](b: B)(implicit op: OpPow.Impl2[TT, B, That]) = op(repr, b)

  /** Represents the "natural" norm of this vector, for types that don't support arbitrary norms */
  @deprecated("Use norm(XXX) instead of XXX.norm", "0.9")
  final def norm[TT >: This, R]()(implicit op: breeze.linalg.norm.Impl[TT, R]): R = {
    op(repr)
  }

  /** Represents the norm of this vector */
  @deprecated("Use norm(XXX) instead of XXX.norm", "0.9")
  final def norm[TT >: This, B, R](b: B)(implicit op: breeze.linalg.norm.Impl2[TT, B, R]): R = {
    op(repr, b)
  }

  /** Inner product of this and b. */
  final def dot[TT >: This, B, BB >: B, That](b: B)(implicit op: OpMulInner.Impl2[TT, BB, That]) = op(repr, b)

  /*
   * Logical Ops
   */

  /** Element-wise logical "not" of this.
   *
   *  NOTE(review): restored from the garbled `final def unary_ = op(repr)`,
   *  which could not compile (no implicit `op` in scope); upstream breeze
   *  declares this operator as `unary_!` backed by OpNot.
   */
  final def unary_![TT >: This, That](implicit op: OpNot.Impl[TT, That]) = op(repr)

  /** Element-wise logical "and" operator -- returns true if corresponding elements are non-zero. */
  final def :&[TT >: This, B, That](b: B)(implicit op: OpAnd.Impl2[TT, B, That]) = op(repr, b)

  /** Element-wise logical "or" operator -- returns true if either element is non-zero. */
  final def :|[TT >: This, B, That](b: B)(implicit op: OpOr.Impl2[TT, B, That]) = op(repr, b)

  /** Element-wise logical "xor" operator -- returns true if only one of the corresponding elements is non-zero. */
  final def :^^[TT >: This, B, That](b: B)(implicit op: OpXor.Impl2[TT, B, That]) = op(repr, b)

  /** Alias for :&(b) for all b. */
  final def &[TT >: This, B, That](b: B)(implicit op: OpAnd.Impl2[TT, B, That]) = {
    op(repr, b)
  }

  /** Alias for :|(b) for all b. */
  final def |[TT >: This, B, That](b: B)(implicit op: OpOr.Impl2[TT, B, That]) = {
    op(repr, b)
  }

  /** Alias for :^^(b) for all b. */
  final def ^^[TT >: This, B, That](b: B)(implicit op: OpXor.Impl2[TT, B, That]): That = {
    op(repr, b)
  }

  /*
   * Matrix-y ops
   */

  /** Matrix multiplication (and scalar multiplication that follows standard order of operations) */
  final def *[TT >: This, B, That](b: B)(implicit op: OpMulMatrix.Impl2[TT, B, That]) = {
    op(repr, b)
  }

  /** A transposed view of this object. */
  final def t[TT >: This, That](implicit op: CanTranspose[TT, That]) =
    op.apply(repr)

  /** Shaped solve of this by b. */
  def \[TT >: This, B, That](b: B)(implicit op: OpSolveMatrixBy.Impl2[TT, B, That]) =
    op.apply(repr, b)

  /** A transposed view of this object, followed by a slice. Sadly frequently necessary. */
  final def t[TT >: This, That, Slice1, Slice2, Result](a: Slice1,
                                                        b: Slice2)
                                                       (implicit op: CanTranspose[TT, That],
                                                        canSlice: CanSlice2[That, Slice1, Slice2, Result]): Result =
    canSlice(op.apply(repr), a, b)
}
/**
* In some sense, this is the real root of the linalg hierarchy. It provides
* methods for doing operations on a Tensor-like thing. All methods farm out to some implicit or another.
* We use this when we don't care about the index into the Tensor, or if we don't really have an index.
* @author dlwh
*/
trait NumericOps[+This] extends ImmutableNumericOps[This] {

  // We move this here because of ambiguities with any2stringadd
  /** Alias for :+(b) for all b. */
  final def +[TT >: This, B, That](b: B)(implicit op: OpAdd.Impl2[TT, B, That]) = {
    op(repr, b)
  }

  /*
   * Semiring Element Ops
   */

  // Mutable — all in-place ops return `repr` so calls can be chained.
  /** Mutates this by element-wise assignment of b into this. */
  final def :=[TT >: This, B](b: B)(implicit op: OpSet.InPlaceImpl2[TT, B]): This = {
    op(repr, b)
    repr
  }

  /** Mutates this by element-wise addition of b into this. */
  final def :+=[TT >: This, B](b: B)(implicit op: OpAdd.InPlaceImpl2[TT, B]): This = {
    op(repr, b)
    repr
  }

  /** Mutates this by element-wise multiplication of b into this. */
  final def :*=[TT >: This, B](b: B)(implicit op: OpMulScalar.InPlaceImpl2[TT, B]): This = {
    op(repr, b)
    repr
  }

  /** Alias for :+=(b) for all b. */
  final def +=[TT >: This, B](b: B)(implicit op: OpAdd.InPlaceImpl2[TT, B]) =
    this.:+=[TT, B](b)

  /** Alias for :*=(b) when b is a scalar. */
  final def *=[TT >: This, B](b: B)(implicit op: OpMulScalar.InPlaceImpl2[TT, B]) =
    this.:*=[TT, B](b)

  // Mutable
  /** Mutates this by element-wise subtraction of b from this */
  final def :-=[TT >: This, B](b: B)(implicit op: OpSub.InPlaceImpl2[TT, B]): This = {
    op(repr, b)
    repr
  }

  /** Mutates this by element-wise modulo of b into this. */
  final def :%=[TT >: This, B](b: B)(implicit op: OpMod.InPlaceImpl2[TT, B]): This = {
    op(repr, b)
    repr
  }

  /** Alias for :%=(b) when b is a scalar. */
  final def %=[TT >: This, B](b: B)(implicit op: OpMod.InPlaceImpl2[TT, B]) =
    this.:%=[TT, B](b)

  /** Alias for :-=(b) for all b. */
  final def -=[TT >: This, B](b: B)(implicit op: OpSub.InPlaceImpl2[TT, B]) =
    this.:-=[TT, B](b)

  // Mutable
  /** Mutates this by element-wise division of b into this */
  final def :/=[TT >: This, B](b: B)(implicit op: OpDiv.InPlaceImpl2[TT, B]): This = {
    op(repr, b)
    repr
  }

  /** Mutates this by element-wise exponentiation of this by b. */
  final def :^=[TT >: This, B](b: B)(implicit op: OpPow.InPlaceImpl2[TT, B]): This = {
    op(repr, b)
    repr
  }

  /** Alias for :/=(b) when b is a scalar. */
  final def /=[TT >: This, B](b: B)(implicit op: OpDiv.InPlaceImpl2[TT, B]) =
    this.:/=[TT, B](b)

  /*
   * Ordering Ops
   */

  /** Element-wise less-than comparator of this and b. */
  final def :<[TT >: This, B, That](b: B)(implicit op: OpLT.Impl2[TT, B, That]) = op(repr, b)

  /** Element-wise less-than-or-equal-to comparator of this and b. */
  final def :<=[TT >: This, B, That](b: B)(implicit op: OpLTE.Impl2[TT, B, That]) = op(repr, b)

  /** Element-wise greater-than comparator of this and b. */
  final def :>[TT >: This, B, That](b: B)(implicit op: OpGT.Impl2[TT, B, That]) = op(repr, b)

  /** Element-wise greater-than-or-equal-to comparator of this and b. */
  final def :>=[TT >: This, B, That](b: B)(implicit op: OpGTE.Impl2[TT, B, That]) = op(repr, b)

  /** Mutates this by element-wise and of this and b. */
  final def :&=[TT >: This, B](b: B)(implicit op: OpAnd.InPlaceImpl2[TT, B]): This = {
    op(repr, b)
    repr
  }

  /** Mutates this by element-wise or of this and b. */
  final def :|=[TT >: This, B](b: B)(implicit op: OpOr.InPlaceImpl2[TT, B]): This = {
    op(repr, b)
    repr
  }

  /** Mutates this by element-wise xor of this and b. */
  final def :^^=[TT >: This, B](b: B)(implicit op: OpXor.InPlaceImpl2[TT, B]): This = {
    op(repr, b)
    repr
  }

  /** Mutates this by element-wise and of this and b. Alias for :&=(b). */
  final def &=[TT >: This, B](b: B)(implicit op: OpAnd.InPlaceImpl2[TT, B]): This = {
    op(repr, b)
    repr
  }

  /** Mutates this by element-wise or of this and b. Alias for :|=(b). */
  final def |=[TT >: This, B](b: B)(implicit op: OpOr.InPlaceImpl2[TT, B]): This = {
    op(repr, b)
    repr
  }

  /** Mutates this by element-wise xor of this and b. Alias for :^^=(b). */
  final def ^^=[TT >: This, B](b: B)(implicit op: OpXor.InPlaceImpl2[TT, B]): This = {
    op(repr, b)
    repr
  }
}
object NumericOps {

  /*
  implicit class ScalarsAreNumericOps[@specialized(Int, Double, Long, Float) S](x: S) extends NumericOps[S] {
    def repr: S = x
  }
  */

  /**
   * If you import this object's members, you can treat Arrays as DenseVectors.
   *
   * Each implicit wraps the array in a DenseVector, applies the corresponding
   * DenseVector operation, and hands back the result's backing array —
   * copying first whenever the result is not a plain, densely-packed view
   * (nonzero offset or stride != 1).
   */
  object Arrays extends ArraysLowPriority {

    /** Makes the NumericOps operators available directly on an Array. */
    implicit class ArrayIsNumericOps[V](arr: Array[V]) extends NumericOps[Array[V]] {
      def repr = arr
    }

    // TODO these two really shouldn't be necessary, but there's interference(?) from any2StringAdd, or something.
    /** Array + Array addition, delegating to the DenseVector OpAdd implementation. */
    implicit def binaryOpFromDVOp2Add[V](implicit op: OpAdd.Impl2[DenseVector[V], DenseVector[V], DenseVector[V]]): OpAdd.Impl2[Array[V], Array[V], Array[V]] = {
      new OpAdd.Impl2[Array[V], Array[V], Array[V]] {
        def apply(a: Array[V], b: Array[V]): Array[V] = {
          val r = op(new DenseVector(a), new DenseVector[V](b))
          // Only return the backing array directly if it is a dense view.
          if (r.offset != 0 || r.stride != 1) {
            r.copy.data
          } else {
            r.data
          }
        }
      }
    }

    /** Array + scalar addition, delegating to the DenseVector OpAdd implementation. */
    implicit def binaryOpAddFromDVUOpAdd2[V](implicit op: OpAdd.Impl2[DenseVector[V], V, DenseVector[V]]) = {
      new OpAdd.Impl2[Array[V], V, Array[V]] {
        def apply(a: Array[V], b: V): Array[V] = {
          val r = op(new DenseVector(a), b)
          if (r.offset != 0 || r.stride != 1) {
            r.copy.data
          } else {
            r.data
          }
        }
      }
    }

    /** Lifts any binary DenseVector/DenseVector op to Array/Array. */
    implicit def binaryOpFromDVOp2[V, Op <: OpType](implicit op: UFunc.UImpl2[Op, DenseVector[V], DenseVector[V], DenseVector[V]]): UFunc.UImpl2[Op, Array[V], Array[V], Array[V]] = {
      new UFunc.UImpl2[Op, Array[V], Array[V], Array[V]] {
        def apply(a: Array[V], b: Array[V]): Array[V] = {
          val r = op(new DenseVector(a), new DenseVector[V](b))
          if (r.offset != 0 || r.stride != 1) {
            r.copy.data
          } else {
            r.data
          }
        }
      }
    }

    /** Lifts any in-place DenseVector/DenseVector op to Array/Array (mutates `a`). */
    implicit def binaryUpdateOpFromDVDVOp[V, Op <: OpType](implicit op: UFunc.InPlaceImpl2[Op, DenseVector[V], DenseVector[V]]) = {
      new UFunc.InPlaceImpl2[Op, Array[V], Array[V]] {
        def apply(a: Array[V], b: Array[V]) {
          op(new DenseVector(a), new DenseVector(b))
        }
      }
    }

    /** Lifts any binary DenseVector/scalar op to Array/scalar. */
    implicit def binaryOpFromDVUOp2[V, Op <: OpType](implicit op: UFunc.UImpl2[Op, DenseVector[V], V, DenseVector[V]]) = {
      new UFunc.UImpl2[Op, Array[V], V, Array[V]] {
        def apply(a: Array[V], b: V): Array[V] = {
          val r = op(new DenseVector(a), b)
          if (r.offset != 0 || r.stride != 1) {
            r.copy.data
          } else {
            r.data
          }
        }
      }
    }

  }

  /** Lower-priority fallbacks so the more specific Arrays implicits win on ambiguity. */
  sealed trait ArraysLowPriority {

    /** Lifts an in-place DenseVector/Other op to Array/Other (mutates `a`). */
    implicit def binaryUpdateOpFromDVOp[V, Other, Op, U](implicit op: UFunc.InPlaceImpl2[Op, DenseVector[V], Other], man: ClassTag[U]) = {
      new UFunc.InPlaceImpl2[Op, Array[V], Other] {
        def apply(a: Array[V], b: Other) {
          op(new DenseVector(a), b)
        }
      }
    }

    /** Lifts a binary DenseVector/Other op to Array/Other, possibly changing element type to U. */
    implicit def binaryOpFromDVOp[V, Other, Op <: OpType, U](implicit op: UFunc.UImpl2[Op, DenseVector[V], Other, DenseVector[U]],
                                                             man: ClassTag[U],
                                                             zero: Zero[U]) = {
      new UFunc.UImpl2[Op, Array[V], Other, Array[U]] {
        def apply(a: Array[V], b: Other): Array[U] = {
          val r = op(new DenseVector(a), b)
          // Non-dense result: materialize into a fresh zeroed vector first.
          if (r.offset != 0 || r.stride != 1) {
            val z = DenseVector.zeros[U](r.length)
            z := r
            z.data
          } else {
            r.data
          }
        }
      }
    }
  }

  /** Lifts an in-place DenseVector/scalar op to Array/scalar (mutates `a`). */
  implicit def binaryUpdateOpFromDVVOp[V, Op, U](implicit op: UFunc.InPlaceImpl2[Op, DenseVector[V], U], man: ClassTag[U]) = {
    new UFunc.InPlaceImpl2[Op, Array[V], U] {
      def apply(a: Array[V], b: U) {
        op(new DenseVector(a), b)
      }
    }
  }
}
| apache-2.0 |
mindthegab/SFE-Minuet-DesktopClient | minuet/CefGlue/Enums/CefMenuItemType.cs | 357 | //
// This file manually written from cef/include/internal/cef_types.h.
// C API name: cef_menu_item_type_t.
//
namespace Xilium.CefGlue
{
    /// <summary>
    /// Supported menu item types.
    /// Mirrors the native cef_menu_item_type_t enumeration (see the header
    /// note above); member order must stay in sync with the C API values.
    /// </summary>
    public enum CefMenuItemType
    {
        /// <summary>Not a menu item.</summary>
        None,

        /// <summary>A command item.</summary>
        Command,

        /// <summary>A check item.</summary>
        Check,

        /// <summary>A radio item.</summary>
        Radio,

        /// <summary>A separator line.</summary>
        Separator,

        /// <summary>A sub-menu item.</summary>
        SubMenu,
    }
}
| apache-2.0 |
nevenr/jolokia | src/site/resources/js/jolokia.js | 31506 | /*
* Copyright 2009-2012 Roland Huss
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* =================================
* Jolokia Javascript Client Library
* =================================
*
* Requires jquery.js and json2.js
* (if no native JSON.stringify() support is available)
*/
(function() {
var _jolokiaConstructorFunc = function ($) {
// Default paramerters for GET and POST requests
var DEFAULT_CLIENT_PARAMS = {
type:"POST",
jsonp:false
};
var GET_AJAX_PARAMS = {
type:"GET"
};
var POST_AJAX_PARAMS = {
type:"POST",
processData:false,
dataType:"json",
contentType:"text/json"
};
// Processing parameters which are added to the
// URL as query parameters if given as options
var PROCESSING_PARAMS = ["maxDepth", "maxCollectionSize", "maxObjects", "ignoreErrors", "canonicalNaming",
"serializeException", "includeStackTrace", "ifModifiedSince"];
/**
* Constructor for creating a client to the Jolokia agent.
*
* An object containing the default parameters can be provided as argument. For the possible parameters
* see {@link #request()}.
*
* @param param either a string in which case it is used as the URL to the agent or
* an object with the default parameters as key-value pairs
*/
function Jolokia(param) {
// If called without 'new', we are constructing an object
// nevertheless
if (!(this instanceof arguments.callee)) {
return new Jolokia(param);
}
// Jolokia Javascript Client version
this.CLIENT_VERSION = "1.2.2";
// Registered requests for fetching periodically
var jobs = [];
// Options used for every request
var agentOptions = {};
// State of the scheduler
var pollerIsRunning = false;
// Allow a single URL parameter as well
if (typeof param === "string") {
param = {url:param};
}
$.extend(agentOptions, DEFAULT_CLIENT_PARAMS, param);
// ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// Public methods
/**
* The request method using one or more JSON requests and sending it to the agent. Beside the
* request a bunch of options can be given, which are merged with the options provided
* at the constructor (where the options given here take precedence).
*
* Known options are:
*
* <dl>
* <dt>url</dt>
* <dd>Agent URL, which is mandatory</dd>
* <dt>method</dt>
* <dd>
* Either "post" or "get" depending on the desired HTTP method (case does not matter).
* Please note, that bulk requests are not possible with "get". On the other
* hand, JSONP requests are not possible with "post" (which obviously implies
* that bulk request cannot be used with JSONP requests). Also, when using a
* <code>read</code> type request for multiple attributes, this also can
* only be sent as "post" requests. If not given, a HTTP method is determined
* dyamically. If a method is selected which doesn't fit to the request, an error
* is raised.
* </dd>
* <dt>jsonp</dt>
* <dd>
* Whether the request should be sent via JSONP (a technique for allowing cross
* domain request circumventing the infamous "same-origin-policy"). This can be
* used only with HTTP "get" requests.
* </dd>
* <dt>success</dt>
* <dd>
* Callback function which is called for a successful request. The callback receives
* the response as single argument. If no <code>success</code> callback is given, then
* the request is performed synchronously and gives back the response as return
* value.
* </dd>
* <dt>error</dt>
* <dd>
* Callback in case a Jolokia error occurs. A Jolokia error is one, in which the HTTP request
* suceeded with a status code of 200, but the response object contains a status other
* than OK (200) which happens if the request JMX operation fails. This callback receives
* the full Jolokia response object (with a key <code>error</code> set). If no error callback
* is given, but an asynchronous operation is performed, the error response is printed
* to the Javascript console by default.
* </dd>
* <dt>ajaxError</dt>
* <dd>
* Global error callback called when the Ajax request itself failed. It obtains the same arguments
* as the error callback given for <code>jQuery.ajax()</code>, i.e. the <code>XmlHttpResonse</code>,
* a text status and an error thrown. Refer to the jQuery documentation for more information about
* this error handler.
* </dd>
* <dt>username</dt>
* <dd>A username used for HTTP authentication</dd>
* <dt>password</dt>
* <dd>A password used for HTTP authentication</dd>
* <dt>timeout</dt>
* <dd>Timeout for the HTTP request</dd>
* <dt>maxDepth</dt>
* <dd>Maximum traversal depth for serialization of complex return values</dd>
* <dt>maxCollectionSize</dt>
* <dd>
* Maximum size of collections returned during serialization.
* If larger, the collection is returned truncated.
* </dd>
* <dt>maxObjects</dt>
* <dd>
* Maximum number of objects contained in the response.
* </dd>
* <dt>ignoreErrors</dt>
* <dd>
* If set to true, errors during JMX operations and JSON serialization
* are ignored. Otherwise if a single deserialization fails, the whole request
* returns with an error. This works only for certain operations like pattern reads..
* </dd>
* </dl>
*
* @param request the request to send
* @param params parameters used for sending the request
* @return the response object if called synchronously or nothing if called for asynchronous operation.
*/
this.request = function (request, params) {
var opts = $.extend({}, agentOptions, params);
assertNotNull(opts.url, "No URL given");
var ajaxParams = {};
// Copy over direct params for the jQuery ajax call
$.each(["username", "password", "timeout"], function (i, key) {
if (opts[key]) {
ajaxParams[key] = opts[key];
}
});
if (extractMethod(request, opts) === "post") {
$.extend(ajaxParams, POST_AJAX_PARAMS);
ajaxParams.data = JSON.stringify(request);
ajaxParams.url = ensureTrailingSlash(opts.url);
} else {
$.extend(ajaxParams, GET_AJAX_PARAMS);
ajaxParams.dataType = opts.jsonp ? "jsonp" : "json";
ajaxParams.url = opts.url + "/" + constructGetUrlPath(request);
}
// Add processing parameters as query parameters
ajaxParams.url = addProcessingParameters(ajaxParams.url, opts);
// Global error handler
if (opts.ajaxError) {
ajaxParams.error = opts.ajaxError;
}
// Dispatch Callbacks to error and success handlers
if (opts.success) {
var success_callback = constructCallbackDispatcher(opts.success);
var error_callback = constructCallbackDispatcher(opts.error);
ajaxParams.success = function (data) {
var responses = $.isArray(data) ? data : [ data ];
for (var idx = 0; idx < responses.length; idx++) {
var resp = responses[idx];
if (Jolokia.isError(resp)) {
error_callback(resp, idx);
} else {
success_callback(resp, idx);
}
}
};
// Perform the request
$.ajax(ajaxParams);
return null;
} else {
// Synchronous operation requested (i.e. no callbacks provided)
if (opts.jsonp) {
throw Error("JSONP is not supported for synchronous requests");
}
ajaxParams.async = false;
var xhr = $.ajax(ajaxParams);
if (httpSuccess(xhr)) {
return $.parseJSON(xhr.responseText);
} else {
return null;
}
}
};
// ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// Scheduler related methods
/**
* Register one or more requests for periodically polling the agent along with a callback to call on receipt
* of the response.
*
* The first argument can be either an object or a function. The remaining arguments are interpreted
* as Jolokia request objects
*
* If a function is given or an object with an attribute <code>callback</code> holding a function, then
* this function is called with all responses received as argument, regardless whether the individual response
* indicates a success or error state.
*
* If the first argument is an object with two callback attributes <code>success</code> and <code>error</code>,
* these functions are called for <em>each</em> response separately, depending whether the response
* indicates success or an error state. If multiple requests have been registered along with this callback object,
* the callback is called multiple times, one for each request in the same order as the request are given.
* As second argument, the handle which is returned by this method is given and as third argument the index
* within the list of requests.
*
* If the first argument is an object, an additional 'config' attribute with processing parameters can
* be given which is used as default for the registered requests.
* Request with a 'config' section take precedence.
*
* @param callback and options specification.
* @param request, request, .... One or more requests to be registered for this single callback
* @return handle which can be used for unregistering the request again or for correlation purposes in the callbacks
*/
this.register = function() {
if (arguments.length < 2) {
throw "At a least one request must be provided";
}
var callback = arguments[0],
requests = Array.prototype.slice.call(arguments,1),
job;
if (typeof callback === 'object') {
if (callback.success && callback.error) {
job = {
success: callback.success,
error: callback.error
};
} else if (callback.callback) {
job = {
callback: callback.callback
};
} else {
throw "Either 'callback' or ('success' and 'error') callback must be provided " +
"when registering a Jolokia job";
}
job = $.extend(job,{
config: callback.config,
onlyIfModified: callback.onlyIfModified
});
} else if (typeof callback === 'function') {
// Simplest version without config possibility
job = {
success: null,
error: null,
callback: callback
};
} else {
throw "First argument must be either a callback func " +
"or an object with 'success' and 'error' attributes";
}
if (!requests) {
throw "No requests given";
}
job.requests = requests;
var idx = jobs.length;
jobs[idx] = job;
return idx;
};
/**
     * Unregister one or more requests which have been registered with
     * {@link #register}. As parameter, the handle returned during the
     * registration process must be given.
* @param handle
*/
this.unregister = function(handle) {
if (handle < jobs.length) {
jobs[handle] = undefined;
}
};
/**
* Return an array of handles for currently registered jobs.
* @return Array of job handles or an empty array
*/
this.jobs = function() {
var ret = [],
len = jobs.length;
for (var i = 0; i < len; i++) {
if (jobs[i]) {
ret.push(i);
}
}
return ret;
};
/**
* Start the poller. The interval between two polling attempts can be optionally given or are taken from
* the parameter <code>fetchInterval</code> given at construction time. If no interval is given at all,
* 30 seconds is the default.
*
* If the poller is already running (i.e. {@link #isRunning()} is <code>true</code> then the scheduler
* is restarted, but only if the new interval differs from the currently active one.
*
* @param interval interval in milliseconds between two polling attempts
*/
this.start = function(interval) {
interval = interval || agentOptions.fetchInterval || 30000;
if (pollerIsRunning) {
if (interval === agentOptions.fetchInterval) {
// Nothing to do
return;
}
// Re-start with new interval
this.stop();
}
agentOptions.fetchInterval = interval;
this.timerId = setInterval(callJolokia(this,jobs), interval);
pollerIsRunning = true;
};
/**
* Stop the poller. If the poller is not running, no operation is performed.
*/
this.stop = function() {
if (!pollerIsRunning && this.timerId != undefined) {
return;
}
clearInterval(this.timerId);
this.timerId = null;
pollerIsRunning = false;
};
/**
* Check whether the poller is running.
* @return true if the poller is running, false otherwise.
*/
this.isRunning = function() {
return pollerIsRunning;
};
// ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
}
// ========================================================================
// Private Methods:
// Create a function called by a timer, which requests the registered requests
// calling the stored callback on receipt. jolokia and jobs are put into the closure
function callJolokia(jolokia,jobs) {
return function() {
var errorCbs = [],
successCbs = [],
i, j,
len = jobs.length;
var requests = [];
for (i = 0; i < len; i++) {
var job = jobs[i];
// Can happen when job has been deleted
// TODO: Can be probably optimized so that only the existing keys of jobs can be visited
if (!job) { continue; }
var reqsLen = job.requests.length;
if (job.success) {
// Success/error pair of callbacks. For multiple request,
// these callback will be called multiple times
var successCb = cbSuccessClosure(job,i);
var errorCb = cbErrorClosure(job,i);
for (j = 0; j < reqsLen; j++) {
requests.push(prepareRequest(job,j));
successCbs.push(successCb);
errorCbs.push(errorCb);
}
} else {
// Job should have a single callback (job.callback) which will be
// called once with all responses at once as an array
var callback = cbCallbackClosure(job,jolokia);
// Add callbacks which collect the responses
for (j = 0; j < reqsLen - 1; j++) {
requests.push(prepareRequest(job,j));
successCbs.push(callback.cb);
errorCbs.push(callback.cb);
}
// Add final callback which finally will call the job.callback with all
// collected responses.
requests.push(prepareRequest(job,reqsLen-1));
successCbs.push(callback.lcb);
errorCbs.push(callback.lcb);
}
}
var opts = {
// Dispatch to the build up callbacks, request by request
success: function(resp, j) {
return successCbs[j].apply(jolokia, [resp, j]);
},
error: function(resp, j) {
return errorCbs[j].apply(jolokia, [resp, j]);
}
};
return jolokia.request(requests, opts);
};
}
// Prepare a request with the proper configuration
function prepareRequest(job,idx) {
var request = job.requests[idx],
config = job.config || {},
// Add the proper ifModifiedSince parameter if already called at least once
extra = job.onlyIfModified && job.lastModified ? { ifModifiedSince: job.lastModified } : {};
request.config = $.extend({}, config, request.config, extra);
return request;
}
// Closure for a full callback which stores the responses in an (closed) array
// which the finally is feed in to the callback as array
function cbCallbackClosure(job,jolokia) {
var responses = [],
callback = job.callback,
lastModified = 0;
return {
cb : addResponse,
lcb : function(resp,j) {
addResponse(resp);
// Callback is called only if at least one non-cached response
// is obtained. Update job's timestamp internally
if (responses.length > 0) {
job.lastModified = lastModified;
callback.apply(jolokia,responses);
}
}
};
function addResponse(resp,j) {
// Only remember responses with values and remember lowest timetamp, too.
if (resp.status != 304) {
if (lastModified == 0 || resp.timestamp < lastModified ) {
lastModified = resp.timestamp;
}
responses.push(resp);
}
}
}
// Own function for creating a closure to avoid reference to mutable state in the loop
function cbErrorClosure(job, i) {
var callback = job.error;
return function(resp,j) {
// If we get a "304 - Not Modified" 'error', we do nothing
if (resp.status == 304) {
return;
}
if (callback) {
callback(resp,i,j)
}
}
}
function cbSuccessClosure(job, i) {
var callback = job.success;
return function(resp,j) {
if (callback) {
// Remember last success callback
if (job.onlyIfModified) {
job.lastModified = resp.timestamp;
}
callback(resp,i,j)
}
}
}
// Construct a callback dispatcher for appropriately dispatching
// to a single callback or within an array of callbacks
function constructCallbackDispatcher(callback) {
if (callback == null) {
return function (response) {
console.warn("Ignoring response " + JSON.stringify(response));
};
} else if (callback === "ignore") {
// Ignore the return value
return function () {
};
}
var callbackArray = $.isArray(callback) ? callback : [ callback ];
return function (response, idx) {
callbackArray[idx % callbackArray.length](response, idx);
}
}
// Extract the HTTP-Method to use and make some sanity checks if
// the method was provided as part of the options, but dont fit
// to the request given
function extractMethod(request, opts) {
var methodGiven = opts && opts.method ? opts.method.toLowerCase() : null,
method;
if (methodGiven) {
if (methodGiven === "get") {
if ($.isArray(request)) {
throw new Error("Cannot use GET with bulk requests");
}
if (request.type.toLowerCase() === "read" && $.isArray(request.attribute)) {
throw new Error("Cannot use GET for read with multiple attributes");
}
if (request.target) {
throw new Error("Cannot use GET request with proxy mode");
}
if (request.config) {
throw new Error("Cannot use GET with request specific config");
}
}
method = methodGiven;
} else {
// Determine method dynamically
method = $.isArray(request) ||
request.config ||
(request.type.toLowerCase() === "read" && $.isArray(request.attribute)) ||
request.target ?
"post" : "get";
}
if (opts.jsonp && method === "post") {
throw new Error("Can not use JSONP with POST requests");
}
return method;
}
// Add processing parameters given as request options
// to an URL as GET query parameters
function addProcessingParameters(url, opts) {
var sep = url.indexOf("?") > 0 ? "&" : "?";
$.each(PROCESSING_PARAMS, function (i, key) {
if (opts[key] != null) {
url += sep + key + "=" + opts[key];
sep = "&";
}
});
return url;
}
// ========================================================================
// GET-Request handling
// Create the URL used for a GET request
function constructGetUrlPath(request) {
var type = request.type;
assertNotNull(type, "No request type given for building a GET request");
type = type.toLowerCase();
var extractor = GET_URL_EXTRACTORS[type];
assertNotNull(extractor, "Unknown request type " + type);
var result = extractor(request);
var parts = result.parts || {};
var url = type;
$.each(parts, function (i, v) {
url += "/" + Jolokia.escape(v)
});
if (result.path) {
url += (result.path[0] == '/' ? "" : "/") + result.path;
}
return url;
}
// For POST requests it is recommended to have a trailing slash at the URL
// in order to avoid a redirect which then results in a GET request.
// See also https://bugs.eclipse.org/bugs/show_bug.cgi?id=331194#c1
// for an explanation
function ensureTrailingSlash(url) {
// Squeeze any URL to a single one, optionally adding one
return url.replace(/\/*$/, "/");
}
// Extractors used for preparing a GET request, i.e. for creating a stack
// of arguments which gets appended to create the proper access URL
// key: lowercase request type.
// The return value is an object with two properties: The 'parts' to glue together, where
// each part gets escaped and a 'path' which is appended literally
var GET_URL_EXTRACTORS = {
"read":function (request) {
return { parts:[ request.mbean, request.attribute ], path:request.path };
},
"write":function (request) {
return { parts:[request.mbean, request.attribute, valueToString(request.value)], path:request.path};
},
"exec":function (request) {
var ret = [ request.mbean, request.operation ];
if (request.arguments && request.arguments.length > 0) {
$.each(request.arguments, function (index, value) {
ret.push(valueToString(value));
});
}
return {parts:ret};
},
"version":function () {
return {};
},
"search":function (request) {
return { parts:[request.mbean]};
},
"list":function (request) {
return { path:request.path};
}
};
// Convert a value to a string for passing it to the Jolokia agent via
// a get request (write, exec). Value can be either a single object or an array
function valueToString(value) {
if (value == null) {
return "[null]";
}
if ($.isArray(value)) {
var ret = "";
for (var i = 0; i < value.length; i++) {
ret += value == null ? "[null]" : singleValueToString(value[i]);
if (i < value.length - 1) {
ret += ",";
}
}
return ret;
} else {
return singleValueToString(value);
}
}
// Single value conversion for write/exec GET requests
function singleValueToString(value) {
if (typeof value === "string" && value.length == 0) {
return "\"\"";
} else {
return value.toString();
}
}
// Check whether a synchronous request was a success or not
// Taken from jQuery 1.4
function httpSuccess(xhr) {
try {
return !xhr.status && location.protocol === "file:" ||
xhr.status >= 200 && xhr.status < 300 ||
xhr.status === 304 || xhr.status === 1223;
} catch (e) {
}
return false;
}
// ===============================================================================================
// Utility methods:
function assertNotNull(object, message) {
if (object == null) {
throw new Error(message);
}
}
// ================================================================================================
// Escape a path part, can be used as a static method outside this function too
Jolokia.prototype.escape = Jolokia.escape = function (part) {
return encodeURIComponent(part.replace(/!/g, "!!").replace(/\//g, "!/"));
};
/**
* Utility method which checks whether a response is an error or a success
* @param resp response to check
* @return true if response is an error, false otherwise
*/
Jolokia.prototype.isError = Jolokia.isError = function(resp) {
return resp.status == null || resp.status != 200;
};
// Return back exported function/constructor
return Jolokia;
};
// =====================================================================================================
// Register either as global or as AMD module
(function (root, factory) {
if (typeof define === 'function' && define.amd) {
// AMD. Register as a named module
define(["jquery"], factory);
} else {
// Browser globals
root.Jolokia = factory(root.jQuery);
}
}(this, function (jQuery) {
return _jolokiaConstructorFunc(jQuery);
}));
}());
| apache-2.0 |
modocache/swift | lib/SILOptimizer/Utils/CheckedCastBrJumpThreading.cpp | 24773 | #define DEBUG_TYPE "sil-simplify-cfg"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Allocator.h"
#include "swift/SIL/SILInstruction.h"
#include "swift/SIL/InstructionUtils.h"
#include "swift/SILOptimizer/Analysis/DominanceAnalysis.h"
#include "swift/SILOptimizer/Utils/CFG.h"
#include "swift/SILOptimizer/Utils/Local.h"
#include "swift/SILOptimizer/Utils/SILInliner.h"
using namespace swift;
namespace {
/// This is a class implementing a dominator-based jump-threading
/// for checked_cast_br [exact].
/// This is a class implementing a dominator-based jump-threading
/// for checked_cast_br [exact].
class CheckedCastBrJumpThreading {
  // Basic block of the current checked_cast_br instruction.
  SILBasicBlock *BB;
  // Condition used by the current checked_cast_br instruction.
  SILValue Condition;
  // Block owning the BBArg the condition is based on (set during the
  // path-sensitive analysis), or null.
  SILBasicBlock *ArgBB;
  // The current function to be optimized.
  SILFunction *Fn;
  // Dominator information to be used.
  DominanceInfo *DT;
  // List of predecessors.
  typedef SmallVector<SILBasicBlock *, 8> PredList;
  // Predecessors reached only via a path along the
  // success branch of the dominating checked_cast_br.
  PredList SuccessPreds;
  // Predecessors reached only via a path along the
  // failure branch of the dominating checked_cast_br.
  PredList FailurePreds;
  // All other predecessors, where the outcome of the
  // checked_cast_br along the path is not known.
  unsigned numUnknownPreds = 0;
  // Basic blocks to be added to for reprocessing
  // after jump-threading is done.
  SmallVectorImpl<SILBasicBlock *> &BlocksForWorklist;
  // Information for transforming a single checked_cast_br.
  // This is the output of the optimization's analysis phase.
  struct Edit {
    // The block containing the checked_cast_br.
    SILBasicBlock *CCBBlock;
    // Copy of CheckedCastBrJumpThreading::SuccessPreds.
    PredList SuccessPreds;
    // Copy of CheckedCastBrJumpThreading::FailurePreds.
    PredList FailurePreds;
    // The argument of the dominating checked_cast_br's successor block.
    SILValue SuccessArg;
    // True if the current cast is known to fail (dominating exact cast
    // succeeded with a different type), so success must be rewired to failure.
    bool InvertSuccess;
    // True if CheckedCastBrJumpThreading::numUnknownPreds is not 0.
    bool hasUnknownPreds;
    Edit(SILBasicBlock *CCBBlock, bool InvertSuccess,
         const PredList &SuccessPreds,
         const PredList &FailurePreds,
         bool hasUnknownPreds, SILValue SuccessArg) :
      CCBBlock(CCBBlock), SuccessPreds(SuccessPreds), FailurePreds(FailurePreds),
      SuccessArg(SuccessArg), InvertSuccess(InvertSuccess),
      hasUnknownPreds(hasUnknownPreds) { }
    void modifyCFGForUnknownPreds();
    void modifyCFGForFailurePreds(Optional<BasicBlockCloner> &Cloner);
    void modifyCFGForSuccessPreds(Optional<BasicBlockCloner> &Cloner);
  };
  // Contains an entry for each checked_cast_br to be optimized.
  llvm::SmallVector<Edit *, 8> Edits;
  llvm::SpecificBumpPtrAllocator<Edit> EditAllocator;
  // Keeps track of what blocks we change the terminator instruction.
  llvm::SmallPtrSet<SILBasicBlock *, 16> BlocksToEdit;
  // Keeps track of what blocks we clone.
  llvm::SmallPtrSet<SILBasicBlock *, 16> BlocksToClone;
  bool areEquivalentConditionsAlongPaths(CheckedCastBranchInst *DomCCBI);
  bool areEquivalentConditionsAlongSomePaths(CheckedCastBranchInst *DomCCBI,
                                             SILValue DomCondition);
  bool handleArgBBIsEntryBlock(SILBasicBlock *ArgBB,
                               CheckedCastBranchInst *DomCCBI);
  bool checkCloningConstraints();
  void classifyPredecessor(SILBasicBlock *Pred, bool SuccessDominates,
                           bool FailureDominates);
  SILValue isArgValueEquivalentToCondition(SILValue Value,
                                           SILBasicBlock *DomBB,
                                           SILValue DomValue,
                                           DominanceInfo *DT);
  // Analyze a single checked_cast_br against its dominators and record
  // a planned Edit when jump-threading is possible.
  bool trySimplify(CheckedCastBranchInst *CCBI);
public:
  CheckedCastBrJumpThreading(SILFunction *Fn, DominanceInfo *DT,
                             SmallVectorImpl<SILBasicBlock *> &BlocksForWorklist)
      : Fn(Fn), DT(DT), BlocksForWorklist(BlocksForWorklist) { }
  void optimizeFunction();
};
} // end anonymous namespace
/// Estimate the cost of inlining a given basic block.
/// Estimate the cost of inlining a given basic block, giving up as soon as
/// the running total exceeds \p Cutoff (the caller only compares against it).
static unsigned basicBlockInlineCost(SILBasicBlock *BB, unsigned Cutoff) {
  unsigned Total = 0;
  for (SILInstruction &Inst : *BB) {
    Total += unsigned(instructionInlineCost(Inst));
    // Early exit: precise totals above the cutoff are irrelevant.
    if (Total > Cutoff)
      break;
  }
  return Total;
}
/// We cannot duplicate blocks with AllocStack instructions (they need to be
/// FIFO). Other instructions can be duplicated.
/// We cannot duplicate blocks with AllocStack instructions (they need to be
/// FIFO). Other instructions can be duplicated.
static bool canDuplicateBlock(SILBasicBlock *BB) {
  // The block is duplicatable iff every instruction in it is.
  for (SILInstruction &Inst : *BB)
    if (!Inst.isTriviallyDuplicatable())
      return false;
  return true;
}
/// Classify a predecessor of a BB containing checked_cast_br as being
/// reachable via success or failure branches of a dominating checked_cast_br
/// or as unknown if it can be reached via success or failure branches
/// at the same time.
/// Classify a predecessor of a BB containing checked_cast_br as being
/// reachable via success or failure branches of a dominating checked_cast_br
/// or as unknown if it can be reached via success or failure branches
/// at the same time.
void CheckedCastBrJumpThreading::classifyPredecessor(
    SILBasicBlock *Pred, bool SuccessDominates, bool FailureDominates) {
  // Reachable via both branches (or neither): the outcome of the dominating
  // cast along this path is unknown.
  if (SuccessDominates == FailureDominates) {
    numUnknownPreds++;
  } else if (SuccessDominates) {
    SuccessPreds.push_back(Pred);
  } else {
    assert(FailureDominates && "success and failure should be mutual exclusive");
    FailurePreds.push_back(Pred);
  }
}
/// Check if the root value for Value that comes
/// along the path from DomBB is equivalent to the
/// DomCondition.
/// Check if the root value for Value that comes
/// along the path from DomBB is equivalent to the
/// DomCondition.
///
/// Walks backwards through chains of basic block arguments: at each step,
/// every incoming value must either be DomValue itself or a single common
/// value dominated by DomBB, which is then followed in the next iteration.
/// Returns the equivalent value, or an empty SILValue on failure.
SILValue CheckedCastBrJumpThreading::isArgValueEquivalentToCondition(
    SILValue Value, SILBasicBlock *DomBB, SILValue DomValue,
    DominanceInfo *DT) {
  SmallPtrSet<ValueBase *, 16> SeenValues;
  DomValue = stripClassCasts(DomValue);
  while (true) {
    Value = stripClassCasts(Value);
    if (Value == DomValue)
      return Value;
    // We know how to propagate through BBArgs only.
    auto *V = dyn_cast<SILArgument>(Value);
    if (!V)
      return SILValue();
    // Have we visited this BB already?
    if (!SeenValues.insert(Value).second)
      return SILValue();
    // Bound the search depth to keep the analysis cheap.
    if (SeenValues.size() > 10)
      return SILValue();
    SmallVector<SILValue, 4> IncomingValues;
    if (!V->getIncomingValues(IncomingValues) || IncomingValues.empty())
      return SILValue();
    ValueBase *Def = nullptr;
    for (auto IncomingValue : IncomingValues) {
      // Each incoming value should be either from a block
      // dominated by DomBB or it should be the value used in
      // condition in DomBB
      Value = stripClassCasts(IncomingValue);
      if (Value == DomValue)
        continue;
      // Values should be the same
      if (!Def)
        Def = Value;
      if (Def != Value)
        return SILValue();
      if (!DT->dominates(DomBB, Value->getParentBB()))
        return SILValue();
      // OK, this value is a potential candidate
    }
    // Continue the walk from the first incoming value.
    Value = IncomingValues[0];
  }
}
/// CFG transformation for the case where some predecessors have an unknown
/// dominating-cast outcome: the checked_cast_br itself must stay, but a
/// specific class_method pattern in the failure block still allows replacing
/// the cast with an unconditional branch to the failure block.
void CheckedCastBrJumpThreading::Edit::modifyCFGForUnknownPreds() {
  if (!hasUnknownPreds)
    return;
  // Check the FailureBB if it is a BB that contains a class_method
  // referring to the same value as a condition. This pattern is typical
  // for method chaining code like obj.method1().method2().etc()
  auto *CCBI = cast<CheckedCastBranchInst>(CCBBlock->getTerminator());
  SILInstruction *Inst = &*CCBI->getFailureBB()->begin();
  if (ClassMethodInst *CMI = dyn_cast<ClassMethodInst>(Inst)) {
    if (CMI->getOperand() == stripClassCasts(CCBI->getOperand())) {
      // Replace checked_cast_br by branch to FailureBB.
      SILBuilder(CCBI->getParent()).createBranch(CCBI->getLoc(),
                                                 CCBI->getFailureBB());
      CCBI->eraseFromParent();
    }
  }
}
/// Create a copy of the BB as a landing BB
/// for all FailurePreds.
/// Create a copy of the BB as a landing BB
/// for all FailurePreds.
///
/// The clone's terminator is replaced with an unconditional branch to the
/// failure successor, and every failure-only predecessor is redirected to
/// the clone, so those paths skip the (known-to-fail) cast entirely.
void CheckedCastBrJumpThreading::Edit::
modifyCFGForFailurePreds(Optional<BasicBlockCloner> &Cloner) {
  if (FailurePreds.empty())
    return;
  assert(!Cloner.hasValue());
  Cloner.emplace(BasicBlockCloner(CCBBlock));
  Cloner->clone();
  SILBasicBlock *TargetFailureBB = Cloner->getDestBB();
  auto *TI = TargetFailureBB->getTerminator();
  SILBuilderWithScope Builder(TI);
  // This BB copy branches to a FailureBB.
  auto *CCBI = cast<CheckedCastBranchInst>(CCBBlock->getTerminator());
  Builder.createBranch(TI->getLoc(), CCBI->getFailureBB());
  TI->eraseFromParent();
  // Keep the CFG well-formed for later passes.
  splitIfCriticalEdge(CCBBlock, CCBI->getFailureBB());
  // Redirect all FailurePreds to the copy of BB.
  for (auto *Pred : FailurePreds) {
    TermInst *TI = Pred->getTerminator();
    // Replace branch to BB by branch to TargetFailureBB.
    replaceBranchTarget(TI, CCBBlock, TargetFailureBB,
                        /*PreserveArgs=*/true);
  }
}
/// Create a copy of the BB or reuse BB as
/// a landing basic block for all FailurePreds.
/// Create a copy of the BB or reuse BB as
/// a landing basic block for all SuccessPreds.
///
/// Three cases:
///  - InvertSuccess: the cast is known to fail, branch to the failure block.
///  - Unknown preds exist: clone the block for the success-only predecessors
///    and keep the original checked_cast_br for the unknown ones.
///  - Otherwise: the original block itself becomes the success landing pad.
void CheckedCastBrJumpThreading::Edit::
modifyCFGForSuccessPreds(Optional<BasicBlockCloner> &Cloner) {
  auto *CCBI = cast<CheckedCastBranchInst>(CCBBlock->getTerminator());
  if (InvertSuccess) {
    SILBuilder B(CCBBlock);
    B.createBranch(CCBI->getLoc(), CCBI->getFailureBB());
    CCBI->eraseFromParent();
    return;
  }
  if (hasUnknownPreds) {
    if (!SuccessPreds.empty()) {
      // Create a copy of the BB as a landing BB.
      // for all SuccessPreds.
      assert(!Cloner.hasValue());
      Cloner.emplace(BasicBlockCloner(CCBBlock));
      Cloner->clone();
      SILBasicBlock *TargetSuccessBB = Cloner->getDestBB();
      auto *TI = TargetSuccessBB->getTerminator();
      SILBuilderWithScope Builder(TI);
      // This BB copy branches to SuccessBB.
      // Take argument value from the dominating BB.
      Builder.createBranch(TI->getLoc(), CCBI->getSuccessBB(), {SuccessArg});
      TI->eraseFromParent();
      splitIfCriticalEdge(CCBBlock, CCBI->getSuccessBB());
      // Redirect all SuccessPreds to the copy of BB.
      for (auto *Pred : SuccessPreds) {
        TermInst *TI = Pred->getTerminator();
        // Replace branch to BB by branch to TargetSuccessBB.
        replaceBranchTarget(TI, CCBBlock, TargetSuccessBB, /*PreserveArgs=*/true);
      }
    }
    return;
  }
  // There are no predecessors where it is not clear
  // if they are dominated by a success or failure branch
  // of DomBB. Therefore, there is no need to clone
  // the BB for SuccessPreds. Current BB can be re-used
  // instead as their target.
  // Add an unconditional jump at the end of the block.
  // Take argument value from the dominating BB
  SILBuilder(CCBBlock).createBranch(CCBI->getLoc(), CCBI->getSuccessBB(),
                                    {SuccessArg});
  CCBI->eraseFromParent();
}
/// Handle a special case, where ArgBB is the entry block.
/// Handle a special case, where ArgBB is the entry block: classify it
/// directly (entry blocks have no predecessors to analyze). Returns true
/// when the entry-block case applied and was handled.
bool CheckedCastBrJumpThreading::handleArgBBIsEntryBlock(SILBasicBlock *ArgBB,
                                              CheckedCastBranchInst *DomCCBI) {
  // A block without predecessors must be the entry block.
  if (ArgBB->getPreds().begin() != ArgBB->getPreds().end())
    return false;
  // Never touch a block which is already scheduled for editing.
  if (BlocksToEdit.count(ArgBB) != 0)
    return false;
  // See if BB is reached over the success or the failure branch.
  bool SuccessDominates = DomCCBI->getSuccessBB() == BB;
  bool FailureDominates = DomCCBI->getFailureBB() == BB;
  classifyPredecessor(ArgBB, SuccessDominates, FailureDominates);
  return true;
}
// Returns false if cloning required by jump threading cannot
// be performed, because some of the constraints are violated.
// Returns false if cloning required by jump threading cannot
// be performed, because some of the constraints are violated.
bool CheckedCastBrJumpThreading::checkCloningConstraints() {
  // If the condition's argument comes from a different block,
  // jump-threading may require too much code duplication.
  if (ArgBB && ArgBB != BB)
    return false;
  // The current block must be duplicatable at all (e.g. no alloc_stack).
  if (!canDuplicateBlock(BB))
    return false;
  // Limit code bloat: only blocks with a small inline cost are cloned.
  // TODO: Make InlineCostCutoff parameter configurable?
  // Dec 1, 2014:
  // We looked at the inline costs of BBs from our benchmark suite
  // and found that currently the highest inline cost for the
  // whole benchmark suite is 12. In 95% of all cases it is <=3.
  const unsigned InlineCostCutoff = 20;
  return basicBlockInlineCost(BB, InlineCostCutoff) < InlineCostCutoff;
}
/// If conditions are not equivalent along all paths, try harder
/// to check if they are actually equivalent along a subset of paths.
/// To do it, try to back-propagate the Condition
/// backwards and see if it is actually equivalent to DomCondition.
/// along some of the paths.
/// If conditions are not equivalent along all paths, try harder
/// to check if they are actually equivalent along a subset of paths.
/// To do it, try to back-propagate the Condition
/// backwards and see if it is actually equivalent to DomCondition.
/// along some of the paths.
///
/// On success, SuccessPreds/FailurePreds/numUnknownPreds are populated with
/// the classification of every predecessor of the condition's argument block.
bool CheckedCastBrJumpThreading::
areEquivalentConditionsAlongSomePaths(CheckedCastBranchInst *DomCCBI,
                                      SILValue DomCondition) {
  // Back-propagation only works through basic block arguments.
  auto *Arg = dyn_cast<SILArgument>(Condition);
  if (!Arg)
    return false;
  ArgBB = Arg->getParent();
  SILBasicBlock *DomBB = DomCCBI->getParent();
  if (!DT->dominates(DomBB, ArgBB))
    return false;
  // Incoming values for the BBArg.
  SmallVector<SILValue, 4> IncomingValues;
  if (ArgBB->getIterator() != ArgBB->getParent()->begin() &&
      (!Arg->getIncomingValues(IncomingValues) || IncomingValues.empty()))
    return false;
  // Check for each predecessor, if the incoming value coming from it
  // is equivalent to the DomCondition. If this is the case, it is
  // possible to try jump-threading along this path.
  if (!handleArgBBIsEntryBlock(ArgBB, DomCCBI)) {
    // ArgBB is not the entry block and has predecessors.
    unsigned idx = 0;
    for (auto *PredBB : ArgBB->getPreds()) {
      // We must avoid that we are going to change a block twice.
      if (BlocksToEdit.count(PredBB) != 0)
        return false;
      auto IncomingValue = IncomingValues[idx];
      SILValue ReachingValue = isArgValueEquivalentToCondition(
          IncomingValue, DomBB, DomCondition, DT);
      if (ReachingValue == SILValue()) {
        // Not provably the same value: outcome unknown along this path.
        numUnknownPreds++;
        idx++;
        continue;
      }
      // Condition is the same if BB is reached over a pass through Pred.
      DEBUG(llvm::dbgs() << "Condition is the same if reached over ");
      DEBUG(PredBB->print(llvm::dbgs()));
      // See if it is reached over Success or Failure path.
      SILBasicBlock *DomSuccessBB = DomCCBI->getSuccessBB();
      bool SuccessDominates = DT->dominates(DomSuccessBB, PredBB) ||
                              DT->dominates(DomSuccessBB, BB) ||
                              DomSuccessBB == BB;
      SILBasicBlock *DomFailureBB = DomCCBI->getFailureBB();
      bool FailureDominates = DT->dominates(DomFailureBB, PredBB) ||
                              DT->dominates(DomFailureBB, BB) ||
                              DomFailureBB == BB;
      classifyPredecessor(
          PredBB, SuccessDominates, FailureDominates);
      idx++;
    }
  } else {
    // ArgBB is the entry block. Check that conditions are the equivalent in this
    // case as well.
    if (!isArgValueEquivalentToCondition(Condition, DomBB, DomCondition, DT))
      return false;
  }
  // At this point we know for each predecessor of ArgBB if its reached
  // over the success, failure or unknown path from DomBB.
  // Now we can generate a new BB for preds reaching BB over the success
  // path and a new BB for preds reaching BB over the failure path.
  // Then we redirect those preds to those new basic blocks.
  return true;
}
/// Check if conditions of CCBI and DomCCBI are equivalent along
/// all or at least some paths.
/// Check if conditions of CCBI and DomCCBI are equivalent along
/// all or at least some paths.
bool CheckedCastBrJumpThreading::
areEquivalentConditionsAlongPaths(CheckedCastBranchInst *DomCCBI) {
  // Are conditions equivalent along all paths?
  SILValue DomCondition = stripClassCasts(DomCCBI->getOperand());
  if (DomCondition == Condition) {
    // Conditions are exactly the same, without any restrictions.
    // They are equivalent along all paths.
    // Figure out for each predecessor which branch of
    // the dominating checked_cast_br is used to reach it.
    for (auto *PredBB : BB->getPreds()) {
      // All predecessors should either unconditionally branch
      // to the current BB or be another checked_cast_br instruction.
      if (!isa<CheckedCastBranchInst>(PredBB->getTerminator()) &&
          !isa<BranchInst>(PredBB->getTerminator()))
        return false;
      // We must avoid that we are going to change a block twice.
      if (BlocksToEdit.count(PredBB) != 0)
        return false;
      // Don't allow critical edges from PredBB to BB. This ensures that
      // splitAllCriticalEdges() will not invalidate our predecessor lists.
      if (!BB->getSinglePredecessor() && !PredBB->getSingleSuccessor())
        return false;
      SILBasicBlock *DomSuccessBB = DomCCBI->getSuccessBB();
      bool SuccessDominates =
          DT->dominates(DomSuccessBB, PredBB) || DomSuccessBB == BB;
      SILBasicBlock *DomFailureBB = DomCCBI->getFailureBB();
      bool FailureDominates =
          DT->dominates(DomFailureBB, PredBB) || DomFailureBB == BB;
      classifyPredecessor(PredBB, SuccessDominates, FailureDominates);
    }
    return true;
  }
  // Check if conditions are equivalent along a subset of reaching paths.
  return areEquivalentConditionsAlongSomePaths(DomCCBI, DomCondition);
}
/// Try performing a dominator-based jump-threading for
/// checked_cast_br instructions.
bool CheckedCastBrJumpThreading::trySimplify(CheckedCastBranchInst *CCBI) {
// Init information about the checked_cast_br we try to
// jump-thread.
BB = CCBI->getParent();
if (BlocksToEdit.count(BB) != 0)
return false;
Condition = stripClassCasts(CCBI->getOperand());
// Find a dominating checked_cast_br, which performs the same check.
for (auto *Node = DT->getNode(BB)->getIDom(); Node; Node = Node->getIDom()) {
// Get current dominating block.
SILBasicBlock *DomBB = Node->getBlock();
auto *DomTerm = DomBB->getTerminator();
if (!DomTerm->getNumOperands())
continue;
// Check that it is a dominating checked_cast_br.
auto *DomCCBI = dyn_cast<CheckedCastBranchInst>(DomTerm);
if (!DomCCBI)
continue;
// We need to verify that the result type is the same in the
// dominating checked_cast_br, but only for non-exact casts.
// For exact casts, we are interested only in the
// fact that the source operand is the same for
// both instructions.
if (!CCBI->isExact() && !DomCCBI->isExact()) {
if (DomCCBI->getCastType() != CCBI->getCastType())
continue;
}
// Conservatively check that both checked_cast_br instructions
// are either exact or non-exact. This is very conservative,
// but safe.
//
// TODO:
// If the dominating checked_cast_br is non-exact, then
// it is in general not safe to assume that current exact cast
// would have the same outcome. But if the dominating non-exact
// checked_cast_br fails, then the current exact cast would
// always fail as well.
//
    // If the dominating checked_cast_br is exact, then
// it is in general not safe to assume that the current non-exact
// cast would have the same outcome. But if the dominating exact
// checked_cast_br succeeds, then the current non-exact cast
// would always succeed as well.
//
// TODO: In some specific cases, it is possible to prove that
// success or failure of the dominating cast is equivalent to
// the success or failure of the current cast, even if one
// of them is exact and the other not. This is the case
// e.g. if the class has no subclasses.
if (DomCCBI->isExact() != CCBI->isExact())
continue;
// We need the block argument of the DomSuccessBB. If we are going to
// clone it for a previous checked_cast_br the argument will not dominate
// the blocks which it's used to dominate anymore.
if (BlocksToClone.count(DomCCBI->getSuccessBB()) != 0)
continue;
// Init state variables for paths analysis
SuccessPreds.clear();
FailurePreds.clear();
numUnknownPreds = 0;
ArgBB = nullptr;
// Are conditions of CCBI and DomCCBI equivalent along (some) paths?
// If this is the case, classify all incoming paths into SuccessPreds,
// FailurePreds or UnknownPreds depending on how they reach CCBI.
if (!areEquivalentConditionsAlongPaths(DomCCBI))
continue;
// Check if any jump-threading is required and possible.
if (SuccessPreds.empty() && FailurePreds.empty())
return false;
// If this check is reachable via success, failure and unknown
// at the same time, then we don't know the outcome of the
// dominating check. No jump-threading is possible in this case.
if (!SuccessPreds.empty() && !FailurePreds.empty() && numUnknownPreds > 0) {
return false;
}
unsigned TotalPreds =
SuccessPreds.size() + FailurePreds.size() + numUnknownPreds;
// We only need to clone the BB if not all of its
// predecessors are in the same group.
if (TotalPreds != SuccessPreds.size() &&
TotalPreds != numUnknownPreds) {
// Check some cloning related constraints.
if (!checkCloningConstraints())
return false;
}
bool InvertSuccess = false;
if (DomCCBI->isExact() && CCBI->isExact() &&
DomCCBI->getCastType() != CCBI->getCastType()) {
if (TotalPreds == SuccessPreds.size()) {
// The dominating exact cast was successful, but it casted to a
// different type. Therefore, the current cast fails for sure.
// Since we are going to change the BB,
// add its successors and predecessors
// for re-processing.
InvertSuccess = true;
} else {
// Otherwise, we don't know if the current cast will succeed or
// fail.
return false;
}
}
// If we have predecessors, where it is not known if they are reached over
// success or failure path, we cannot eliminate a checked_cast_br.
// We have to generate new dedicated BBs as landing BBs for all
// FailurePreds and all SuccessPreds.
// Since we are going to change the BB,
// add its successors and predecessors
// for re-processing.
for (auto *B : BB->getPreds()) {
BlocksForWorklist.push_back(B);
}
for (auto *B : BB->getSuccessorBlocks()) {
BlocksForWorklist.push_back(B);
}
// Remember the blocks we are going to change. So that we ignore them
// for upcoming checked_cast_br instructions.
BlocksToEdit.insert(BB);
BlocksToClone.insert(BB);
for (auto *B : SuccessPreds)
BlocksToEdit.insert(B);
for (auto *B : FailurePreds)
BlocksToEdit.insert(B);
// Record what we want to change.
Edit *edit = new (EditAllocator.Allocate())
Edit(BB, InvertSuccess, SuccessPreds, FailurePreds, numUnknownPreds != 0,
DomCCBI->getSuccessBB()->getBBArg(0));
Edits.push_back(edit);
return true;
}
// Jump-threading was not possible.
return false;
}
/// Optimize the checked_cast_br instructions in a function.
void CheckedCastBrJumpThreading::optimizeFunction() {
  // We separate the work in two phases: analyse and transform. This avoids
  // re-calculating the dominator tree for each optimized checked_cast_br.

  // First phase: analysis.
  for (auto &BB : *Fn) {
    // Ignore unreachable blocks.
    if (!DT->getNode(&BB))
      continue;

    if (auto *CCBI = dyn_cast<CheckedCastBranchInst>(BB.getTerminator()))
      trySimplify(CCBI);
  }

  // trySimplify() records planned changes in Edits and the blocks that need
  // re-processing in BlocksForWorklist; the two must be populated together.
  assert(BlocksForWorklist.empty() == Edits.empty());

  if (Edits.empty())
    return;

  // Second phase: transformation.
  // Remove critical edges for the SSA-updater. We do this once and keep the
  // CFG critical-edge free during our transformations.
  splitAllCriticalEdges(*Fn, true, nullptr, nullptr);

  for (Edit *edit : Edits) {
    Optional<BasicBlockCloner> Cloner;

    // Create a copy of the BB as a landing BB
    // for all FailurePreds.
    edit->modifyCFGForFailurePreds(Cloner);
    // Create a copy of the BB or reuse BB as
    // a landing basic block for all SuccessPreds.
    edit->modifyCFGForSuccessPreds(Cloner);
    // Handle unknown preds.
    edit->modifyCFGForUnknownPreds();

    if (Cloner.hasValue()) {
      // Repair SSA form in the cloned region, then queue the clone for
      // another simplification round if it is still reachable.
      updateSSAAfterCloning(*Cloner.getPointer(), Cloner->getDestBB(),
                            edit->CCBBlock, false);

      if (!Cloner->getDestBB()->pred_empty())
        BlocksForWorklist.push_back(Cloner->getDestBB());
    }
    if (!edit->CCBBlock->pred_empty())
      BlocksForWorklist.push_back(edit->CCBBlock);
  }
}
namespace swift {

/// Perform jump-threading of checked_cast_br instructions in \p Fn.
///
/// Blocks that were changed (and may therefore profit from another
/// simplification round) are appended to \p BlocksForWorklist.
/// \returns true iff any blocks were queued, i.e. something changed.
bool tryCheckedCastBrJumpThreading(SILFunction *Fn, DominanceInfo *DT,
                                   SmallVectorImpl<SILBasicBlock *> &BlocksForWorklist) {
  CheckedCastBrJumpThreading CCBJumpThreading(Fn, DT, BlocksForWorklist);
  CCBJumpThreading.optimizeFunction();
  return !BlocksForWorklist.empty();
}

} // end namespace swift
| apache-2.0 |
jayvansantos/scalagen | scalagen/src/main/scala/PlaceHolder.java | 62 | //hack to generate package javadoc
/**
 * Empty placeholder class whose only purpose is to give the javadoc tool
 * something to process so that package-level documentation is generated.
 */
public class PlaceHolder {}
mariomarin/kubernetes | contrib/submit-queue/github/github.go | 8441 | /*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package github
import (
"fmt"
"time"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/golang/glog"
"github.com/google/go-github/github"
"golang.org/x/oauth2"
)
// MakeClient builds a GitHub API client. A non-empty token yields an
// OAuth2-authenticated client; otherwise an unauthenticated one is returned.
func MakeClient(token string) *github.Client {
	if len(token) == 0 {
		return github.NewClient(nil)
	}
	tokenSource := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: token})
	authedHTTPClient := oauth2.NewClient(oauth2.NoContext, tokenSource)
	return github.NewClient(authedHTTPClient)
}
// hasLabel reports whether any label in labels carries the given name.
func hasLabel(labels []github.Label, name string) bool {
	for _, label := range labels {
		if label.Name != nil && *label.Name == name {
			return true
		}
	}
	return false
}
// hasLabels reports whether every name in names is present among labels.
func hasLabels(labels []github.Label, names []string) bool {
	for _, required := range names {
		if !hasLabel(labels, required) {
			return false
		}
	}
	return true
}
// fetchAllPRs downloads every pull request in user/project, walking the
// paginated list API (100 PRs per page) until the last page is reached.
func fetchAllPRs(client *github.Client, user, project string) ([]github.PullRequest, error) {
	page := 1
	var result []github.PullRequest
	for {
		glog.V(4).Infof("Fetching page %d", page)
		listOpts := &github.PullRequestListOptions{
			Sort:        "desc",
			ListOptions: github.ListOptions{PerPage: 100, Page: page},
		}
		prs, response, err := client.PullRequests.List(user, project, listOpts)
		if err != nil {
			return nil, err
		}
		result = append(result, prs...)
		// LastPage == 0 means GitHub returned no pagination links, i.e. this
		// is the only page.
		if response.LastPage == 0 || response.LastPage == page {
			break
		}
		page++
	}
	return result, nil
}
// PRFunction is the callback invoked for each candidate pull request. It
// receives both the PR and its backing issue, since labels live on the
// issue rather than the PR.
type PRFunction func(*github.Client, *github.PullRequest, *github.Issue) error

// FilterConfig controls which pull requests are treated as candidates.
type FilterConfig struct {
	MinPRNumber            int      // PRs numbered below this are skipped
	UserWhitelist          []string // authors allowed without the override label
	WhitelistOverride      string   // label that bypasses the user whitelist
	RequiredStatusContexts []string // status contexts that must have reported
}
// For each PR in the project that matches:
//   * pr.Number > minPRNumber
//   * is mergeable
//   * has labels "cla: yes", "lgtm"
//   * combinedStatus = 'success' (e.g. all hooks have finished success in github)
// Run the specified function. If once is true, stop after the first PR the
// function has been run for.
func ForEachCandidatePRDo(client *github.Client, user, project string, fn PRFunction, once bool, config *FilterConfig) error {
	// Get all PRs
	prs, err := fetchAllPRs(client, user, project)
	if err != nil {
		return err
	}
	userSet := util.StringSet{}
	userSet.Insert(config.UserWhitelist...)

	for ix := range prs {
		if prs[ix].User == nil || prs[ix].User.Login == nil {
			glog.V(2).Infof("Skipping PR %d with no user info %v.", *prs[ix].Number, *prs[ix].User)
			continue
		}
		if *prs[ix].Number < config.MinPRNumber {
			glog.V(6).Infof("Dropping %d < %d", *prs[ix].Number, config.MinPRNumber)
			continue
		}
		pr, _, err := client.PullRequests.Get(user, project, *prs[ix].Number)
		if err != nil {
			glog.Errorf("Error getting pull request: %v", err)
			continue
		}
		glog.V(2).Infof("----==== %d ====----", *pr.Number)

		// Labels are actually stored in the Issues API, not the Pull Request API
		issue, _, err := client.Issues.Get(user, project, *pr.Number)
		if err != nil {
			glog.Errorf("Failed to get issue for PR: %v", err)
			continue
		}
		glog.V(8).Infof("%v", issue.Labels)
		if !hasLabels(issue.Labels, []string{"lgtm", "cla: yes"}) {
			continue
		}
		if !hasLabel(issue.Labels, config.WhitelistOverride) && !userSet.Has(*prs[ix].User.Login) {
			glog.V(4).Infof("Dropping %d since %s isn't in whitelist and %s isn't present", *prs[ix].Number, *prs[ix].User.Login, config.WhitelistOverride)
			continue
		}

		// This is annoying, github appears to only temporarily cache mergeability, if it is nil, wait
		// for an async refresh and retry.
		if pr.Mergeable == nil {
			glog.Infof("Waiting for mergeability on %s %d", *pr.Title, *pr.Number)
			// TODO: determine what a good empirical setting for this is.
			time.Sleep(10 * time.Second)
			pr, _, err = client.PullRequests.Get(user, project, *prs[ix].Number)
			// BUGFIX: the refresh Get can fail; previously err was ignored and
			// a failed retry could leave pr nil, panicking on *pr.Title below.
			if err != nil {
				glog.Errorf("Error getting pull request: %v", err)
				continue
			}
		}
		if pr.Mergeable == nil {
			glog.Errorf("No mergeability information for %s %d, Skipping.", *pr.Title, *pr.Number)
			continue
		}
		if !*pr.Mergeable {
			continue
		}

		// Validate the status information for this PR
		ok, err := ValidateStatus(client, user, project, *pr.Number, config.RequiredStatusContexts, false)
		if err != nil {
			glog.Errorf("Error validating PR status: %v", err)
			continue
		}
		if !ok {
			continue
		}
		if err := fn(client, pr, issue); err != nil {
			glog.Errorf("Failed to run user function: %v", err)
			continue
		}
		if once {
			break
		}
	}
	return nil
}
// getCommitStatus returns the combined CI status for every commit in the
// given PR, one entry per commit, in the order GitHub lists the commits.
func getCommitStatus(client *github.Client, user, project string, prNumber int) ([]*github.CombinedStatus, error) {
	commits, _, err := client.PullRequests.ListCommits(user, project, prNumber, &github.ListOptions{})
	if err != nil {
		return nil, err
	}
	commitStatus := make([]*github.CombinedStatus, len(commits))
	for ix := range commits {
		commit := &commits[ix]
		statusList, _, err := client.Repositories.GetCombinedStatus(user, project, *commit.SHA, &github.ListOptions{})
		if err != nil {
			return nil, err
		}
		commitStatus[ix] = statusList
	}
	return commitStatus, nil
}
// Gets the current status of a PR by introspecting the status of the commits in the PR.
// The rules are:
//    * If any member of the 'requiredContexts' list is missing, it is 'incomplete'
//    * If any commit is 'pending', the PR is 'pending'
//    * If any commit is 'error', the PR is in 'error'
//    * If any commit is 'failure', the PR is 'failure'
//    * Otherwise the PR is 'success'
func GetStatus(client *github.Client, user, project string, prNumber int, requiredContexts []string) (string, error) {
	// Fetch the per-commit combined statuses, then fold them into one string.
	statusList, err := getCommitStatus(client, user, project, prNumber)
	if err != nil {
		return "", err
	}
	return computeStatus(statusList, requiredContexts), nil
}
// computeStatus folds the per-commit combined statuses into a single PR
// status string, following the rules documented on GetStatus.
func computeStatus(statusList []*github.CombinedStatus, requiredContexts []string) string {
	// Union of every commit state, plus the set of contexts that reported.
	states := util.StringSet{}
	providers := util.StringSet{}
	for _, combined := range statusList {
		glog.V(8).Infof("Checking commit: %s", *combined.SHA)
		glog.V(8).Infof("Checking commit: %v", combined)
		states.Insert(*combined.State)
		for _, subStatus := range combined.Statuses {
			glog.V(8).Infof("Found status from: %v", subStatus)
			providers.Insert(*subStatus.Context)
		}
	}
	// Any missing required context makes the whole PR 'incomplete'.
	for _, required := range requiredContexts {
		if !providers.Has(required) {
			glog.V(8).Infof("Failed to find %s in %v", required, providers)
			return "incomplete"
		}
	}
	// Worst state wins: pending beats error beats failure beats success.
	for _, worst := range []string{"pending", "error", "failure"} {
		if states.Has(worst) {
			return worst
		}
	}
	return "success"
}
// Make sure that the combined status for all commits in a PR is 'success'
// if 'waitForPending' is true, this function will wait until the PR is no longer pending (all checks have run)
func ValidateStatus(client *github.Client, user, project string, prNumber int, requiredContexts []string, waitOnPending bool) (bool, error) {
	pending := true
	for pending {
		status, err := GetStatus(client, user, project, prNumber, requiredContexts)
		if err != nil {
			return false, err
		}
		switch status {
		case "error", "failure":
			return false, nil
		case "pending":
			if !waitOnPending {
				return false, nil
			}
			// Keep polling every 30 seconds until the status leaves 'pending'.
			pending = true
			glog.V(4).Info("PR is pending, waiting for 30 seconds")
			time.Sleep(30 * time.Second)
		case "success":
			return true, nil
		case "incomplete":
			return false, nil
		default:
			return false, fmt.Errorf("unknown status: %s", status)
		}
	}
	// Unreachable: every branch above either returns or keeps pending true;
	// kept only to satisfy the compiler's terminating-statement requirement.
	return true, nil
}
// Wait for a PR to move into Pending. This is useful because the request to test a PR again
// is asynchronous with the PR actually moving into a pending state
// TODO: add a timeout
func WaitForPending(client *github.Client, user, project string, prNumber int) error {
	// Poll every 30 seconds; an API error aborts the wait.
	for {
		status, err := GetStatus(client, user, project, prNumber, []string{})
		if err != nil {
			return err
		}
		if status == "pending" {
			return nil
		}
		glog.V(4).Info("PR is not pending, waiting for 30 seconds")
		time.Sleep(30 * time.Second)
	}
	// Unreachable: the loop above only exits via return.
	return nil
}
| apache-2.0 |
stevekuznetsov/origin | pkg/user/generated/clientset/typed/user/v1/fake/fake_user.go | 2852 | package fake
import (
v1 "github.com/openshift/origin/pkg/user/api/v1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
schema "k8s.io/apimachinery/pkg/runtime/schema"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
testing "k8s.io/client-go/testing"
)
// FakeUsers implements UserResourceInterface
type FakeUsers struct {
	Fake *FakeUserV1 // parent fake client that records and reacts to actions
	ns   string      // namespace applied to every recorded action
}
// usersResource identifies the user.openshift.io/v1 "users" resource that
// every recorded action below is tagged with.
var usersResource = schema.GroupVersionResource{Group: "user.openshift.io", Version: "v1", Resource: "users"}
// Create records a create action for the given user and returns whatever
// object the registered reactor produced for it.
func (c *FakeUsers) Create(user *v1.User) (result *v1.User, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewCreateAction(usersResource, c.ns, user), &v1.User{})
	if obj == nil {
		return nil, err
	}
	return obj.(*v1.User), err
}
// Update records an update action for the given user and returns whatever
// object the registered reactor produced for it.
func (c *FakeUsers) Update(user *v1.User) (result *v1.User, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewUpdateAction(usersResource, c.ns, user), &v1.User{})
	if obj == nil {
		return nil, err
	}
	return obj.(*v1.User), err
}
// Delete records a delete action for the named user.
func (c *FakeUsers) Delete(name string, options *meta_v1.DeleteOptions) error {
	_, err := c.Fake.
		Invokes(testing.NewDeleteAction(usersResource, c.ns, name), &v1.User{})
	return err
}
// DeleteCollection records a delete-collection action matching listOptions.
func (c *FakeUsers) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error {
	action := testing.NewDeleteCollectionAction(usersResource, c.ns, listOptions)
	_, err := c.Fake.Invokes(action, &v1.UserList{})
	return err
}
// Get records a get action for the named user and returns the reactor's object.
func (c *FakeUsers) Get(name string, options meta_v1.GetOptions) (result *v1.User, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewGetAction(usersResource, c.ns, name), &v1.User{})
	if obj == nil {
		return nil, err
	}
	return obj.(*v1.User), err
}
// List records a list action and then filters the reactor's result by the
// label selector in opts, mirroring server-side label filtering.
func (c *FakeUsers) List(opts meta_v1.ListOptions) (result *v1.UserList, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewListAction(usersResource, c.ns, opts), &v1.UserList{})
	if obj == nil {
		return nil, err
	}
	label, _, _ := testing.ExtractFromListOptions(opts)
	if label == nil {
		// No selector means everything matches.
		label = labels.Everything()
	}
	list := &v1.UserList{}
	for _, item := range obj.(*v1.UserList).Items {
		if label.Matches(labels.Set(item.Labels)) {
			list.Items = append(list.Items, item)
		}
	}
	return list, err
}
// Watch returns a watch.Interface that watches the requested users.
func (c *FakeUsers) Watch(opts meta_v1.ListOptions) (watch.Interface, error) {
	// Watches are served by the fake's registered watch reactors.
	return c.Fake.
		InvokesWatch(testing.NewWatchAction(usersResource, c.ns, opts))
}
// Patch applies the patch and returns the patched user.
func (c *FakeUsers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.User, err error) {
	// Records a patch action; the reactor's object is asserted to *v1.User.
	obj, err := c.Fake.
		Invokes(testing.NewPatchSubresourceAction(usersResource, c.ns, name, data, subresources...), &v1.User{})
	if obj == nil {
		return nil, err
	}
	return obj.(*v1.User), err
}
| apache-2.0 |
ptkool/spark | sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/DataTypeParserSuite.scala | 5602 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.parser
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.types._
// Unit tests for CatalystSqlParser.parseDataType: each checkDataType call
// registers a test asserting that a SQL type string parses to the expected
// Catalyst DataType, and each unsupported call registers a test asserting
// that parsing fails with a ParseException.
class DataTypeParserSuite extends SparkFunSuite {

  // Parses a SQL data-type string into a Catalyst DataType.
  def parse(sql: String): DataType = CatalystSqlParser.parseDataType(sql)

  // Registers a test verifying that dataTypeString parses to expectedDataType.
  def checkDataType(dataTypeString: String, expectedDataType: DataType): Unit = {
    test(s"parse ${dataTypeString.replace("\n", "")}") {
      assert(parse(dataTypeString) === expectedDataType)
    }
  }

  // Parses sql, expecting (and returning) a ParseException.
  def intercept(sql: String): ParseException =
    intercept[ParseException](CatalystSqlParser.parseDataType(sql))

  // Registers a test verifying that dataTypeString is rejected by the parser.
  def unsupported(dataTypeString: String): Unit = {
    test(s"$dataTypeString is not supported") {
      intercept(dataTypeString)
    }
  }

  // Primitive types; keyword matching is case-insensitive.
  checkDataType("int", IntegerType)
  checkDataType("integer", IntegerType)
  checkDataType("BooLean", BooleanType)
  checkDataType("tinYint", ByteType)
  checkDataType("smallINT", ShortType)
  checkDataType("INT", IntegerType)
  checkDataType("INTEGER", IntegerType)
  checkDataType("bigint", LongType)
  checkDataType("float", FloatType)
  checkDataType("dOUBle", DoubleType)
  checkDataType("decimal(10, 5)", DecimalType(10, 5))
  checkDataType("decimal", DecimalType.USER_DEFAULT)
  checkDataType("Dec(10, 5)", DecimalType(10, 5))
  checkDataType("deC", DecimalType.USER_DEFAULT)
  checkDataType("DATE", DateType)
  checkDataType("timestamp", TimestampType)
  checkDataType("string", StringType)
  // char/varchar variants all map to plain StringType.
  checkDataType("ChaR(5)", StringType)
  checkDataType("ChaRacter(5)", StringType)
  checkDataType("varchAr(20)", StringType)
  checkDataType("cHaR(27)", StringType)
  checkDataType("BINARY", BinaryType)
  checkDataType("interval", CalendarIntervalType)

  // Complex types: array, map, struct, and their nestings.
  checkDataType("array<doublE>", ArrayType(DoubleType, true))
  checkDataType("Array<map<int, tinYint>>", ArrayType(MapType(IntegerType, ByteType, true), true))
  checkDataType(
    "array<struct<tinYint:tinyint>>",
    ArrayType(StructType(StructField("tinYint", ByteType, true) :: Nil), true)
  )
  checkDataType("MAP<int, STRING>", MapType(IntegerType, StringType, true))
  checkDataType("MAp<int, ARRAY<double>>", MapType(IntegerType, ArrayType(DoubleType), true))
  checkDataType(
    "MAP<int, struct<varchar:string>>",
    MapType(IntegerType, StructType(StructField("varchar", StringType, true) :: Nil), true)
  )
  checkDataType(
    "struct<intType: int, ts:timestamp>",
    StructType(
      StructField("intType", IntegerType, true) ::
      StructField("ts", TimestampType, true) :: Nil)
  )
  // It is fine to use the data type string as the column name.
  checkDataType(
    "Struct<int: int, timestamp:timestamp>",
    StructType(
      StructField("int", IntegerType, true) ::
      StructField("timestamp", TimestampType, true) :: Nil)
  )
  // Deeply nested struct spanning multiple lines of input.
  checkDataType(
    """
    |struct<
    | struct:struct<deciMal:DECimal, anotherDecimal:decimAL(5,2)>,
    | MAP:Map<timestamp, varchar(10)>,
    | arrAy:Array<double>,
    | anotherArray:Array<char(9)>>
    """.stripMargin,
    StructType(
      StructField("struct",
        StructType(
          StructField("deciMal", DecimalType.USER_DEFAULT, true) ::
          StructField("anotherDecimal", DecimalType(5, 2), true) :: Nil), true) ::
      StructField("MAP", MapType(TimestampType, StringType), true) ::
      StructField("arrAy", ArrayType(DoubleType, true), true) ::
      StructField("anotherArray", ArrayType(StringType, true), true) :: Nil)
  )
  // Use backticks to quote column names having special characters.
  checkDataType(
    "struct<`x+y`:int, `!@#$%^&*()`:string, `1_2.345<>:\"`:varchar(20)>",
    StructType(
      StructField("x+y", IntegerType, true) ::
      StructField("!@#$%^&*()", StringType, true) ::
      StructField("1_2.345<>:\"", StringType, true) :: Nil)
  )
  // Empty struct.
  checkDataType("strUCt<>", StructType(Nil))

  // Strings the parser must reject.
  unsupported("it is not a data type")
  unsupported("struct<x+y: int, 1.1:timestamp>")
  unsupported("struct<x: int")
  unsupported("struct<x int, y string>")

  test("Do not print empty parentheses for no params") {
    assert(intercept("unkwon").getMessage.contains("unkwon is not supported"))
    assert(intercept("unkwon(1,2,3)").getMessage.contains("unkwon(1,2,3) is not supported"))
  }

  // DataType parser accepts certain reserved keywords.
  checkDataType(
    "Struct<TABLE: string, DATE:boolean>",
    StructType(
      StructField("TABLE", StringType, true) ::
      StructField("DATE", BooleanType, true) :: Nil)
  )

  // Use SQL keywords.
  checkDataType("struct<end: long, select: int, from: string>",
    (new StructType).add("end", LongType).add("select", IntegerType).add("from", StringType))

  // DataType parser accepts comments.
  checkDataType("Struct<x: INT, y: STRING COMMENT 'test'>",
    (new StructType).add("x", IntegerType).add("y", StringType, true, "test"))
}
| apache-2.0 |
luhanhan/horizon | horizon/static/framework/widgets/metadata-tree/metadata-tree.module.js | 8158 | (function () {
'use strict';
/**
* @ngdoc overview
* @name horizon.framework.widgets.metadata-tree
* @description
*
* # horizon.framework.widgets.metadata-tree
*
* The `horizon.framework.widgets.metadata-tree` provides widgets and service
* with logic for editing metadata.
*
* | Directives |
* |--------------------------------------------------------------------------------------------|
* | {@link horizon.framework.widgets.metadata-tree.directive:hzMetadataTree `hzMetadataTree`} |
* | {@link horizon.framework.widgets.metadata-tree.directive:hzMetadataTreeItem `hzMetadataTreeItem`} |
* | {@link horizon.framework.widgets.metadata-tree.directive:hzMetadataTreeUnique `hzMetadataTreeUnique`} |
* |--------------------------------------------------------------------------------------------|
* | Controllers |
* |--------------------------------------------------------------------------------------------|
* | {@link horizon.framework.widgets.metadata-tree.controller:hzMetadataTreeCtrl `hzMetadataTreeCtrl`} |
* | {@link horizon.framework.widgets.metadata-tree.controller:hzMetadataTreeItemCtrl `hzMetadataTreeItemCtrl`} |
*
*/
angular.module('horizon.framework.widgets.metadata-tree', [])

  /**
   * @ngdoc parameters
   * @name horizon.framework.widgets.metadata-tree.constant:metadataTreeDefaults
   * @param {object} text Text constants
   */
  // Default (overridable) UI strings; all run through gettext for i18n.
  .constant('horizon.framework.widgets.metadata-tree.defaults', {
    text: {
      /*eslint-disable max-len */
      help: gettext('You can specify resource metadata by moving items from the left column to the right column. In the left columns there are metadata definitions from the Glance Metadata Catalog. Use the "Other" option to add metadata with the key of your choice.'),
      /*eslint-enable max-len */
      min: gettext('Min'),
      max: gettext('Max'),
      minLength: gettext('Min length'),
      maxLength: gettext('Max length'),
      patternMismatch: gettext('Pattern mismatch'),
      integerRequired: gettext('Integer required'),
      decimalRequired: gettext('Decimal required'),
      required: gettext('Required'),
      duplicate: gettext('Duplicate keys are not allowed'),
      filter: gettext('Filter'),
      available: gettext('Available Metadata'),
      existing: gettext('Existing Metadata'),
      custom: gettext('Custom'),
      noAvailable: gettext('No available metadata'),
      noExisting: gettext('No existing metadata')
    }
  })
  /**
   * @ngdoc directive
   * @name horizon.framework.widgets.metadata-tree.directive:hzMetadataTree
   * @scope
   *
   * @description
   * The `hzMetadataTree` directive provide support for modifying existing
   * metadata properties and adding new from metadata catalog.
   *
   * @param {Tree=} model Model binding
   * @param {object[]=} available List of available namespaces
   * @param {object=} existing Key-value pairs with existing properties
   * @param {object=} text Text override
   */
  .directive('hzMetadataTree', [ 'horizon.framework.widgets.basePath',
    function (path) {
      return {
        // Isolate scope; '=*?model' binds optionally and watches collection
        // contents, the others are optional two-way bindings.
        scope: {
          tree: '=*?model',
          available: '=?',
          existing: '=?',
          text: '=?'
        },
        controller: 'hzMetadataTreeCtrl',
        templateUrl: path + 'metadata-tree/metadata-tree.html'
      };
    }
  ])
  /**
   * @ngdoc directive
   * @name horizon.framework.widgets.metadata-tree.directive:hzMetadataTreeItem
   * @scope
   *
   * @description
   * The `hzMetadataTreeItem` helper directive displays proper field for
   * editing Item.leaf.value depending on Item.leaf.type
   *
   * @param {expression} action Action for button
   * @param {Item} item Item to display
   * @param {object} text Text override
   */
  .directive('hzMetadataTreeItem', [ 'horizon.framework.widgets.basePath',
    function (path) {
      return {
        // Isolate scope: '&' exposes the action callback, '=' two-way binds
        // the item being edited and the text overrides.
        scope: {
          action: '&',
          item: '=',
          text: '='
        },
        controller: 'hzMetadataTreeItemCtrl',
        templateUrl: path + 'metadata-tree/metadata-tree-item.html'
      };
    }
  ])
  /**
   * @ngdoc directive
   * @name horizon.framework.widgets.metadata-tree.directive:hzMetadataTreeUnique
   * @restrict A
   *
   * @description
   * The `hzMetadataTreeUnique` helper directive provides validation
   * for field which value should be unique new Item
   */
  .directive('hzMetadataTreeUnique', function () {
    return {
      restrict: 'A',
      require: 'ngModel',
      link: function (scope, elm, attrs, ctrl) {
        // Invalid when any leaf already present in the (parent scope's)
        // tree has the same name as the entered value.
        ctrl.$validators.unique = function (modelValue, viewValue) {
          return !scope.tree.flatTree.some(function (item) {
            return item.leaf && item.leaf.name === viewValue;
          });
        };
      }
    };
  })
  /**
   * @ngdoc controller
   * @name horizon.framework.widgets.metadata-tree.controller:hzMetadataTreeCtrl
   * @description
   * Controller used by `hzMetadataTree`
   */
  .controller('hzMetadataTreeCtrl', [
    '$scope',
    'horizon.framework.widgets.metadata-tree.service',
    'horizon.framework.widgets.metadata-tree.defaults',
    function ($scope, metadataTreeService, defaults) {

      // Hide already-added items; honor per-item visibility only while no
      // filter text is entered.
      $scope.availableFilter = function (item) {
        return !item.added && (
          $scope.filterText.available.length === 0 ? item.visible : true);
      };

      // Caller-supplied text overrides win over the defaults constant.
      $scope.text = angular.extend({}, defaults.text, $scope.text);
      if (!$scope.tree) {
        $scope.tree = new metadataTreeService.Tree($scope.available, $scope.existing);
      }
      $scope.customItem = '';
      $scope.filterText = {
        available: '',
        existing: ''
      };
    }
  ])
  /**
   * @ngdoc controller
   * @name horizon.framework.widgets.metadata-tree.controller:hzMetadataTreeItemCtrl
   * @description
   * Controller used by `hzMetadataTreeItem`
   */
  .controller('hzMetadataTreeItemCtrl', [
    '$scope',
    function ($scope) {

      // Maps an ngModel $error flag on the item's field to a translated
      // message; returns undefined when no known flag is set.
      $scope.formatErrorMessage = function (item, error) {
        if (error.min) { return $scope.text.min + ' ' + item.leaf.minimum; }
        if (error.max) { return $scope.text.max + ' ' + item.leaf.maximum; }
        if (error.minlength) { return $scope.text.minLength + ' ' + item.leaf.minLength; }
        if (error.maxlength) { return $scope.text.maxLength + ' ' + item.leaf.maxLength; }
        if (error.pattern) {
          if (item.leaf.type === 'integer') {
            return $scope.text.integerRequired;
          } else {
            return $scope.text.patternMismatch;
          }
        }
        if (error.number) {
          if (item.leaf.type === 'integer') {
            return $scope.text.integerRequired;
          } else {
            return $scope.text.decimalRequired;
          }
        }
        if (error.required) {
          return $scope.text.required;
        }
      };

      // Removes the first occurrence of value from array, in place.
      function remove(array, value) {
        var index = array.indexOf(value);
        if (index > -1) {
          array.splice(index, 1);
        }
        return array;
      }

      $scope.opened = false;
      if ($scope.item.leaf.type === 'array') {
        // Enum choices not yet selected, kept sorted for display.
        $scope.values = $scope.item.leaf.items.enum.filter(function (i) {
          return $scope.item.leaf.value.indexOf(i) < 0;
        }).sort();

        if (!$scope.item.leaf.readonly) {
          // Move a choice from the available list into the item's value.
          $scope.add = function (val) {
            $scope.item.leaf.value.push(val);
            $scope.item.leaf.value.sort();
            remove($scope.values, val);
          };
          // Move a choice back to the available list; reopen the picker
          // when the value becomes empty.
          $scope.remove = function (val) {
            remove($scope.item.leaf.value, val);
            $scope.values.push(val);
            $scope.values.sort();
            if ($scope.item.leaf.value.length === 0) {
              $scope.opened = true;
            }
          };
          $scope.open = function () {
            $scope.opened = !$scope.opened;
          };
          $scope.opened = $scope.item.leaf.value.length === 0;
        }
      }
    }
  ]);
})();
| apache-2.0 |
sarvex/tensorflow | tensorflow/core/kernels/mlir_generated/gpu_op_atan2.cc | 1021 | /* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/kernels/mlir_generated/base_gpu_op.h"
namespace tensorflow {

// Instantiate and register the generated GPU kernels (see
// kernels/mlir_generated) for the binary Atan2 op, one per element type.
GENERATE_AND_REGISTER_BINARY_GPU_KERNEL(Atan2, DT_HALF);
GENERATE_AND_REGISTER_BINARY_GPU_KERNEL(Atan2, DT_FLOAT);
GENERATE_AND_REGISTER_BINARY_GPU_KERNEL(Atan2, DT_DOUBLE);

}  // namespace tensorflow
| apache-2.0 |
salguarnieri/intellij-community | platform/platform-impl/src/com/intellij/openapi/project/DumbPermissionServiceImpl.java | 1560 | /*
* Copyright 2000-2015 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.openapi.project;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
/**
* @author peter
*/
public class DumbPermissionServiceImpl implements DumbPermissionService {
  // Per-thread record of the permission currently in effect.
  private final ThreadLocal<DumbModePermission> myPermission = new ThreadLocal<DumbModePermission>();

  @Override
  public void allowStartingDumbModeInside(@NotNull DumbModePermission permission, @NotNull Runnable runnable) {
    final DumbModePermission outer = myPermission.get();
    // A thread already allowed to start modal dumb mode may also start it in
    // the background, so the narrower permission needs no extra bookkeeping.
    if (outer == DumbModePermission.MAY_START_MODAL && permission == DumbModePermission.MAY_START_BACKGROUND) {
      runnable.run();
      return;
    }

    myPermission.set(permission);
    try {
      runnable.run();
    }
    finally {
      restorePermission(outer);
    }
  }

  // Restores the thread-local to its previous value, clearing it entirely
  // when there was no enclosing permission.
  private void restorePermission(@Nullable DumbModePermission previous) {
    if (previous == null) {
      myPermission.remove();
    }
    else {
      myPermission.set(previous);
    }
  }

  /** @return the permission currently granted to the calling thread, if any. */
  @Nullable
  public DumbModePermission getPermission() {
    return myPermission.get();
  }
}
| apache-2.0 |
joinany/phoenix-for-cloudera | bin/traceserver.py | 6884 | #!/usr/bin/env python
############################################################################
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
############################################################################
#
# Script to handle launching the trace server process.
#
# usage: traceserver.py [start|stop]
#
import datetime
import getpass
import os
import os.path
import signal
import subprocess
import sys
import tempfile
# The python-daemon package is optional; without it the trace server cannot
# be daemonized (e.g. on Windows).
try:
    import daemon
    daemon_supported = True
except ImportError:
    # daemon script not supported on some platforms (windows?)
    daemon_supported = False

import phoenix_utils

phoenix_utils.setPath()

# First CLI argument selects the action; the rest is forwarded to the JVM.
command = None
args = sys.argv

if len(args) > 1:
    if args[1] == 'start':
        command = 'start'
    elif args[1] == 'stop':
        command = 'stop'
if command:
    args = args[2:]

# Re-quote the remaining arguments into one shell-safe string.
# NOTE(review): when a command was stripped above, the args[1:] slices below
# drop one further argument on top of args[2:] -- confirm against upstream.
if os.name == 'nt':
    args = subprocess.list2cmdline(args[1:])
else:
    import pipes  # pipes module isn't available on Windows
    args = " ".join([pipes.quote(v) for v in args[1:]])

# HBase configuration folder path (where hbase-site.xml reside) for
# HBase/Phoenix client side property override
hbase_config_path = phoenix_utils.hbase_conf_dir

# default paths ## TODO: add windows support
java_home = os.getenv('JAVA_HOME')
hbase_pid_dir = os.path.join(tempfile.gettempdir(), 'phoenix')
phoenix_log_dir = os.path.join(tempfile.gettempdir(), 'phoenix')
phoenix_file_basename = 'phoenix-%s-traceserver' % getpass.getuser()
phoenix_log_file = '%s.log' % phoenix_file_basename
phoenix_out_file = '%s.out' % phoenix_file_basename
phoenix_pid_file = '%s.pid' % phoenix_file_basename
opts = os.getenv('PHOENIX_TRACESERVER_OPTS', '')

# load hbase-env.??? to extract JAVA_HOME, HBASE_PID_DIR, HBASE_LOG_DIR
hbase_env_path = None
hbase_env_cmd = None
if os.name == 'posix':
    hbase_env_path = os.path.join(hbase_config_path, 'hbase-env.sh')
    hbase_env_cmd = ['bash', '-c', 'source %s && env' % hbase_env_path]
elif os.name == 'nt':
    hbase_env_path = os.path.join(hbase_config_path, 'hbase-env.cmd')
    hbase_env_cmd = ['cmd.exe', '/c', 'call %s & set' % hbase_env_path]
if not hbase_env_path or not hbase_env_cmd:
    print >> sys.stderr, "hbase-env file unknown on platform %s" % os.name
    sys.exit(-1)

# Run the env script in a subshell and parse the resulting KEY=VALUE lines.
hbase_env = {}
if os.path.isfile(hbase_env_path):
    p = subprocess.Popen(hbase_env_cmd, stdout = subprocess.PIPE)
    for x in p.stdout:
        (k, _, v) = x.partition('=')
        hbase_env[k.strip()] = v.strip()

# Values exported by hbase-env override the defaults above.
# (Python 2 script: dict.has_key and print >> are used throughout.)
if hbase_env.has_key('JAVA_HOME'):
    java_home = hbase_env['JAVA_HOME']
if hbase_env.has_key('HBASE_PID_DIR'):
    hbase_pid_dir = hbase_env['HBASE_PID_DIR']
if hbase_env.has_key('HBASE_LOG_DIR'):
    phoenix_log_dir = hbase_env['HBASE_LOG_DIR']
if hbase_env.has_key('PHOENIX_TRACESERVER_OPTS'):
    opts = hbase_env['PHOENIX_TRACESERVER_OPTS']

log_file_path = os.path.join(phoenix_log_dir, phoenix_log_file)
out_file_path = os.path.join(phoenix_log_dir, phoenix_out_file)
pid_file_path = os.path.join(hbase_pid_dir, phoenix_pid_file)
if java_home:
java = os.path.join(java_home, 'bin', 'java')
else:
java = 'java'
# " -Xdebug -Xrunjdwp:transport=dt_socket,address=5005,server=y,suspend=n " + \
# " -XX:+UnlockCommercialFeatures -XX:+FlightRecorder -XX:FlightRecorderOptions=defaultrecording=true,dumponexit=true" + \
java_cmd = '%(java)s $PHOENIX_OPTS ' + \
'-cp ' + hbase_config_path + os.pathsep + phoenix_utils.phoenix_traceserver_jar + os.pathsep + phoenix_utils.phoenix_client_jar + \
" -Dproc_phoenixtraceserver" + \
" -Dlog4j.configuration=file:" + os.path.join(phoenix_utils.current_dir, "log4j.properties") + \
" -Dpsql.root.logger=%(root_logger)s" + \
" -Dpsql.log.dir=%(log_dir)s" + \
" -Dpsql.log.file=%(log_file)s" + \
" " + opts + \
" org.apache.phoenix.tracingwebapp.http.Main " + args
if command == 'start':
if not daemon_supported:
print >> sys.stderr, "daemon mode not supported on this platform"
sys.exit(-1)
# run in the background
d = os.path.dirname(out_file_path)
if not os.path.exists(d):
os.makedirs(d)
with open(out_file_path, 'a+') as out:
context = daemon.DaemonContext(
pidfile = daemon.PidFile(pid_file_path, 'Trace Server already running, PID file found: %s' % pid_file_path),
stdout = out,
stderr = out,
)
print 'starting Trace Server, logging to %s' % log_file_path
with context:
# this block is the main() for the forked daemon process
child = None
cmd = java_cmd % {'java': java, 'root_logger': 'INFO,DRFA', 'log_dir': phoenix_log_dir, 'log_file': phoenix_log_file}
# notify the child when we're killed
def handler(signum, frame):
if child:
child.send_signal(signum)
sys.exit(0)
signal.signal(signal.SIGTERM, handler)
print '%s launching %s' % (datetime.datetime.now(), cmd)
child = subprocess.Popen(cmd.split())
sys.exit(child.wait())
elif command == 'stop':
if not daemon_supported:
print >> sys.stderr, "daemon mode not supported on this platform"
sys.exit(-1)
if not os.path.exists(pid_file_path):
print >> sys.stderr, "no Trace Server to stop because PID file not found, %s" % pid_file_path
sys.exit(0)
if not os.path.isfile(pid_file_path):
print >> sys.stderr, "PID path exists but is not a file! %s" % pid_file_path
sys.exit(1)
pid = None
with open(pid_file_path, 'r') as p:
pid = int(p.read())
if not pid:
sys.exit("cannot read PID file, %s" % pid_file_path)
print "stopping Trace Server pid %s" % pid
with open(out_file_path, 'a+') as out:
print >> out, "%s terminating Trace Server" % datetime.datetime.now()
os.kill(pid, signal.SIGTERM)
else:
# run in the foreground using defaults from log4j.properties
cmd = java_cmd % {'java': java, 'root_logger': 'INFO,console', 'log_dir': '.', 'log_file': 'psql.log'}
child = subprocess.Popen(cmd.split())
sys.exit(child.wait())
| apache-2.0 |
kaviththiranga/developer-studio | esb/org.wso2.developerstudio.eclipse.gmf.esb.diagram/src/org/wso2/developerstudio/eclipse/gmf/esb/diagram/edit/commands/SendMediatorInputConnectorCreateCommand.java | 2878 | package org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.commands;
import org.eclipse.core.commands.ExecutionException;
import org.eclipse.core.runtime.IAdaptable;
import org.eclipse.core.runtime.IProgressMonitor;
import org.eclipse.emf.ecore.EObject;
import org.eclipse.gmf.runtime.common.core.command.CommandResult;
import org.eclipse.gmf.runtime.common.core.command.ICommand;
import org.eclipse.gmf.runtime.emf.type.core.IElementType;
import org.eclipse.gmf.runtime.emf.type.core.commands.EditElementCommand;
import org.eclipse.gmf.runtime.emf.type.core.requests.ConfigureRequest;
import org.eclipse.gmf.runtime.emf.type.core.requests.CreateElementRequest;
import org.eclipse.gmf.runtime.notation.View;
import org.wso2.developerstudio.eclipse.gmf.esb.EsbFactory;
import org.wso2.developerstudio.eclipse.gmf.esb.SendMediator;
import org.wso2.developerstudio.eclipse.gmf.esb.SendMediatorInputConnector;
/**
 * Command that creates the single input connector owned by a
 * {@link SendMediator} node in the ESB diagram. Generated by GMF tooling;
 * manual edits may be lost on regeneration.
 *
 * @generated
 */
public class SendMediatorInputConnectorCreateCommand extends EditElementCommand {

	/**
	 * Creates the command for the given element-creation request.
	 *
	 * @generated
	 */
	public SendMediatorInputConnectorCreateCommand(CreateElementRequest req) {
		super(req.getLabel(), null, req);
	}

	/**
	 * Resolves the model element to edit, unwrapping a notation {@link View}
	 * to its underlying semantic element when necessary.
	 *
	 * FIXME: replace with setElementToEdit()
	 * @generated
	 */
	protected EObject getElementToEdit() {
		EObject container = ((CreateElementRequest) getRequest()).getContainer();
		if (container instanceof View) {
			container = ((View) container).getElement();
		}
		return container;
	}

	/**
	 * A SendMediator owns at most one input connector, so creation is only
	 * allowed while none exists yet.
	 *
	 * @generated
	 */
	public boolean canExecute() {
		SendMediator container = (SendMediator) getElementToEdit();
		if (container.getInputConnector() != null) {
			return false;
		}
		return true;
	}

	/**
	 * Creates the connector, attaches it to its owning SendMediator,
	 * configures it, and records it as the request's new element.
	 *
	 * @generated
	 */
	protected CommandResult doExecuteWithResult(IProgressMonitor monitor, IAdaptable info) throws ExecutionException {
		SendMediatorInputConnector newElement = EsbFactory.eINSTANCE.createSendMediatorInputConnector();

		SendMediator owner = (SendMediator) getElementToEdit();
		owner.setInputConnector(newElement);

		doConfigure(newElement, monitor, info);

		((CreateElementRequest) getRequest()).setNewElement(newElement);
		return CommandResult.newOKCommandResult(newElement);
	}

	/**
	 * Runs the element type's configure command (if any) against the newly
	 * created connector, propagating the request's context and parameters.
	 *
	 * @generated
	 */
	protected void doConfigure(SendMediatorInputConnector newElement, IProgressMonitor monitor, IAdaptable info)
			throws ExecutionException {
		IElementType elementType = ((CreateElementRequest) getRequest()).getElementType();
		ConfigureRequest configureRequest = new ConfigureRequest(getEditingDomain(), newElement, elementType);
		configureRequest.setClientContext(((CreateElementRequest) getRequest()).getClientContext());
		configureRequest.addParameters(getRequest().getParameters());
		ICommand configureCommand = elementType.getEditCommand(configureRequest);
		if (configureCommand != null && configureCommand.canExecute()) {
			configureCommand.execute(monitor, info);
		}
	}

}
| apache-2.0 |
vineetgarg02/hive | service/src/test/org/apache/hive/service/auth/ldap/LdapAuthenticationTestCase.java | 5514 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hive.service.auth.ldap;
import javax.security.sasl.AuthenticationException;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import java.util.EnumMap;
import java.util.Map;
import org.apache.directory.server.ldap.LdapServer;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hive.service.auth.LdapAuthenticationProviderImpl;
import org.junit.Assert;
public final class LdapAuthenticationTestCase {

  /** Provider under test, configured from the builder's HiveConf. */
  private final LdapAuthenticationProviderImpl ldapProvider;

  /** Obtains a fresh, single-use builder for a test case. */
  public static Builder builder() {
    return new Builder();
  }

  private LdapAuthenticationTestCase(Builder builder) {
    this.ldapProvider = new LdapAuthenticationProviderImpl(builder.conf);
  }

  /**
   * Asserts that the given credentials are accepted by the LDAP provider;
   * a provider AuthenticationException is converted into a test failure.
   */
  public void assertAuthenticatePasses(Credentials credentials) {
    String user = credentials.getUser();
    String password = credentials.getPassword();
    try {
      ldapProvider.Authenticate(user, password);
    } catch (AuthenticationException e) {
      throw new AssertionError(
          String.format("Authentication failed for user '%s' with password '%s'", user, password),
          e);
    }
  }

  /** Asserts that the given credentials are rejected by the LDAP provider. */
  public void assertAuthenticateFails(Credentials credentials) {
    assertAuthenticateFails(credentials.getUser(), credentials.getPassword());
  }

  /** Asserts that authentication fails when the password is deliberately wrong. */
  public void assertAuthenticateFailsUsingWrongPassword(Credentials credentials) {
    assertAuthenticateFails(credentials.getUser(), "not" + credentials.getPassword());
  }

  /** Asserts that the user/password pair is rejected by the LDAP provider. */
  public void assertAuthenticateFails(String user, String password) {
    boolean rejected = false;
    try {
      ldapProvider.Authenticate(user, password);
    } catch (AuthenticationException expected) {
      Assert.assertNotNull("Expected authentication exception", expected);
      rejected = true;
    }
    if (!rejected) {
      Assert.fail(String.format("Expected authentication to fail for %s", user));
    }
  }

  /**
   * Collects HiveConf overrides for one LDAP test case. Each property may be
   * set at most once, and the builder itself may build at most once.
   */
  public static final class Builder {

    private final Map<HiveConf.ConfVars, String> properties =
        new EnumMap<>(HiveConf.ConfVars.class);
    private HiveConf conf;

    public Builder baseDN(String baseDN) {
      return putOnce(HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_BASEDN, baseDN);
    }

    public Builder guidKey(String guidKey) {
      return putOnce(HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_GUIDKEY, guidKey);
    }

    public Builder userDNPatterns(String... userDNPatterns) {
      return putOnce(HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_USERDNPATTERN,
          colonSeparated(userDNPatterns));
    }

    public Builder userFilters(String... userFilters) {
      return putOnce(HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_USERFILTER,
          commaSeparated(userFilters));
    }

    public Builder groupDNPatterns(String... groupDNPatterns) {
      return putOnce(HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_GROUPDNPATTERN,
          colonSeparated(groupDNPatterns));
    }

    public Builder groupFilters(String... groupFilters) {
      return putOnce(HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_GROUPFILTER,
          commaSeparated(groupFilters));
    }

    public Builder groupClassKey(String groupClassKey) {
      return putOnce(HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_GROUPCLASS_KEY, groupClassKey);
    }

    public Builder ldapServer(LdapServer ldapServer) {
      return putOnce(HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_URL,
          "ldap://localhost:" + ldapServer.getPort());
    }

    public Builder customQuery(String customQuery) {
      return putOnce(HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_CUSTOMLDAPQUERY, customQuery);
    }

    public Builder groupMembershipKey(String groupMembershipKey) {
      return putOnce(HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_GROUPMEMBERSHIP_KEY,
          groupMembershipKey);
    }

    public Builder userMembershipKey(String userMembershipKey) {
      return putOnce(HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_USERMEMBERSHIP_KEY,
          userMembershipKey);
    }

    // Joins vararg values with ':' (DN patterns) — same as Joiner.on(':').
    private static String colonSeparated(String... values) {
      return Joiner.on(':').join(values);
    }

    // Joins vararg values with ',' (filters) — same as Joiner.on(',').
    private static String commaSeparated(String... values) {
      return Joiner.on(',').join(values);
    }

    // Records one configuration override, rejecting duplicate assignments.
    private Builder putOnce(HiveConf.ConfVars confVar, String value) {
      Preconditions.checkState(!properties.containsKey(confVar),
          "Property %s has been set already", confVar);
      properties.put(confVar, value);
      return this;
    }

    // Applies the recorded overrides (plus verbose logging) onto 'conf'.
    private void applyOverrides() {
      conf.set("hive.root.logger", "DEBUG,console");
      for (Map.Entry<HiveConf.ConfVars, String> override : properties.entrySet()) {
        conf.setVar(override.getKey(), override.getValue());
      }
    }

    public LdapAuthenticationTestCase build() {
      Preconditions.checkState(conf == null,
          "Test Case Builder should not be reused. Please create a new instance.");
      conf = new HiveConf();
      applyOverrides();
      return new LdapAuthenticationTestCase(this);
    }
  }
}
| apache-2.0 |
lodengo/openfire_src | src/plugins/jitsivideobridge/src/java/org/ifsoft/rtp/Vp8Packet.java | 12033 | package org.ifsoft.rtp;
import java.util.ArrayList;
public class Vp8Packet
{
    /** Maximum size in bytes of one packetized chunk (descriptor + payload). */
    public static Integer _maxPacketSize = Integer.valueOf(1050);

    // VP8 payload-descriptor state; bit names follow RFC 7741 terminology.
    private Boolean _extendedControlBitsPresent;    // X bit: an extension octet follows
    private byte _keyIndex;                         // KEYIDX value (5 bits)
    private Boolean _keyIndexPresent;               // K bit
    private Boolean _layerSync;                     // Y bit
    private Boolean _nonReferenceFrame;             // N bit: frame is discardable
    private byte _partitionId;                      // PID (low 4 bits of first octet)
    private byte _payload[];                        // raw VP8 payload carried by this packet
    private Short _pictureID;                       // PictureID (7- or 15-bit on the wire)
    private Boolean _pictureIDPresent;              // I bit
    private Boolean _startOfPartition;              // S bit: first packet of a partition
    private byte _temporalLayerIndex;               // TID (2 bits)
    private Boolean _temporalLayerIndexPresent;     // T bit
    private byte _temporalLevelZeroIndex;           // TL0PICIDX
    private Boolean _temporalLevelZeroIndexPresent; // L bit

    /** Creates a packet with every optional descriptor field marked absent. */
    public Vp8Packet()
    {
        _extendedControlBitsPresent = Boolean.valueOf(false);
        _keyIndexPresent = Boolean.valueOf(false);
        _layerSync = Boolean.valueOf(false);
        _nonReferenceFrame = Boolean.valueOf(false);
        _pictureID = Short.valueOf((short)0);
        _pictureIDPresent = Boolean.valueOf(false);
        _startOfPartition = Boolean.valueOf(false);
        _temporalLayerIndexPresent = Boolean.valueOf(false);
        _temporalLevelZeroIndexPresent = Boolean.valueOf(false);
    }

    /**
     * Concatenates the payloads of the given packets, in order, back into a
     * single encoded frame.
     */
    public static byte[] depacketize(Vp8Packet packets[])
    {
        // First pass: total payload size.
        int total = 0;
        for (Vp8Packet packet : packets)
            total += ArrayExtensions.getLength(packet.getPayload()).intValue();

        // Second pass: copy each payload into place.
        byte destination[] = new byte[total];
        int destinationIndex = 0;
        for (Vp8Packet packet : packets)
        {
            int length = ArrayExtensions.getLength(packet.getPayload()).intValue();
            BitAssistant.copy(packet.getPayload(), 0, destination, destinationIndex, length);
            destinationIndex += length;
        }
        return destination;
    }

    /**
     * Serializes a packet to wire format: the mandatory descriptor octet,
     * optional extension octets, then the payload bytes.
     */
    public static byte[] getBytes(Vp8Packet packet)
    {
        ByteCollection bytes = new ByteCollection();
        // First octet: X|R|N|S|R|PID.
        bytes.add((byte)((packet.getExtendedControlBitsPresent().booleanValue() ? 0x80 : 0)
                | (packet.getNonReferenceFrame().booleanValue() ? 0x20 : 0)
                | (packet.getStartOfPartition().booleanValue() ? 0x10 : 0)
                | packet.getPartitionId() & 0xf));
        if (packet.getExtendedControlBitsPresent().booleanValue())
        {
            // Extension octet: I|L|T|K flag bits.
            bytes.add((byte)((packet.getPictureIDPresent().booleanValue() ? 0x80 : 0)
                    | (packet.getTemporalLevelZeroIndexPresent().booleanValue() ? 0x40 : 0)
                    | (packet.getTemporalLayerIndexPresent().booleanValue() ? 0x20 : 0)
                    | (packet.getKeyIndexPresent().booleanValue() ? 0x10 : 0)));
            if (packet.getPictureIDPresent().booleanValue())
            {
                // Always written in the two-octet (15-bit) form; 0x80 marks it.
                byte shortBytesNetwork[] = BitAssistant.getShortBytesNetwork(packet.getPictureID());
                bytes.add((byte)(0x80 | shortBytesNetwork[0] & 0x7f));
                bytes.add(shortBytesNetwork[1]);
            }
            if (packet.getTemporalLevelZeroIndexPresent().booleanValue())
                bytes.add(packet.getTemporalLevelZeroIndex());
            if (packet.getTemporalLayerIndexPresent().booleanValue() || packet.getKeyIndexPresent().booleanValue())
                bytes.add((byte)(packet.getTemporalLayerIndex() << 6 & 0xc0
                        | (packet.getLayerSync().booleanValue() ? 0x20 : 0)
                        | packet.getKeyIndex() & 0x1f));
        }
        bytes.addRange(packet.getPayload());
        return bytes.toArray();
    }

    /** Serializes this packet; see {@link #getBytes(Vp8Packet)}. */
    public byte[] getBytes()
    {
        return getBytes(this);
    }

    public Boolean getExtendedControlBitsPresent()
    {
        return _extendedControlBitsPresent;
    }

    public byte getKeyIndex()
    {
        return _keyIndex;
    }

    public Boolean getKeyIndexPresent()
    {
        return _keyIndexPresent;
    }

    public Boolean getLayerSync()
    {
        return _layerSync;
    }

    public Boolean getNonReferenceFrame()
    {
        return _nonReferenceFrame;
    }

    public byte getPartitionId()
    {
        return _partitionId;
    }

    public byte[] getPayload()
    {
        return _payload;
    }

    public Short getPictureID()
    {
        return _pictureID;
    }

    public Boolean getPictureIDPresent()
    {
        return _pictureIDPresent;
    }

    public Boolean getStartOfPartition()
    {
        return _startOfPartition;
    }

    public byte getTemporalLayerIndex()
    {
        return _temporalLayerIndex;
    }

    public Boolean getTemporalLayerIndexPresent()
    {
        return _temporalLayerIndexPresent;
    }

    public byte getTemporalLevelZeroIndex()
    {
        return _temporalLevelZeroIndex;
    }

    public Boolean getTemporalLevelZeroIndexPresent()
    {
        return _temporalLevelZeroIndexPresent;
    }

    /**
     * Splits an encoded frame into packets with payloads as evenly sized as
     * possible, each at most (_maxPacketSize - 1) bytes; the first packet is
     * flagged as the start of the partition. An empty frame still produces
     * one (empty) packet.
     */
    public static Vp8Packet[] packetize(byte encodedData[])
    {
        int capacity = _maxPacketSize.intValue() - 1; // leave room for the descriptor octet
        int dataLength = ArrayExtensions.getLength(encodedData).intValue();
        int packetCount = (int)Math.ceil((double)dataLength / (double)capacity);
        if (packetCount == 0)
            packetCount = 1;
        int baseSize = dataLength / packetCount;
        // The first 'remainder' packets carry one extra byte each.
        int remainder = dataLength - packetCount * baseSize;
        ArrayList<Vp8Packet> list = new ArrayList<Vp8Packet>();
        int offset = 0;
        for (int i = 0; i < packetCount; i++)
        {
            int count = baseSize;
            if (i < remainder)
                count++;
            Vp8Packet item = new Vp8Packet();
            item.setStartOfPartition(Boolean.valueOf(i == 0));
            item.setPayload(BitAssistant.subArray(encodedData, Integer.valueOf(offset), Integer.valueOf(count)));
            list.add(item);
            offset += count;
        }
        return list.toArray(new Vp8Packet[0]);
    }

    /**
     * Parses the wire-format descriptor (and trailing payload) of a single
     * VP8 packet. Inverse of {@link #getBytes(Vp8Packet)}, except that it
     * also accepts the one-octet (7-bit) PictureID form.
     */
    public static Vp8Packet parseBytes(byte packetBytes[])
    {
        Vp8Packet packet = new Vp8Packet();
        int index = 0;
        // First octet: X|R|N|S|R|PID.
        byte firstOctet = packetBytes[index];
        packet.setExtendedControlBitsPresent(Boolean.valueOf((firstOctet & 0x80) == 0x80));
        packet.setNonReferenceFrame(Boolean.valueOf((firstOctet & 0x20) == 0x20));
        packet.setStartOfPartition(Boolean.valueOf((firstOctet & 0x10) == 0x10));
        packet.setPartitionId((byte)(firstOctet & 0xf));
        index++;
        if (packet.getExtendedControlBitsPresent().booleanValue())
        {
            // Extension octet: I|L|T|K flag bits.
            byte extensionOctet = packetBytes[index];
            packet.setPictureIDPresent(Boolean.valueOf((extensionOctet & 0x80) == 0x80));
            packet.setTemporalLevelZeroIndexPresent(Boolean.valueOf((extensionOctet & 0x40) == 0x40));
            packet.setTemporalLayerIndexPresent(Boolean.valueOf((extensionOctet & 0x20) == 0x20));
            packet.setKeyIndexPresent(Boolean.valueOf((extensionOctet & 0x10) == 0x10));
            index++;
            if (packet.getPictureIDPresent().booleanValue())
            {
                if ((packetBytes[index] & 0x80) == 0x80)
                {
                    // 15-bit PictureID over two octets; strip the marker bit.
                    byte buffer[] = BitAssistant.subArray(packetBytes, Integer.valueOf(index), Integer.valueOf(2));
                    buffer[0] = (byte)(buffer[0] & 0x7f);
                    packet.setPictureID(BitAssistant.toShortNetwork(buffer, Integer.valueOf(0)));
                    index += 2;
                }
                else
                {
                    // 7-bit PictureID in a single octet.
                    byte buffer[] = new byte[2];
                    buffer[1] = packetBytes[index];
                    packet.setPictureID(BitAssistant.toShortNetwork(buffer, Integer.valueOf(0)));
                    index++;
                }
            }
            if (packet.getTemporalLevelZeroIndexPresent().booleanValue())
            {
                packet.setTemporalLevelZeroIndex(packetBytes[index]);
                index++;
            }
            if (packet.getTemporalLayerIndexPresent().booleanValue() || packet.getKeyIndexPresent().booleanValue())
            {
                packet.setTemporalLayerIndex((byte)(packetBytes[index] >> 6 & 3));
                packet.setLayerSync(Boolean.valueOf((packetBytes[index] & 0x20) == 0x20));
                packet.setKeyIndex((byte)(packetBytes[index] & 0x1f));
                index++;
            }
        }
        // Everything after the descriptor is payload.
        packet.setPayload(BitAssistant.subArray(packetBytes, Integer.valueOf(index)));
        return packet;
    }

    private void setExtendedControlBitsPresent(Boolean value)
    {
        _extendedControlBitsPresent = value;
    }

    private void setKeyIndex(byte value)
    {
        _keyIndex = value;
    }

    private void setKeyIndexPresent(Boolean value)
    {
        _keyIndexPresent = value;
    }

    private void setLayerSync(Boolean value)
    {
        _layerSync = value;
    }

    private void setNonReferenceFrame(Boolean value)
    {
        _nonReferenceFrame = value;
    }

    private void setPartitionId(byte value)
    {
        _partitionId = value;
    }

    private void setPayload(byte value[])
    {
        _payload = value;
    }

    private void setPictureID(Short value)
    {
        _pictureID = value;
    }

    private void setPictureIDPresent(Boolean value)
    {
        _pictureIDPresent = value;
    }

    private void setStartOfPartition(Boolean value)
    {
        _startOfPartition = value;
    }

    private void setTemporalLayerIndex(byte value)
    {
        _temporalLayerIndex = value;
    }

    private void setTemporalLayerIndexPresent(Boolean value)
    {
        _temporalLayerIndexPresent = value;
    }

    private void setTemporalLevelZeroIndex(byte value)
    {
        _temporalLevelZeroIndex = value;
    }

    private void setTemporalLevelZeroIndexPresent(Boolean value)
    {
        _temporalLevelZeroIndexPresent = value;
    }

    /**
     * Returns the signed difference between two RTP sequence numbers,
     * unwrapped into the range of a 16-bit counter.
     *
     * Fix: RTP sequence numbers wrap modulo 65536 (they are 16-bit, RFC 3550),
     * so wrap correction must add/subtract 65536; the previous code used
     * 65535, skewing every wrap-around delta by one.
     */
    public static Integer getSequenceNumberDelta(Integer sequenceNumber, Integer lastSequenceNumber)
    {
        int delta = sequenceNumber.intValue() - lastSequenceNumber.intValue();
        if (delta < -32768)
            return Integer.valueOf(delta + 65536);
        if (delta > 32768)
            delta -= 65536;
        return Integer.valueOf(delta);
    }
}
| apache-2.0 |
imikushin/rancher-compose | Godeps/_workspace/src/github.com/rancherio/go-rancher/client/generated_service_consume_map.go | 3712 | package client
// SERVICE_CONSUME_MAP_TYPE is the resource type string used for service
// consume maps in the Rancher API schema.
const (
	SERVICE_CONSUME_MAP_TYPE = "serviceConsumeMap"
)
// ServiceConsumeMap is the API resource linking a consuming service
// (ServiceId) to the service it consumes (ConsumedServiceId). The struct
// tags drive JSON/YAML (de)serialization of API payloads.
type ServiceConsumeMap struct {
	Resource

	AccountId string `json:"accountId,omitempty" yaml:"account_id,omitempty"`

	ConsumedServiceId string `json:"consumedServiceId,omitempty" yaml:"consumed_service_id,omitempty"`

	Created string `json:"created,omitempty" yaml:"created,omitempty"`

	Data map[string]interface{} `json:"data,omitempty" yaml:"data,omitempty"`

	Description string `json:"description,omitempty" yaml:"description,omitempty"`

	Kind string `json:"kind,omitempty" yaml:"kind,omitempty"`

	Name string `json:"name,omitempty" yaml:"name,omitempty"`

	RemoveTime string `json:"removeTime,omitempty" yaml:"remove_time,omitempty"`

	Removed string `json:"removed,omitempty" yaml:"removed,omitempty"`

	ServiceId string `json:"serviceId,omitempty" yaml:"service_id,omitempty"`

	State string `json:"state,omitempty" yaml:"state,omitempty"`

	Transitioning string `json:"transitioning,omitempty" yaml:"transitioning,omitempty"`

	TransitioningMessage string `json:"transitioningMessage,omitempty" yaml:"transitioning_message,omitempty"`

	TransitioningProgress int64 `json:"transitioningProgress,omitempty" yaml:"transitioning_progress,omitempty"`

	Uuid string `json:"uuid,omitempty" yaml:"uuid,omitempty"`
}
// ServiceConsumeMapCollection is a list of ServiceConsumeMap resources as
// returned by the collection endpoint.
type ServiceConsumeMapCollection struct {
	Collection
	Data []ServiceConsumeMap `json:"data,omitempty"`
}
// ServiceConsumeMapClient performs API operations for the serviceConsumeMap
// resource type through the owning RancherClient.
type ServiceConsumeMapClient struct {
	rancherClient *RancherClient
}
// ServiceConsumeMapOperations enumerates the CRUD and action calls supported
// for serviceConsumeMap resources.
type ServiceConsumeMapOperations interface {
	List(opts *ListOpts) (*ServiceConsumeMapCollection, error)
	Create(opts *ServiceConsumeMap) (*ServiceConsumeMap, error)
	Update(existing *ServiceConsumeMap, updates interface{}) (*ServiceConsumeMap, error)
	ById(id string) (*ServiceConsumeMap, error)
	Delete(container *ServiceConsumeMap) error

	ActionCreate(*ServiceConsumeMap) (*ServiceConsumeMap, error)

	ActionRemove(*ServiceConsumeMap) (*ServiceConsumeMap, error)
}
// newServiceConsumeMapClient wires a ServiceConsumeMapClient to the given
// RancherClient.
func newServiceConsumeMapClient(rancherClient *RancherClient) *ServiceConsumeMapClient {
	return &ServiceConsumeMapClient{
		rancherClient: rancherClient,
	}
}
// Create posts a new serviceConsumeMap resource and returns the resource
// decoded from the API response (non-nil even on error).
func (c *ServiceConsumeMapClient) Create(container *ServiceConsumeMap) (*ServiceConsumeMap, error) {
	resp := &ServiceConsumeMap{}
	err := c.rancherClient.doCreate(SERVICE_CONSUME_MAP_TYPE, container, resp)
	return resp, err
}
// Update applies the given updates to an existing serviceConsumeMap resource
// and returns the updated resource decoded from the API response.
func (c *ServiceConsumeMapClient) Update(existing *ServiceConsumeMap, updates interface{}) (*ServiceConsumeMap, error) {
	resp := &ServiceConsumeMap{}
	err := c.rancherClient.doUpdate(SERVICE_CONSUME_MAP_TYPE, &existing.Resource, updates, resp)
	return resp, err
}
// List fetches serviceConsumeMap resources matching the given list options.
func (c *ServiceConsumeMapClient) List(opts *ListOpts) (*ServiceConsumeMapCollection, error) {
	resp := &ServiceConsumeMapCollection{}
	err := c.rancherClient.doList(SERVICE_CONSUME_MAP_TYPE, opts, resp)
	return resp, err
}
// ById fetches a single serviceConsumeMap resource by its identifier.
func (c *ServiceConsumeMapClient) ById(id string) (*ServiceConsumeMap, error) {
	resp := &ServiceConsumeMap{}
	err := c.rancherClient.doById(SERVICE_CONSUME_MAP_TYPE, id, resp)
	return resp, err
}
// Delete removes the given serviceConsumeMap resource.
func (c *ServiceConsumeMapClient) Delete(container *ServiceConsumeMap) error {
	return c.rancherClient.doResourceDelete(SERVICE_CONSUME_MAP_TYPE, &container.Resource)
}
// ActionCreate invokes the "create" action on an existing serviceConsumeMap
// resource and returns the resulting resource state.
func (c *ServiceConsumeMapClient) ActionCreate(resource *ServiceConsumeMap) (*ServiceConsumeMap, error) {

	resp := &ServiceConsumeMap{}

	err := c.rancherClient.doAction(SERVICE_CONSUME_MAP_TYPE, "create", &resource.Resource, nil, resp)

	return resp, err
}
// ActionRemove invokes the "remove" action on an existing serviceConsumeMap
// resource and returns the resulting resource state.
func (c *ServiceConsumeMapClient) ActionRemove(resource *ServiceConsumeMap) (*ServiceConsumeMap, error) {

	resp := &ServiceConsumeMap{}

	err := c.rancherClient.doAction(SERVICE_CONSUME_MAP_TYPE, "remove", &resource.Resource, nil, resp)

	return resp, err
}
| apache-2.0 |
travismc1/azure-sdk-for-net | src/TrafficManagerManagement/Generated/TrafficManagerManagementClientExtensions.cs | 1357 | //
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Warning: This code was generated by a tool.
//
// Changes to this file may cause incorrect behavior and will be lost if the
// code is regenerated.
using System;
using System.Linq;
namespace Microsoft.WindowsAzure
{
    /// <summary>
    /// The Windows Azure Traffic Manager management API provides a RESTful set
    /// of web services that interact with Windows Azure Traffic Manager
    /// service for creating, updating, listing, and deleting Traffic Manager
    /// profiles and definitions.  (see
    /// http://msdn.microsoft.com/en-us/library/windowsazure/dn166981.aspx for
    /// more information)
    /// </summary>
    public static partial class TrafficManagerManagementClientExtensions
    {
        // Generated placeholder: no members are defined in this file. Because
        // the class is 'partial', additional members may be declared in other
        // (generated) files of this assembly.
    }
}
| apache-2.0 |
WIgor/hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/MapAttemptFinishedEvent.java | 9324 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.jobhistory;
import java.util.Set;
import org.apache.avro.util.Utf8;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapred.ProgressSplitsBlock;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.util.JobHistoryEventUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent;
import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
/**
* Event to record successful completion of a map attempt
*
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class MapAttemptFinishedEvent implements HistoryEvent {
  // Lazily-constructed Avro record; built on first getDatum() call.
  private MapAttemptFinished datum = null;

  private TaskAttemptID attemptId;  // attempt this event describes
  private TaskType taskType;        // type of the task
  private String taskStatus;        // status string of the attempt
  private long finishTime;          // finish time of the attempt
  private String hostname;          // host where the map executed
  private String rackName;          // rack of that host (may be null)
  private int port;                 // tracker RPC port (-1 when unknown)
  private long mapFinishTime;       // finish time of the map phase
  private String state;             // state string for the attempt
  private Counters counters;        // counters recorded for the attempt
  // Progress "splits": pixelated graphs of worker-node state variables
  // against progress — wallclock time, CPU time, virtual and physical memory
  // (see the constructor javadoc and ProgressSplitsBlock).
  int[][] allSplits;
  int[] clockSplits;
  int[] cpuUsages;
  int[] vMemKbytes;
  int[] physMemKbytes;
/**
* Create an event for successful completion of map attempts
* @param id Task Attempt ID
* @param taskType Type of the task
* @param taskStatus Status of the task
* @param mapFinishTime Finish time of the map phase
* @param finishTime Finish time of the attempt
* @param hostname Name of the host where the map executed
* @param port RPC port for the tracker host.
* @param rackName Name of the rack where the map executed
* @param state State string for the attempt
* @param counters Counters for the attempt
* @param allSplits the "splits", or a pixelated graph of various
* measurable worker node state variables against progress.
* Currently there are four; wallclock time, CPU time,
* virtual memory and physical memory.
*
* If you have no splits data, code {@code null} for this
* parameter.
*/
  public MapAttemptFinishedEvent
      (TaskAttemptID id, TaskType taskType, String taskStatus, 
       long mapFinishTime, long finishTime, String hostname, int port, 
       String rackName, String state, Counters counters, int[][] allSplits) {
    this.attemptId = id;
    this.taskType = taskType;
    this.taskStatus = taskStatus;
    this.mapFinishTime = mapFinishTime;
    this.finishTime = finishTime;
    this.hostname = hostname;
    this.rackName = rackName;
    this.port = port;
    this.state = state;
    this.counters = counters;
    this.allSplits = allSplits;
    // Derive the four per-measure views (wallclock, CPU, virtual memory,
    // physical memory) from the combined splits matrix. The javadoc above
    // permits allSplits == null; the array helpers are assumed to tolerate
    // that — see ProgressSplitsBlock.
    this.clockSplits = ProgressSplitsBlock.arrayGetWallclockTime(allSplits);
    this.cpuUsages = ProgressSplitsBlock.arrayGetCPUTime(allSplits);
    this.vMemKbytes = ProgressSplitsBlock.arrayGetVMemKbytes(allSplits);
    this.physMemKbytes = ProgressSplitsBlock.arrayGetPhysMemKbytes(allSplits);
  }
  /**
   * Create an event for successful completion of map attempts.
   *
   * @deprecated please use the constructor with an additional
   * argument, an array of splits arrays instead.  See
   * {@link org.apache.hadoop.mapred.ProgressSplitsBlock}
   * for an explanation of the meaning of that parameter.
   *
   * @param id Task Attempt ID
   * @param taskType Type of the task
   * @param taskStatus Status of the task
   * @param mapFinishTime Finish time of the map phase
   * @param finishTime Finish time of the attempt
   * @param hostname Name of the host where the map executed
   * @param state State string for the attempt
   * @param counters Counters for the attempt
   */
  @Deprecated
  public MapAttemptFinishedEvent
      (TaskAttemptID id, TaskType taskType, String taskStatus,
       long mapFinishTime, long finishTime, String hostname,
       String state, Counters counters) {
    // Delegate with "unknown" defaults: port -1, empty rack name, and no
    // splits data.
    this(id, taskType, taskStatus, mapFinishTime, finishTime, hostname, -1, "",
        state, counters, null);
  }
MapAttemptFinishedEvent() {}
  /**
   * Lazily builds and caches the Avro {@code MapAttemptFinished} record
   * mirroring this event's fields.
   *
   * @return the (cached) Avro datum for this event
   */
  public Object getDatum() {
    if (datum == null) {
      datum = new MapAttemptFinished();
      datum.setTaskid(new Utf8(attemptId.getTaskID().toString()));
      datum.setAttemptId(new Utf8(attemptId.toString()));
      datum.setTaskType(new Utf8(taskType.name()));
      datum.setTaskStatus(new Utf8(taskStatus));
      datum.setMapFinishTime(mapFinishTime);
      datum.setFinishTime(finishTime);
      datum.setHostname(new Utf8(hostname));
      datum.setPort(port);
      // Rack name is optional: the deprecated constructor path may leave it
      // null, in which case the Avro field is simply not set.
      if (rackName != null) {
        datum.setRackname(new Utf8(rackName));
      }
      datum.setState(new Utf8(state));
      datum.setCounters(EventWriter.toAvro(counters));
      // The per-metric split arrays were derived from allSplits in the
      // constructor; serialize them individually.
      datum.setClockSplits(AvroArrayUtils.toAvro(ProgressSplitsBlock
          .arrayGetWallclockTime(allSplits)));
      datum.setCpuUsages(AvroArrayUtils.toAvro(ProgressSplitsBlock
          .arrayGetCPUTime(allSplits)));
      datum.setVMemKbytes(AvroArrayUtils.toAvro(ProgressSplitsBlock
          .arrayGetVMemKbytes(allSplits)));
      datum.setPhysMemKbytes(AvroArrayUtils.toAvro(ProgressSplitsBlock
          .arrayGetPhysMemKbytes(allSplits)));
    }
    return datum;
  }
public void setDatum(Object oDatum) {
this.datum = (MapAttemptFinished)oDatum;
this.attemptId = TaskAttemptID.forName(datum.getAttemptId().toString());
this.taskType = TaskType.valueOf(datum.getTaskType().toString());
this.taskStatus = datum.getTaskStatus().toString();
this.mapFinishTime = datum.getMapFinishTime();
this.finishTime = datum.getFinishTime();
this.hostname = datum.getHostname().toString();
this.rackName = datum.getRackname().toString();
this.port = datum.getPort();
this.state = datum.getState().toString();
this.counters = EventReader.fromAvro(datum.getCounters());
this.clockSplits = AvroArrayUtils.fromAvro(datum.getClockSplits());
this.cpuUsages = AvroArrayUtils.fromAvro(datum.getCpuUsages());
this.vMemKbytes = AvroArrayUtils.fromAvro(datum.getVMemKbytes());
this.physMemKbytes = AvroArrayUtils.fromAvro(datum.getPhysMemKbytes());
}
  /** Get the task ID (derived from the attempt id). */
  public TaskID getTaskId() { return attemptId.getTaskID(); }
  /** Get the attempt id */
  public TaskAttemptID getAttemptId() {
    return attemptId;
  }
  /** Get the task type */
  public TaskType getTaskType() {
    return taskType;
  }
/** Get the task status */
public String getTaskStatus() { return taskStatus.toString(); }
  /** Get the map phase finish time (milliseconds). */
  public long getMapFinishTime() { return mapFinishTime; }
  /** Get the attempt finish time (milliseconds). */
  public long getFinishTime() { return finishTime; }
/** Get the host name */
public String getHostname() { return hostname.toString(); }
  /** Get the tracker rpc port (-1 when unknown -- see the deprecated constructor). */
  public int getPort() { return port; }
/** Get the rack name */
public String getRackName() {
return rackName == null ? null : rackName.toString();
}
/** Get the state string */
public String getState() { return state.toString(); }
  /** Get the counters (package-private; used during serialization). */
  Counters getCounters() { return counters; }
  /** Get the event type */
  public EventType getEventType() {
    return EventType.MAP_ATTEMPT_FINISHED;
  }
  /** Wallclock-time progress split samples (see ProgressSplitsBlock). */
  public int[] getClockSplits() {
    return clockSplits;
  }
  /** CPU-time progress split samples. */
  public int[] getCpuUsages() {
    return cpuUsages;
  }
  /** Virtual-memory (KB) progress split samples. */
  public int[] getVMemKbytes() {
    return vMemKbytes;
  }
  /** Physical-memory (KB) progress split samples. */
  public int[] getPhysMemKbytes() {
    return physMemKbytes;
  }
  /**
   * Converts this event into a timeline-service event keyed by the
   * upper-cased event type name, carrying the attempt's summary fields.
   */
  @Override
  public TimelineEvent toTimelineEvent() {
    TimelineEvent tEvent = new TimelineEvent();
    tEvent.setId(StringUtils.toUpperCase(getEventType().name()));
    tEvent.addInfo("TASK_TYPE", getTaskType().toString());
    tEvent.addInfo("FINISH_TIME", getFinishTime());
    tEvent.addInfo("STATUS", getTaskStatus());
    tEvent.addInfo("STATE", getState());
    tEvent.addInfo("MAP_FINISH_TIME", getMapFinishTime());
    tEvent.addInfo("HOSTNAME", getHostname());
    tEvent.addInfo("PORT", getPort());
    tEvent.addInfo("RACK_NAME", getRackName());
    // Attempt id is defensively null-checked; the other accessors are
    // assumed non-null for a finished attempt -- NOTE(review): confirm.
    tEvent.addInfo("ATTEMPT_ID", getAttemptId() == null ?
        "" : getAttemptId().toString());
    return tEvent;
  }
@Override
public Set<TimelineMetric> getTimelineMetrics() {
Set<TimelineMetric> metrics = JobHistoryEventUtils
.countersToTimelineMetric(getCounters(), finishTime);
return metrics;
}
}
| apache-2.0 |
Miciah/origin | vendor/github.com/aws/aws-sdk-go/service/sns/errors.go | 5963 | // Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
package sns
// Error-code string constants returned by the AWS SNS service.  The Go SDK
// surfaces these via awserr.Error.Code(); this file is generated -- keep
// code changes in the generator, not here.
const (

	// ErrCodeAuthorizationErrorException for service response error code
	// "AuthorizationError".
	//
	// Indicates that the user has been denied access to the requested resource.
	ErrCodeAuthorizationErrorException = "AuthorizationError"

	// ErrCodeConcurrentAccessException for service response error code
	// "ConcurrentAccess".
	//
	// Can't perform multiple operations on a tag simultaneously. Perform the operations
	// sequentially.
	ErrCodeConcurrentAccessException = "ConcurrentAccess"

	// ErrCodeEndpointDisabledException for service response error code
	// "EndpointDisabled".
	//
	// Exception error indicating endpoint disabled.
	ErrCodeEndpointDisabledException = "EndpointDisabled"

	// ErrCodeFilterPolicyLimitExceededException for service response error code
	// "FilterPolicyLimitExceeded".
	//
	// Indicates that the number of filter policies in your AWS account exceeds the
	// limit. To add more filter policies, submit an SNS Limit Increase case in the
	// AWS Support Center.
	ErrCodeFilterPolicyLimitExceededException = "FilterPolicyLimitExceeded"

	// ErrCodeInternalErrorException for service response error code
	// "InternalError".
	//
	// Indicates an internal service error.
	ErrCodeInternalErrorException = "InternalError"

	// ErrCodeInvalidParameterException for service response error code
	// "InvalidParameter".
	//
	// Indicates that a request parameter does not comply with the associated constraints.
	ErrCodeInvalidParameterException = "InvalidParameter"

	// ErrCodeInvalidParameterValueException for service response error code
	// "ParameterValueInvalid".
	//
	// Indicates that a request parameter does not comply with the associated constraints.
	ErrCodeInvalidParameterValueException = "ParameterValueInvalid"

	// ErrCodeInvalidSecurityException for service response error code
	// "InvalidSecurity".
	//
	// The credential signature isn't valid. You must use an HTTPS endpoint and
	// sign your request using Signature Version 4.
	ErrCodeInvalidSecurityException = "InvalidSecurity"

	// ErrCodeKMSAccessDeniedException for service response error code
	// "KMSAccessDenied".
	//
	// The ciphertext references a key that doesn't exist or that you don't have
	// access to.
	ErrCodeKMSAccessDeniedException = "KMSAccessDenied"

	// ErrCodeKMSDisabledException for service response error code
	// "KMSDisabled".
	//
	// The request was rejected because the specified customer master key (CMK)
	// isn't enabled.
	ErrCodeKMSDisabledException = "KMSDisabled"

	// ErrCodeKMSInvalidStateException for service response error code
	// "KMSInvalidState".
	//
	// The request was rejected because the state of the specified resource isn't
	// valid for this request. For more information, see How Key State Affects Use
	// of a Customer Master Key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
	// in the AWS Key Management Service Developer Guide.
	ErrCodeKMSInvalidStateException = "KMSInvalidState"

	// ErrCodeKMSNotFoundException for service response error code
	// "KMSNotFound".
	//
	// The request was rejected because the specified entity or resource can't be
	// found.
	ErrCodeKMSNotFoundException = "KMSNotFound"

	// ErrCodeKMSOptInRequired for service response error code
	// "KMSOptInRequired".
	//
	// The AWS access key ID needs a subscription for the service.
	ErrCodeKMSOptInRequired = "KMSOptInRequired"

	// ErrCodeKMSThrottlingException for service response error code
	// "KMSThrottling".
	//
	// The request was denied due to request throttling. For more information about
	// throttling, see Limits (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html#requests-per-second)
	// in the AWS Key Management Service Developer Guide.
	ErrCodeKMSThrottlingException = "KMSThrottling"

	// ErrCodeNotFoundException for service response error code
	// "NotFound".
	//
	// Indicates that the requested resource does not exist.
	ErrCodeNotFoundException = "NotFound"

	// ErrCodePlatformApplicationDisabledException for service response error code
	// "PlatformApplicationDisabled".
	//
	// Exception error indicating platform application disabled.
	ErrCodePlatformApplicationDisabledException = "PlatformApplicationDisabled"

	// ErrCodeResourceNotFoundException for service response error code
	// "ResourceNotFound".
	//
	// Can't tag resource. Verify that the topic exists.
	ErrCodeResourceNotFoundException = "ResourceNotFound"

	// ErrCodeStaleTagException for service response error code
	// "StaleTag".
	//
	// A tag has been added to a resource with the same ARN as a deleted resource.
	// Wait a short while and then retry the operation.
	ErrCodeStaleTagException = "StaleTag"

	// ErrCodeSubscriptionLimitExceededException for service response error code
	// "SubscriptionLimitExceeded".
	//
	// Indicates that the customer already owns the maximum allowed number of subscriptions.
	ErrCodeSubscriptionLimitExceededException = "SubscriptionLimitExceeded"

	// ErrCodeTagLimitExceededException for service response error code
	// "TagLimitExceeded".
	//
	// Can't add more than 50 tags to a topic.
	ErrCodeTagLimitExceededException = "TagLimitExceeded"

	// ErrCodeTagPolicyException for service response error code
	// "TagPolicy".
	//
	// The request doesn't comply with the IAM tag policy. Correct your request
	// and then retry it.
	ErrCodeTagPolicyException = "TagPolicy"

	// ErrCodeThrottledException for service response error code
	// "Throttled".
	//
	// Indicates that the rate at which requests have been submitted for this action
	// exceeds the limit for your account.
	ErrCodeThrottledException = "Throttled"

	// ErrCodeTopicLimitExceededException for service response error code
	// "TopicLimitExceeded".
	//
	// Indicates that the customer already owns the maximum allowed number of topics.
	ErrCodeTopicLimitExceededException = "TopicLimitExceeded"
)
| apache-2.0 |
RyanCavanaugh/TypeScript | tests/cases/fourslash/protoVarInContextualObjectLiteral.ts | 2177 | /// <reference path='fourslash.ts' />
////var o1 : {
//// __proto__: number;
//// p: number;
////} = {
//// /*1*/
//// };
////var o2: {
//// __proto__: number;
//// p: number;
////} = {
//// /*2*/
//// };
////var o3: {
//// "__proto__": number;
//// p: number;
////} = {
//// /*3*/
//// };
////var o4: {
//// "__proto__": number;
//// p: number;
////} = {
//// /*4*/
//// };
////var o5: {
//// __proto__: number;
//// ___proto__: string;
//// p: number;
////} = {
//// /*5*/
//// };
////var o6: {
//// __proto__: number;
//// ___proto__: string;
//// p: number;
////} = {
//// /*6*/
//// };
// Completion entries expected while filling in the object literals above.
const tripleProto: FourSlashInterface.ExpectedCompletionEntry = { name: "___proto__", text: "(property) ___proto__: string" };
const proto: FourSlashInterface.ExpectedCompletionEntry = { name: "__proto__", text: "(property) __proto__: number" };
const protoQuoted: FourSlashInterface.ExpectedCompletionEntry = { name: "__proto__", text: '(property) "__proto__": number' };
const p: FourSlashInterface.ExpectedCompletionEntry = { name: "p", text: "(property) p: number" };

// o1: unquoted __proto__ member -- offered first, then removed from the
// completion list after being written (unquoted).
verify.completions({ marker: "1", exact: [proto, p] });
edit.insert('__proto__: 10,');
verify.completions({ exact: p });

// o2: same contextual type, but the member is written with quotes.
verify.completions({ marker: "2", exact: [proto, p] });
edit.insert('"__proto__": 10,');
verify.completions({ exact: p });

// o3: contextual type declares "__proto__" quoted; member written unquoted.
verify.completions({ marker: "3", exact: [protoQuoted, p] })
edit.insert('__proto__: 10,');
verify.completions({ exact: p });

// o4: quoted declaration, quoted insertion.
verify.completions({ marker: "4", exact: [protoQuoted, p] });
edit.insert('"__proto__": 10,');
verify.completions({ exact: p });

// o5/o6: __proto__ and ___proto__ coexist; each insertion removes only the
// matching entry regardless of quoting or insertion order.
verify.completions({ marker: "5", exact: [proto, tripleProto, p] });
edit.insert('__proto__: 10,');
verify.completions({ exact: [tripleProto, p] });
edit.insert('"___proto__": "10",');
verify.completions({ exact: p });

verify.completions({ marker: "6", exact: [proto, tripleProto, p] });
edit.insert('___proto__: "10",');
verify.completions({ exact: [proto, p] });
edit.insert('"__proto__": 10,');
verify.completions({ exact: p });
| apache-2.0 |
davidroberts63/chutzpah | Facts/Testable.cs | 1201 | using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using Moq;
using StructureMap.AutoMocking;
namespace Chutzpah.Facts
{
public class Testable<TClassUnderTest> where TClassUnderTest : class
{
protected MoqAutoMocker<TClassUnderTest> autoMocker = new MoqAutoMocker<TClassUnderTest>();
public Testable()
{
}
public Testable(Action<Testable<TClassUnderTest>> setup)
{
setup(this);
}
public Mock<TDependencyToMock> Mock<TDependencyToMock>() where TDependencyToMock : class
{
var a = autoMocker.Get<TDependencyToMock>();
return Moq.Mock.Get(a);
}
public void Inject<T>(T type)
{
autoMocker.Inject(type);
}
public void InjectArray<T>(T[] types)
{
autoMocker.InjectArray(types);
}
public void MockSelf()
{
autoMocker.PartialMockTheClassUnderTest();
}
public TClassUnderTest ClassUnderTest
{
get { return autoMocker.ClassUnderTest; }
}
}
}
| apache-2.0 |
ingvagabund/origin | vendor/gonum.org/v1/gonum/graph/graphs/gen/duplication.go | 3242 | // Copyright ©2015 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gen
import (
"fmt"
"math"
"sort"
"golang.org/x/exp/rand"
"gonum.org/v1/gonum/graph"
"gonum.org/v1/gonum/graph/internal/ordered"
)
// UndirectedMutator is an undirected graph builder that can also remove
// edges; Duplication needs both capabilities because edges are added and
// then probabilistically pruned.
type UndirectedMutator interface {
	graph.UndirectedBuilder
	graph.EdgeRemover
}
// Duplication constructs a graph in the destination, dst, of order n. New nodes
// are created by duplicating an existing node and all its edges. Each new edge is
// deleted with probability delta. Additional edges are added between the new node
// and existing nodes with probability alpha/|V|. An exception to this addition
// rule is made for the parent node when sigma is not NaN; in this case an edge is
// created with probability sigma. With the exception of the sigma parameter, this
// corresponds to the completely correlated case in doi:10.1016/S0022-5193(03)00028-6.
// If src is not nil it is used as the random source, otherwise rand.Float64 is used.
func Duplication(dst UndirectedMutator, n int, delta, alpha, sigma float64, src rand.Source) error {
	// As described in doi:10.1016/S0022-5193(03)00028-6 but
	// also clarified in doi:10.1186/gb-2007-8-4-r51.
	// Parameter validation: delta and sigma are probabilities in [0, 1];
	// alpha must be a strictly positive probability.
	if delta < 0 || delta > 1 {
		return fmt.Errorf("gen: bad delta: delta=%v", delta)
	}
	if alpha <= 0 || alpha > 1 {
		return fmt.Errorf("gen: bad alpha: alpha=%v", alpha)
	}
	if sigma < 0 || sigma > 1 {
		return fmt.Errorf("gen: bad sigma: sigma=%v", sigma)
	}
	// Use the supplied source when given so results are reproducible;
	// otherwise fall back to the global rand functions.
	var (
		rnd  func() float64
		rndN func(int) int
	)
	if src == nil {
		rnd = rand.Float64
		rndN = rand.Intn
	} else {
		r := rand.New(src)
		rnd = r.Float64
		rndN = r.Intn
	}

	// Nodes are sorted by ID so that iteration (and therefore the sequence
	// of random draws) is deterministic for a given source.
	nodes := graph.NodesOf(dst.Nodes())
	sort.Sort(ordered.ByID(nodes))
	if len(nodes) == 0 {
		// Seed an empty destination with a single node; it counts toward n.
		n--
		u := dst.NewNode()
		dst.AddNode(u)
		nodes = append(nodes, u)
	}
	for i := 0; i < n; i++ {
		// Pick a random parent u and create its duplicate d.
		u := nodes[rndN(len(nodes))]
		d := dst.NewNode()
		did := d.ID()

		// Add the duplicate node.
		dst.AddNode(d)

		// Loop until we have connectivity
		// into the rest of the graph.
		for {
			// Add edges to parent's neighbours.
			// Each inherited edge survives with probability 1-delta.
			to := graph.NodesOf(dst.From(u.ID()))
			sort.Sort(ordered.ByID(to))
			for _, v := range to {
				vid := v.ID()
				if rnd() < delta || dst.HasEdgeBetween(vid, did) {
					continue
				}
				// Edges are oriented low-ID -> high-ID by convention here --
				// presumably to keep edge construction canonical; verify
				// against the dst implementation's expectations.
				if vid < did {
					dst.SetEdge(dst.NewEdge(v, d))
				} else {
					dst.SetEdge(dst.NewEdge(d, v))
				}
			}

			// Add edges to old nodes.
			// Each pre-existing node is linked with probability alpha/|V|,
			// except the parent, which uses sigma when sigma is not NaN.
			scaledAlpha := alpha / float64(len(nodes))
			for _, v := range nodes {
				uid := u.ID()
				vid := v.ID()
				switch vid {
				case uid:
					if !math.IsNaN(sigma) {
						// The first duplication (i == 0) always connects to
						// its parent to guarantee initial connectivity.
						if i == 0 || rnd() < sigma {
							if vid < did {
								dst.SetEdge(dst.NewEdge(v, d))
							} else {
								dst.SetEdge(dst.NewEdge(d, v))
							}
						}
						continue
					}
					// sigma is NaN: treat the parent like any other old node.
					fallthrough
				default:
					if rnd() < scaledAlpha && !dst.HasEdgeBetween(vid, did) {
						if vid < did {
							dst.SetEdge(dst.NewEdge(v, d))
						} else {
							dst.SetEdge(dst.NewEdge(d, v))
						}
					}
				}
			}

			// Retry the whole edge-attachment pass if the duplicate ended up
			// isolated, so every new node joins the connected structure.
			if dst.From(did).Len() != 0 {
				break
			}
		}

		nodes = append(nodes, d)
	}

	return nil
}
| apache-2.0 |
cshannon/activemq-artemis | artemis-server/src/main/java/org/apache/activemq/artemis/core/persistence/impl/nullpm/NullStorageManager.java | 18140 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.artemis.core.persistence.impl.nullpm;
import javax.transaction.xa.Xid;
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Executor;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.activemq.artemis.api.core.Message;
import org.apache.activemq.artemis.api.core.Pair;
import org.apache.activemq.artemis.api.core.SimpleString;
import org.apache.activemq.artemis.core.io.IOCallback;
import org.apache.activemq.artemis.core.io.IOCriticalErrorListener;
import org.apache.activemq.artemis.core.io.SequentialFile;
import org.apache.activemq.artemis.core.journal.Journal;
import org.apache.activemq.artemis.core.journal.JournalLoadInformation;
import org.apache.activemq.artemis.core.paging.PageTransactionInfo;
import org.apache.activemq.artemis.core.paging.PagedMessage;
import org.apache.activemq.artemis.core.paging.PagingManager;
import org.apache.activemq.artemis.core.paging.PagingStore;
import org.apache.activemq.artemis.core.paging.cursor.PagePosition;
import org.apache.activemq.artemis.core.persistence.AddressBindingInfo;
import org.apache.activemq.artemis.core.persistence.GroupingInfo;
import org.apache.activemq.artemis.core.persistence.OperationContext;
import org.apache.activemq.artemis.core.persistence.QueueBindingInfo;
import org.apache.activemq.artemis.core.persistence.QueueStatus;
import org.apache.activemq.artemis.core.persistence.StorageManager;
import org.apache.activemq.artemis.core.persistence.config.PersistedAddressSetting;
import org.apache.activemq.artemis.core.persistence.config.PersistedRoles;
import org.apache.activemq.artemis.core.persistence.impl.PageCountPending;
import org.apache.activemq.artemis.core.postoffice.Binding;
import org.apache.activemq.artemis.core.postoffice.PostOffice;
import org.apache.activemq.artemis.core.replication.ReplicationManager;
import org.apache.activemq.artemis.core.server.LargeServerMessage;
import org.apache.activemq.artemis.core.server.MessageReference;
import org.apache.activemq.artemis.core.server.RouteContextList;
import org.apache.activemq.artemis.core.server.files.FileStoreMonitor;
import org.apache.activemq.artemis.core.server.group.impl.GroupBinding;
import org.apache.activemq.artemis.core.server.impl.AddressInfo;
import org.apache.activemq.artemis.core.server.impl.JournalLoader;
import org.apache.activemq.artemis.core.transaction.ResourceManager;
import org.apache.activemq.artemis.core.transaction.Transaction;
/**
 * A {@link StorageManager} implementation that persists nothing: every store,
 * delete and update operation is a no-op, loads return empty results, and the
 * only state kept is an in-memory ID sequence and a started flag.  Useful
 * where a StorageManager is required but durability is not.
 */
public class NullStorageManager implements StorageManager {

   // Monotonic source for generateID(); reset to 0 on stop().
   private final AtomicLong idSequence = new AtomicLong(0);

   // Guards against double start/stop; volatile because start/stop and
   // isStarted may be called from different threads.
   private volatile boolean started;

   // Held but never invoked by this class itself.
   private final IOCriticalErrorListener ioCriticalErrorListener;

   public NullStorageManager(IOCriticalErrorListener ioCriticalErrorListener) {
      this.ioCriticalErrorListener = ioCriticalErrorListener;
   }

   public NullStorageManager() {
      // Default listener just prints the error; there is no journal to shut down.
      this(new IOCriticalErrorListener() {
         @Override
         public void onIOException(Throwable code, String message, SequentialFile file) {
            code.printStackTrace();
         }
      });
   }

   @Override
   public void criticalError(Throwable error) {
   }

   @Override
   public long storeQueueStatus(long queueID, QueueStatus status) throws Exception {
      return 0;
   }

   @Override
   public void deleteQueueStatus(long recordID) throws Exception {

   }

   @Override
   public void injectMonitor(FileStoreMonitor monitor) throws Exception {

   }

   // Shared OperationContext whose callbacks complete immediately and whose
   // waits return at once -- there is never any pending I/O to wait for.
   private static final OperationContext dummyContext = new OperationContext() {

      @Override
      public void onError(final int errorCode, final String errorMessage) {
      }

      @Override
      public void done() {
      }

      @Override
      public void executeOnCompletion(IOCallback runnable, boolean storeOnly) {
         runnable.done();
      }

      @Override
      public void storeLineUp() {
      }

      @Override
      public boolean waitCompletion(final long timeout) throws Exception {
         return true;
      }

      @Override
      public void waitCompletion() throws Exception {
      }

      @Override
      public void replicationLineUp() {
      }

      @Override
      public void replicationDone() {
      }

      @Override
      public void pageSyncLineUp() {
      }

      @Override
      public void pageSyncDone() {
      }

      @Override
      public void executeOnCompletion(final IOCallback runnable) {
         runnable.done();
      }
   };

   @Override
   public void deleteQueueBinding(long tx, final long queueBindingID) throws Exception {
   }

   @Override
   public void addAddressBinding(long tx, AddressInfo addressInfo) throws Exception {
   }

   @Override
   public void deleteAddressBinding(long tx, long addressBindingID) throws Exception {
   }

   @Override
   public void commit(final long txID) throws Exception {
   }

   @Override
   public JournalLoadInformation loadBindingJournal(final List<QueueBindingInfo> queueBindingInfos,
                                                   final List<GroupingInfo> groupingInfos,
                                                   final List<AddressBindingInfo> addressBindingInfos) throws Exception {
      // Nothing was ever stored, so loading yields an empty journal description.
      return new JournalLoadInformation();
   }

   @Override
   public void prepare(final long txID, final Xid xid) throws Exception {
   }

   @Override
   public void rollback(final long txID) throws Exception {
   }

   @Override
   public void rollbackBindings(final long txID) throws Exception {
   }

   @Override
   public void commitBindings(final long txID) throws Exception {
   }

   @Override
   public void storeReference(final long queueID, final long messageID, final boolean last) throws Exception {
   }

   @Override
   public void storeReferenceTransactional(final long txID, final long queueID, final long messageID) throws Exception {
   }

   @Override
   public void storeAcknowledge(final long queueID, final long messageID) throws Exception {
   }

   @Override
   public void storeAcknowledgeTransactional(final long txID,
                                             final long queueID,
                                             final long messageiD) throws Exception {
   }

   @Override
   public void deleteMessage(final long messageID) throws Exception {
   }

   @Override
   public void storeMessage(final Message message) throws Exception {
   }

   @Override
   public void storeMessageTransactional(final long txID, final Message message) throws Exception {
   }

   @Override
   public void updateScheduledDeliveryTime(final MessageReference ref) throws Exception {
   }

   @Override
   public void updateScheduledDeliveryTimeTransactional(final long txID, final MessageReference ref) throws Exception {
   }

   @Override
   public void storePageTransaction(final long txID, final PageTransactionInfo pageTransaction) throws Exception {
   }

   @Override
   public void updateDeliveryCount(final MessageReference ref) throws Exception {
   }

   @Override
   public void storeDuplicateID(final SimpleString address, final byte[] duplID, final long recordID) throws Exception {
   }

   @Override
   public void storeDuplicateIDTransactional(final long txID,
                                             final SimpleString address,
                                             final byte[] duplID,
                                             final long recordID) throws Exception {
   }

   @Override
   public void updateDuplicateIDTransactional(final long txID,
                                              final SimpleString address,
                                              final byte[] duplID,
                                              final long recordID) throws Exception {
   }

   @Override
   public long storeHeuristicCompletion(final Xid xid, final boolean isCommit) throws Exception {
      // Nothing is stored, but callers still get a unique record id.
      return generateID();
   }

   @Override
   public void deleteHeuristicCompletion(final long txID) throws Exception {
   }

   @Override
   public void addQueueBinding(final long tx, final Binding binding) throws Exception {
   }

   @Override
   public LargeServerMessage createLargeMessage() {
      return new NullStorageLargeServerMessage();
   }

   @Override
   public LargeServerMessage createLargeMessage(final long id, final Message message) {
      NullStorageLargeServerMessage largeMessage = new NullStorageLargeServerMessage();

      largeMessage.copyHeadersAndProperties(message);

      largeMessage.setMessageID(id);

      return largeMessage;
   }

   @Override
   public long generateID() {
      long id = idSequence.getAndIncrement();

      return id;
   }

   @Override
   public long getCurrentID() {
      return idSequence.get();
   }

   @Override
   public synchronized void start() throws Exception {
      if (started) {
         throw new IllegalStateException("Already started");
      }

      started = true;
   }

   @Override
   public synchronized void stop() throws Exception {
      if (!started) {
         throw new IllegalStateException("Not started");
      }

      // Restart from scratch: the id sequence is the only state this
      // implementation keeps.
      idSequence.set(0);

      started = false;
   }

   @Override
   public synchronized boolean isStarted() {
      return started;
   }

   @Override
   public JournalLoadInformation loadMessageJournal(final PostOffice postOffice,
                                                    final PagingManager pagingManager,
                                                    final ResourceManager resourceManager,
                                                    final Map<Long, QueueBindingInfo> queueInfos,
                                                    final Map<SimpleString, List<Pair<byte[], Long>>> duplicateIDMap,
                                                    final Set<Pair<Long, Long>> pendingLargeMessages,
                                                    List<PageCountPending> pendingNonTXPageCounter,
                                                    final JournalLoader journalLoader) throws Exception {
      return new JournalLoadInformation();
   }

   @Override
   public void deleteDuplicateIDTransactional(final long txID, final long recordID) throws Exception {
   }

   @Override
   public void deleteDuplicateID(final long recordID) throws Exception {
   }

   @Override
   public void pageClosed(final SimpleString storeName, final int pageNumber) {
   }

   @Override
   public void pageDeleted(final SimpleString storeName, final int pageNumber) {
   }

   @Override
   public void pageWrite(final PagedMessage message, final int pageNumber) {
   }

   @Override
   public void addGrouping(final GroupBinding groupBinding) throws Exception {
   }

   @Override
   public void deleteGrouping(final long tx, final GroupBinding groupBinding) throws Exception {
   }

   @Override
   public boolean waitOnOperations(final long timeout) throws Exception {
      // No asynchronous operations exist, so every wait "succeeds".
      return true;
   }

   @Override
   public void afterCompleteOperations(final IOCallback run) {
      run.done();
   }

   @Override
   public void afterStoreOperations(IOCallback run) {
      run.done();
   }

   @Override
   public void waitOnOperations() throws Exception {
   }

   @Override
   public OperationContext getContext() {
      return NullStorageManager.dummyContext;
   }

   @Override
   public OperationContext newContext(final Executor executor) {
      return NullStorageManager.dummyContext;
   }

   @Override
   public OperationContext newSingleThreadContext() {
      return NullStorageManager.dummyContext;
   }

   @Override
   public void setContext(final OperationContext context) {
   }

   @Override
   public void clearContext() {
   }

   @Override
   public List<PersistedAddressSetting> recoverAddressSettings() throws Exception {
      return Collections.emptyList();
   }

   @Override
   public void storeAddressSetting(final PersistedAddressSetting addressSetting) throws Exception {
   }

   @Override
   public List<PersistedRoles> recoverPersistedRoles() throws Exception {
      return Collections.emptyList();
   }

   @Override
   public void storeSecurityRoles(final PersistedRoles persistedRoles) throws Exception {
   }

   @Override
   public void deleteAddressSetting(final SimpleString addressMatch) throws Exception {
   }

   @Override
   public void deleteSecurityRoles(final SimpleString addressMatch) throws Exception {
   }

   @Override
   public void deletePageTransactional(final long recordID) throws Exception {
   }

   @Override
   public void updatePageTransaction(final long txID,
                                     final PageTransactionInfo pageTransaction,
                                     final int depage) throws Exception {
   }

   @Override
   public void storeCursorAcknowledge(final long queueID, final PagePosition position) {
   }

   @Override
   public void storeCursorAcknowledgeTransactional(final long txID, final long queueID, final PagePosition position) {
   }

   @Override
   public void deleteCursorAcknowledgeTransactional(final long txID, final long ackID) throws Exception {
   }

   @Override
   public void deleteCursorAcknowledge(long ackID) throws Exception {
   }

   @Override
   public void storePageCompleteTransactional(long txID, long queueID, PagePosition position) throws Exception {
   }

   @Override
   public void deletePageComplete(long ackID) throws Exception {
   }

   @Override
   public long storePageCounter(final long txID, final long queueID, final long value) throws Exception {
      return 0;
   }

   @Override
   public long storePendingCounter(long queueID, long pageID, int inc) throws Exception {
      return -1;
   }

   @Override
   public void deleteIncrementRecord(final long txID, final long recordID) throws Exception {
   }

   @Override
   public void deletePageCounter(final long txID, final long recordID) throws Exception {
   }

   @Override
   public void deletePendingPageCounter(long txID, long recordID) throws Exception {
   }

   @Override
   public long storePageCounterInc(final long txID, final long queueID, final int add) throws Exception {
      return 0;
   }

   @Override
   public long storePageCounterInc(final long queueID, final int add) throws Exception {
      return 0;
   }

   @Override
   public void commit(final long txID, final boolean lineUpContext) throws Exception {
   }

   @Override
   public void lineUpContext() {
   }

   @Override
   public void confirmPendingLargeMessageTX(final Transaction transaction,
                                            final long messageID,
                                            final long recordID) throws Exception {
   }

   @Override
   public void confirmPendingLargeMessage(final long recordID) throws Exception {
   }

   @Override
   public void stop(final boolean ioCriticalError, boolean sendFailover) throws Exception {
   }

   @Override
   public Journal getBindingsJournal() {
      return null;
   }

   @Override
   public Journal getMessageJournal() {
      return null;
   }

   @Override
   public void startReplication(final ReplicationManager replicationManager,
                                final PagingManager pagingManager,
                                final String nodeID,
                                final boolean autoFailBack,
                                long initialReplicationSyncTimeout) throws Exception {
      // no-op
   }

   @Override
   public boolean addToPage(PagingStore store,
                            Message msg,
                            Transaction tx,
                            RouteContextList listCtx) throws Exception {
      /**
       * Exposing the read-lock here is an encapsulation violation done in order to keep the code
       * simpler. The alternative would be to add a second method, say 'verifyPaging', to
       * PagingStore.
       * <p>
       * Adding this second method would also be more surprise prone as it would require a certain
       * calling order.
       * <p>
       * The reasoning is that exposing the lock is more explicit and therefore `less bad`.
       */
      return store.page(msg, tx, listCtx, null);
   }

   @Override
   public void stopReplication() {
      // no-op
   }

   @Override
   public SequentialFile createFileForLargeMessage(final long messageID, final LargeMessageExtension extension) {
      // Large-message files require real storage; this implementation has none.
      throw new UnsupportedOperationException();
   }

   @Override
   public void addBytesToLargeMessage(SequentialFile appendFile, long messageID, byte[] bytes) throws Exception {
      // no-op
   }

   @Override
   public void beforePageRead() throws Exception {
   }

   @Override
   public void afterPageRead() throws Exception {
   }

   @Override
   public ByteBuffer allocateDirectBuffer(final int size) {
      return ByteBuffer.allocateDirect(size);
   }

   @Override
   public void freeDirectBuffer(final ByteBuffer buffer) {
      // We can just have hope on GC here :-)
   }

   @Override
   public void storeID(final long journalID, final long id) throws Exception {
      // no-op
   }

   @Override
   public void readLock() {
      // no-op
   }

   @Override
   public void readUnLock() {
      // no-op
   }

   @Override
   public void persistIdGenerator() {
      // no-op
   }

   @Override
   public void deleteID(long journalD) throws Exception {
   }
}
| apache-2.0 |
google-code-export/protostuff | protostuff-json/src/test/java/com/dyuproject/protostuff/JsonXNumericRuntimeObjectSchemaTest.java | 1138 | //========================================================================
//Copyright 2007-2011 David Yu [email protected]
//------------------------------------------------------------------------
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//http://www.apache.org/licenses/LICENSE-2.0
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
//========================================================================
package com.dyuproject.protostuff;
/**
* Test jsonx (numeric) ser/deser for runtime {@link Object} fields.
*
* @author David Yu
* @created Feb 4, 2011
*/
public class JsonXNumericRuntimeObjectSchemaTest extends JsonXRuntimeObjectSchemaTest
{
protected boolean isNumeric()
{
return true;
}
}
| apache-2.0 |
tnachen/kafka | core/src/main/scala/kafka/producer/Partitioner.scala | 1080 | /**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.producer
trait Partitioner[T] {
/**
* Uses the key to calculate a partition bucket id for routing
* the data to the appropriate broker partition
* @return an integer between 0 and numPartitions-1
*/
def partition(key: T, numPartitions: Int): Int
}
| apache-2.0 |
Stacey-Gammon/elasticsearch | test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java | 16767 | /*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations;
import org.apache.lucene.index.AssertingDirectoryReader;
import org.apache.lucene.index.CompositeReaderContext;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.AssertingIndexSearcher;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryCache;
import org.apache.lucene.search.QueryCachingPolicy;
import org.apache.lucene.search.Weight;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.MockBigArrays;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
import org.elasticsearch.index.cache.bitset.BitsetFilterCache.Listener;
import org.elasticsearch.index.cache.query.DisabledQueryCache;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.fielddata.IndexFieldDataCache;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.ContentPath;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper.BuilderContext;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.ObjectMapper;
import org.elasticsearch.index.mapper.ObjectMapper.Nested;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.query.support.NestedScope;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
import org.elasticsearch.mock.orig.Mockito;
import org.elasticsearch.search.fetch.FetchPhase;
import org.elasticsearch.search.fetch.subphase.DocValueFieldsFetchSubPhase;
import org.elasticsearch.search.fetch.subphase.FetchSourceSubPhase;
import org.elasticsearch.search.internal.ContextIndexSearcher;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.lookup.SearchLookup;
import org.elasticsearch.test.ESTestCase;
import org.junit.After;
import org.mockito.Matchers;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import static org.mockito.Matchers.anyObject;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
* Base class for testing {@link Aggregator} implementations.
* Provides helpers for constructing and searching an {@link Aggregator} implementation based on a provided
* {@link AggregationBuilder} instance.
*/
public abstract class AggregatorTestCase extends ESTestCase {
private static final String NESTEDFIELD_PREFIX = "nested_";
private List<Releasable> releasables = new ArrayList<>();
/** Create a factory for the given aggregation builder. */
protected AggregatorFactory<?> createAggregatorFactory(AggregationBuilder aggregationBuilder,
IndexSearcher indexSearcher,
MappedFieldType... fieldTypes) throws IOException {
IndexSettings indexSettings = createIndexSettings();
SearchContext searchContext = createSearchContext(indexSearcher, indexSettings);
CircuitBreakerService circuitBreakerService = new NoneCircuitBreakerService();
when(searchContext.bigArrays()).thenReturn(new MockBigArrays(Settings.EMPTY, circuitBreakerService));
// TODO: now just needed for top_hits, this will need to be revised for other agg unit tests:
MapperService mapperService = mapperServiceMock();
when(mapperService.getIndexSettings()).thenReturn(indexSettings);
when(mapperService.hasNested()).thenReturn(false);
when(searchContext.mapperService()).thenReturn(mapperService);
IndexFieldDataService ifds = new IndexFieldDataService(indexSettings,
new IndicesFieldDataCache(Settings.EMPTY, new IndexFieldDataCache.Listener() {
}), circuitBreakerService, mapperService);
when(searchContext.getForField(Mockito.any(MappedFieldType.class))).thenAnswer(new Answer<Object>() {
@Override
public Object answer(InvocationOnMock invocationOnMock) throws Throwable {
return ifds.getForField((MappedFieldType) invocationOnMock.getArguments()[0]);
}
});
SearchLookup searchLookup = new SearchLookup(mapperService, ifds::getForField, new String[]{"type"});
when(searchContext.lookup()).thenReturn(searchLookup);
QueryShardContext queryShardContext = queryShardContextMock(mapperService, fieldTypes, circuitBreakerService);
when(searchContext.getQueryShardContext()).thenReturn(queryShardContext);
for (MappedFieldType fieldType : fieldTypes) {
when(searchContext.smartNameFieldType(fieldType.name())).thenReturn(fieldType);
}
return aggregationBuilder.build(searchContext, null);
}
protected <A extends Aggregator> A createAggregator(AggregationBuilder aggregationBuilder,
IndexSearcher indexSearcher,
MappedFieldType... fieldTypes) throws IOException {
@SuppressWarnings("unchecked")
A aggregator = (A) createAggregatorFactory(aggregationBuilder, indexSearcher, fieldTypes).create(null, true);
return aggregator;
}
protected SearchContext createSearchContext(IndexSearcher indexSearcher, IndexSettings indexSettings) {
Engine.Searcher searcher = new Engine.Searcher("aggregator_test", indexSearcher);
QueryCache queryCache = new DisabledQueryCache(indexSettings);
QueryCachingPolicy queryCachingPolicy = new QueryCachingPolicy() {
@Override
public void onUse(Query query) {
}
@Override
public boolean shouldCache(Query query) throws IOException {
// never cache a query
return false;
}
};
ContextIndexSearcher contextIndexSearcher = new ContextIndexSearcher(searcher, queryCache, queryCachingPolicy);
SearchContext searchContext = mock(SearchContext.class);
when(searchContext.numberOfShards()).thenReturn(1);
when(searchContext.searcher()).thenReturn(contextIndexSearcher);
when(searchContext.fetchPhase())
.thenReturn(new FetchPhase(Arrays.asList(new FetchSourceSubPhase(), new DocValueFieldsFetchSubPhase())));
when(searchContext.getObjectMapper(anyString())).thenAnswer(invocation -> {
String fieldName = (String) invocation.getArguments()[0];
if (fieldName.startsWith(NESTEDFIELD_PREFIX)) {
BuilderContext context = new BuilderContext(indexSettings.getSettings(), new ContentPath());
return new ObjectMapper.Builder<>(fieldName).nested(Nested.newNested(false, false)).build(context);
}
return null;
});
when(searchContext.bitsetFilterCache()).thenReturn(new BitsetFilterCache(indexSettings, mock(Listener.class)));
doAnswer(invocation -> {
/* Store the releasables so we can release them at the end of the test case. This is important because aggregations don't
* close their sub-aggregations. This is fairly similar to what the production code does. */
releasables.add((Releasable) invocation.getArguments()[0]);
return null;
}).when(searchContext).addReleasable(anyObject(), anyObject());
return searchContext;
}
protected IndexSettings createIndexSettings() {
return new IndexSettings(
IndexMetaData.builder("_index").settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT))
.numberOfShards(1)
.numberOfReplicas(0)
.creationDate(System.currentTimeMillis())
.build(),
Settings.EMPTY
);
}
/**
* sub-tests that need a more complex mock can overwrite this
*/
protected MapperService mapperServiceMock() {
return mock(MapperService.class);
}
/**
* sub-tests that need a more complex mock can overwrite this
*/
protected QueryShardContext queryShardContextMock(MapperService mapperService, MappedFieldType[] fieldTypes,
CircuitBreakerService circuitBreakerService) {
QueryShardContext queryShardContext = mock(QueryShardContext.class);
when(queryShardContext.getMapperService()).thenReturn(mapperService);
for (MappedFieldType fieldType : fieldTypes) {
when(queryShardContext.fieldMapper(fieldType.name())).thenReturn(fieldType);
when(queryShardContext.getForField(fieldType)).then(invocation -> fieldType.fielddataBuilder(mapperService.getIndexSettings()
.getIndex().getName())
.build(mapperService.getIndexSettings(), fieldType, new IndexFieldDataCache.None(), circuitBreakerService, mapperService));
}
NestedScope nestedScope = new NestedScope();
when(queryShardContext.isFilter()).thenCallRealMethod();
Mockito.doCallRealMethod().when(queryShardContext).setIsFilter(Matchers.anyBoolean());
when(queryShardContext.nestedScope()).thenReturn(nestedScope);
return queryShardContext;
}
protected <A extends InternalAggregation, C extends Aggregator> A search(IndexSearcher searcher,
Query query,
AggregationBuilder builder,
MappedFieldType... fieldTypes) throws IOException {
C a = createAggregator(builder, searcher, fieldTypes);
a.preCollection();
searcher.search(query, a);
a.postCollection();
@SuppressWarnings("unchecked")
A internalAgg = (A) a.buildAggregation(0L);
return internalAgg;
}
/**
* Divides the provided {@link IndexSearcher} in sub-searcher, one for each segment,
* builds an aggregator for each sub-searcher filtered by the provided {@link Query} and
* returns the reduced {@link InternalAggregation}.
*/
protected <A extends InternalAggregation, C extends Aggregator> A searchAndReduce(IndexSearcher searcher,
Query query,
AggregationBuilder builder,
MappedFieldType... fieldTypes) throws IOException {
final IndexReaderContext ctx = searcher.getTopReaderContext();
final ShardSearcher[] subSearchers;
if (ctx instanceof LeafReaderContext) {
subSearchers = new ShardSearcher[1];
subSearchers[0] = new ShardSearcher((LeafReaderContext) ctx, ctx);
} else {
final CompositeReaderContext compCTX = (CompositeReaderContext) ctx;
final int size = compCTX.leaves().size();
subSearchers = new ShardSearcher[size];
for(int searcherIDX=0;searcherIDX<subSearchers.length;searcherIDX++) {
final LeafReaderContext leave = compCTX.leaves().get(searcherIDX);
subSearchers[searcherIDX] = new ShardSearcher(leave, compCTX);
}
}
List<InternalAggregation> aggs = new ArrayList<> ();
Query rewritten = searcher.rewrite(query);
Weight weight = searcher.createWeight(rewritten, true, 1f);
C root = createAggregator(builder, searcher, fieldTypes);
for (ShardSearcher subSearcher : subSearchers) {
C a = createAggregator(builder, subSearcher, fieldTypes);
a.preCollection();
subSearcher.search(weight, a);
a.postCollection();
aggs.add(a.buildAggregation(0L));
}
if (aggs.isEmpty()) {
return null;
} else {
if (randomBoolean() && aggs.size() > 1) {
// sometimes do an incremental reduce
int toReduceSize = aggs.size();
Collections.shuffle(aggs, random());
int r = randomIntBetween(1, toReduceSize);
List<InternalAggregation> toReduce = aggs.subList(0, r);
A reduced = (A) aggs.get(0).doReduce(toReduce,
new InternalAggregation.ReduceContext(root.context().bigArrays(), null, false));
aggs = new ArrayList<>(aggs.subList(r, toReduceSize));
aggs.add(reduced);
}
// now do the final reduce
@SuppressWarnings("unchecked")
A internalAgg = (A) aggs.get(0).doReduce(aggs, new InternalAggregation.ReduceContext(root.context().bigArrays(), null,
true));
return internalAgg;
}
}
private static class ShardSearcher extends IndexSearcher {
private final List<LeafReaderContext> ctx;
ShardSearcher(LeafReaderContext ctx, IndexReaderContext parent) {
super(parent);
this.ctx = Collections.singletonList(ctx);
}
public void search(Weight weight, Collector collector) throws IOException {
search(ctx, weight, collector);
}
@Override
public String toString() {
return "ShardSearcher(" + ctx.get(0) + ")";
}
}
protected static DirectoryReader wrap(DirectoryReader directoryReader) throws IOException {
return ElasticsearchDirectoryReader.wrap(directoryReader, new ShardId(new Index("_index", "_na_"), 0));
}
/**
* Added to randomly run with more assertions on the index searcher level,
* like {@link org.apache.lucene.util.LuceneTestCase#newSearcher(IndexReader)}, which can't be used because it also
* wraps in the IndexSearcher's IndexReader with other implementations that we can't handle. (e.g. ParallelCompositeReader)
*/
protected static IndexSearcher newIndexSearcher(IndexReader indexReader) {
if (randomBoolean()) {
return new AssertingIndexSearcher(random(), indexReader);
} else {
return new IndexSearcher(indexReader);
}
}
/**
* Added to randomly run with more assertions on the index reader level,
* like {@link org.apache.lucene.util.LuceneTestCase#wrapReader(IndexReader)}, which can't be used because it also
* wraps in the IndexReader with other implementations that we can't handle. (e.g. ParallelCompositeReader)
*/
protected static IndexReader maybeWrapReaderEs(DirectoryReader reader) throws IOException {
if (randomBoolean()) {
return new AssertingDirectoryReader(reader);
} else {
return reader;
}
}
@After
private void cleanupReleasables() {
Releasables.close(releasables);
releasables.clear();
}
}
| apache-2.0 |
guo7711/offline-editor-js | lib/tiles/FileSaver.js | 10047 | /* FileSaver.js
* A saveAs() FileSaver implementation.
* 2013-10-21
*
* By Eli Grey, http://eligrey.com
* License: X11/MIT
* See LICENSE.md
*/
/*global self */
/*jslint bitwise: true, regexp: true, confusion: true, es5: true, vars: true, white: true,
plusplus: true */
/*! @source http://purl.eligrey.com/github/FileSaver.js/blob/master/FileSaver.js */
O.esri.Tiles.saveAs =
// IE 10 support, see Eli Grey's original source
// || (typeof navigator !== 'undefined' && navigator.msSaveOrOpenBlob && navigator.msSaveOrOpenBlob.bind(navigator))
function(view) {
"use strict";
var
doc = view.document
// only get URL when necessary in case BlobBuilder.js hasn't overridden it yet
, get_URL = function() {
return view.URL || view.webkitURL || view;
}
, URL = view.URL || view.webkitURL || view
, save_link = doc.createElementNS("http://www.w3.org/1999/xhtml", "a")
, can_use_save_link = !view.externalHost && "download" in save_link
, click = function(node) {
var event = doc.createEvent("MouseEvents");
event.initMouseEvent(
"click", true, false, view, 0, 0, 0, 0, 0
, false, false, false, false, 0, null
);
node.dispatchEvent(event);
}
, webkit_req_fs = view.webkitRequestFileSystem
, req_fs = view.requestFileSystem || webkit_req_fs || view.mozRequestFileSystem
, throw_outside = function (ex) {
(view.setImmediate || view.setTimeout)(function() {
throw ex;
}, 0);
}
, force_saveable_type = "application/octet-stream"
, fs_min_size = 0
, deletion_queue = []
, process_deletion_queue = function() {
var i = deletion_queue.length;
while (i--) {
var file = deletion_queue[i];
if (typeof file === "string") { // file is an object URL
URL.revokeObjectURL(file);
} else { // file is a File
file.remove();
}
}
deletion_queue.length = 0; // clear queue
}
, dispatch = function(filesaver, event_types, event) {
event_types = [].concat(event_types);
var i = event_types.length;
while (i--) {
var listener = filesaver["on" + event_types[i]];
if (typeof listener === "function") {
try {
listener.call(filesaver, event || filesaver);
} catch (ex) {
throw_outside(ex);
}
}
}
}
, FileSaver = function(blob, name) {
// First try a.download, then web filesystem, then object URLs
var
filesaver = this
, type = blob.type
, blob_changed = false
, object_url
, target_view
, get_object_url = function() {
var object_url = get_URL().createObjectURL(blob);
deletion_queue.push(object_url);
return object_url;
}
, dispatch_all = function() {
dispatch(filesaver, "writestart progress write writeend".split(" "));
}
// on any filesys errors revert to saving with object URLs
, fs_error = function() {
// don't create more object URLs than needed
if (blob_changed || !object_url) {
object_url = get_object_url(blob);
}
if (target_view) {
target_view.location.href = object_url;
} else {
window.open(object_url, "_blank");
}
filesaver.readyState = filesaver.DONE;
dispatch_all();
}
, abortable = function(func) {
return function() {
if (filesaver.readyState !== filesaver.DONE) {
return func.apply(this, arguments);
}
};
}
, create_if_not_found = {create: true, exclusive: false}
, slice
;
filesaver.readyState = filesaver.INIT;
if (!name) {
name = "download";
}
if (can_use_save_link) {
object_url = get_object_url(blob);
// FF for Android has a nasty garbage collection mechanism
// that turns all objects that are not pure javascript into 'deadObject'
// this means `doc` and `save_link` are unusable and need to be recreated
// `view` is usable though:
doc = view.document;
save_link = doc.createElementNS("http://www.w3.org/1999/xhtml", "a");
save_link.href = object_url;
save_link.download = name;
var event = doc.createEvent("MouseEvents");
event.initMouseEvent(
"click", true, false, view, 0, 0, 0, 0, 0
, false, false, false, false, 0, null
);
save_link.dispatchEvent(event);
filesaver.readyState = filesaver.DONE;
dispatch_all();
return;
}
// Object and web filesystem URLs have a problem saving in Google Chrome when
// viewed in a tab, so I force save with application/octet-stream
// http://code.google.com/p/chromium/issues/detail?id=91158
if (view.chrome && type && type !== force_saveable_type) {
slice = blob.slice || blob.webkitSlice;
blob = slice.call(blob, 0, blob.size, force_saveable_type);
blob_changed = true;
}
// Since I can't be sure that the guessed media type will trigger a download
// in WebKit, I append .download to the filename.
// https://bugs.webkit.org/show_bug.cgi?id=65440
if (webkit_req_fs && name !== "download") {
name += ".download";
}
if (type === force_saveable_type || webkit_req_fs) {
target_view = view;
}
if (!req_fs) {
fs_error();
return;
}
fs_min_size += blob.size;
req_fs(view.TEMPORARY, fs_min_size, abortable(function(fs) {
fs.root.getDirectory("saved", create_if_not_found, abortable(function(dir) {
var save = function() {
dir.getFile(name, create_if_not_found, abortable(function(file) {
file.createWriter(abortable(function(writer) {
writer.onwriteend = function(event) {
target_view.location.href = file.toURL();
deletion_queue.push(file);
filesaver.readyState = filesaver.DONE;
dispatch(filesaver, "writeend", event);
};
writer.onerror = function() {
var error = writer.error;
if (error.code !== error.ABORT_ERR) {
fs_error();
}
};
"writestart progress write abort".split(" ").forEach(function(event) {
writer["on" + event] = filesaver["on" + event];
});
writer.write(blob);
filesaver.abort = function() {
writer.abort();
filesaver.readyState = filesaver.DONE;
};
filesaver.readyState = filesaver.WRITING;
}), fs_error);
}), fs_error);
};
dir.getFile(name, {create: false}, abortable(function(file) {
// delete file if it already exists
file.remove();
save();
}), abortable(function(ex) {
if (ex.code === ex.NOT_FOUND_ERR) {
save();
} else {
fs_error();
}
}));
}), fs_error);
}), fs_error);
}
, FS_proto = FileSaver.prototype
, saveAs = function(blob, name) {
return new FileSaver(blob, name);
}
;
FS_proto.abort = function() {
var filesaver = this;
filesaver.readyState = filesaver.DONE;
dispatch(filesaver, "abort");
};
FS_proto.readyState = FS_proto.INIT = 0;
FS_proto.WRITING = 1;
FS_proto.DONE = 2;
FS_proto.error =
FS_proto.onwritestart =
FS_proto.onprogress =
FS_proto.onwrite =
FS_proto.onabort =
FS_proto.onerror =
FS_proto.onwriteend =
null;
view.addEventListener("unload", process_deletion_queue, false);
return saveAs;
}(this.self || this.window || this.content);
// `self` is undefined in Firefox for Android content script context
// while `this` is nsIContentFrameMessageManager
// with an attribute `content` that corresponds to the window
//if (typeof module !== 'undefined') module.exports = saveAs;
| apache-2.0 |
shuainie/azure-sdk-for-net | src/DataFactoryManagement/Generated/IDataFactoryOperations.cs | 5999 | //
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Warning: This code was generated by a tool.
//
// Changes to this file may cause incorrect behavior and will be lost if the
// code is regenerated.
using System;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.Azure.Management.DataFactories.Models;
using Microsoft.WindowsAzure;
namespace Microsoft.Azure.Management.DataFactories
{
/// <summary>
/// Operations for managing data factories.
/// </summary>
public partial interface IDataFactoryOperations
{
/// <summary>
/// Create or update a data factory.
/// </summary>
/// <param name='resourceGroupName'>
/// The resource group name of the data factory.
/// </param>
/// <param name='parameters'>
/// The parameters required to create or update a data factory.
/// </param>
/// <param name='cancellationToken'>
/// Cancellation token.
/// </param>
/// <returns>
/// The create or update data factory operation response.
/// </returns>
Task<DataFactoryCreateOrUpdateResponse> BeginCreateOrUpdateAsync(string resourceGroupName, DataFactoryCreateOrUpdateParameters parameters, CancellationToken cancellationToken);
/// <summary>
/// Create or update a data factory.
/// </summary>
/// <param name='resourceGroupName'>
/// The resource group name of the data factory.
/// </param>
/// <param name='parameters'>
/// The parameters required to create or update a data factory.
/// </param>
/// <param name='cancellationToken'>
/// Cancellation token.
/// </param>
/// <returns>
/// The create or update data factory operation response.
/// </returns>
Task<DataFactoryCreateOrUpdateResponse> CreateOrUpdateAsync(string resourceGroupName, DataFactoryCreateOrUpdateParameters parameters, CancellationToken cancellationToken);
/// <summary>
/// Delete a data factory instance.
/// </summary>
/// <param name='resourceGroupName'>
/// The resource group name of the data factory.
/// </param>
/// <param name='dataFactoryName'>
/// A unique data factory instance name.
/// </param>
/// <param name='cancellationToken'>
/// Cancellation token.
/// </param>
/// <returns>
/// A standard service response including an HTTP status code and
/// request ID.
/// </returns>
Task<OperationResponse> DeleteAsync(string resourceGroupName, string dataFactoryName, CancellationToken cancellationToken);
/// <summary>
/// Gets a data factory instance.
/// </summary>
/// <param name='resourceGroupName'>
/// The resource group name of the data factory.
/// </param>
/// <param name='dataFactoryName'>
/// A unique data factory instance name.
/// </param>
/// <param name='cancellationToken'>
/// Cancellation token.
/// </param>
/// <returns>
/// The Get data factory operation response.
/// </returns>
Task<DataFactoryGetResponse> GetAsync(string resourceGroupName, string dataFactoryName, CancellationToken cancellationToken);
/// <summary>
/// The Get Operation Status operation returns the status of the
/// specified operation. After calling an asynchronous operation, you
/// can call Get Operation Status to determine whether the operation
/// has succeeded, failed, or is still in progress.
/// </summary>
/// <param name='operationStatusLink'>
/// Location value returned by the Begin operation.
/// </param>
/// <param name='cancellationToken'>
/// Cancellation token.
/// </param>
/// <returns>
/// The create or update data factory operation response.
/// </returns>
Task<DataFactoryCreateOrUpdateResponse> GetCreateOrUpdateStatusAsync(string operationStatusLink, CancellationToken cancellationToken);
/// <summary>
/// Gets the first page of data factory instances with the link to the
/// next page.
/// </summary>
/// <param name='resourceGroupName'>
/// The resource group name of the data factories.
/// </param>
/// <param name='cancellationToken'>
/// Cancellation token.
/// </param>
/// <returns>
/// The List data factories operation response.
/// </returns>
Task<DataFactoryListResponse> ListAsync(string resourceGroupName, CancellationToken cancellationToken);
/// <summary>
/// Gets the next page of data factory instances with the link to the
/// next page.
/// </summary>
/// <param name='nextLink'>
/// The url to the next data factories page.
/// </param>
/// <param name='cancellationToken'>
/// Cancellation token.
/// </param>
/// <returns>
/// The List data factories operation response.
/// </returns>
Task<DataFactoryListResponse> ListNextAsync(string nextLink, CancellationToken cancellationToken);
}
}
| apache-2.0 |
alivecor/tensorflow | tensorflow/contrib/seq2seq/python/ops/helper.py | 26000 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A library of helpers for use with SamplingDecoders.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.contrib.seq2seq.python.ops import decoder
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops.distributions import bernoulli
from tensorflow.python.ops.distributions import categorical
from tensorflow.python.util import nest
__all__ = [
"Helper",
"TrainingHelper",
"GreedyEmbeddingHelper",
"SampleEmbeddingHelper",
"CustomHelper",
"ScheduledEmbeddingTrainingHelper",
"ScheduledOutputTrainingHelper",
"InferenceHelper",
]
_transpose_batch_time = decoder._transpose_batch_time # pylint: disable=protected-access
def _unstack_ta(inp):
return tensor_array_ops.TensorArray(
dtype=inp.dtype, size=array_ops.shape(inp)[0],
element_shape=inp.get_shape()[1:]).unstack(inp)
@six.add_metaclass(abc.ABCMeta)
class Helper(object):
"""Interface for implementing sampling in seq2seq decoders.
Helper instances are used by `BasicDecoder`.
"""
@abc.abstractproperty
def batch_size(self):
"""Batch size of tensor returned by `sample`.
Returns a scalar int32 tensor.
"""
raise NotImplementedError("batch_size has not been implemented")
@abc.abstractproperty
def sample_ids_shape(self):
"""Shape of tensor returned by `sample`, excluding the batch dimension.
Returns a `TensorShape`.
"""
raise NotImplementedError("sample_ids_shape has not been implemented")
@abc.abstractproperty
def sample_ids_dtype(self):
"""DType of tensor returned by `sample`.
Returns a DType.
"""
raise NotImplementedError("sample_ids_dtype has not been implemented")
@abc.abstractmethod
def initialize(self, name=None):
"""Returns `(initial_finished, initial_inputs)`."""
pass
@abc.abstractmethod
def sample(self, time, outputs, state, name=None):
"""Returns `sample_ids`."""
pass
@abc.abstractmethod
def next_inputs(self, time, outputs, state, sample_ids, name=None):
"""Returns `(finished, next_inputs, next_state)`."""
pass
class CustomHelper(Helper):
  """Base abstract class that allows the user to customize sampling."""

  def __init__(self, initialize_fn, sample_fn, next_inputs_fn,
               sample_ids_shape=None, sample_ids_dtype=None):
    """Initializer.

    Args:
      initialize_fn: callable that returns `(finished, next_inputs)` for the
        first iteration.
      sample_fn: callable that takes `(time, outputs, state)` and emits
        tensor `sample_ids`.
      next_inputs_fn: callable that takes `(time, outputs, state, sample_ids)`
        and emits `(finished, next_inputs, next_state)`.
      sample_ids_shape: Either a list of integers, or a 1-D Tensor of type
        `int32`, the shape of each value in the `sample_ids` batch.
        Defaults to a scalar.
      sample_ids_dtype: The dtype of the `sample_ids` tensor.
        Defaults to int32.
    """
    self._initialize_fn = initialize_fn
    self._sample_fn = sample_fn
    self._next_inputs_fn = next_inputs_fn
    # Not known up front; derived from `finished` once initialize() runs.
    self._batch_size = None
    self._sample_ids_shape = tensor_shape.TensorShape(sample_ids_shape or [])
    self._sample_ids_dtype = sample_ids_dtype or dtypes.int32

  @property
  def batch_size(self):
    if self._batch_size is None:
      raise ValueError("batch_size accessed before initialize was called")
    return self._batch_size

  @property
  def sample_ids_shape(self):
    return self._sample_ids_shape

  @property
  def sample_ids_dtype(self):
    return self._sample_ids_dtype

  def initialize(self, name=None):
    scope = "%sInitialize" % type(self).__name__
    with ops.name_scope(name, scope):
      finished, next_inputs = self._initialize_fn()
      if self._batch_size is None:
        # Infer the batch size from the per-sequence `finished` vector.
        self._batch_size = array_ops.size(finished)
    return (finished, next_inputs)

  def sample(self, time, outputs, state, name=None):
    scope = "%sSample" % type(self).__name__
    with ops.name_scope(name, scope, (time, outputs, state)):
      return self._sample_fn(time=time, outputs=outputs, state=state)

  def next_inputs(self, time, outputs, state, sample_ids, name=None):
    scope = "%sNextInputs" % type(self).__name__
    with ops.name_scope(name, scope, (time, outputs, state)):
      return self._next_inputs_fn(
          time=time, outputs=outputs, state=state, sample_ids=sample_ids)
class TrainingHelper(Helper):
  """A helper for use during training.  Only reads inputs.

  Returned sample_ids are the argmax of the RNN output logits.
  """

  def __init__(self, inputs, sequence_length, time_major=False, name=None):
    """Initializer.

    Args:
      inputs: A (structure of) input tensors.
      sequence_length: An int32 vector tensor.
      time_major: Python bool.  Whether the tensors in `inputs` are time
        major.  If `False` (default), they are assumed to be batch major.
      name: Name scope for any created operations.

    Raises:
      ValueError: if `sequence_length` is not a 1D tensor.
    """
    with ops.name_scope(name, "TrainingHelper", [inputs, sequence_length]):
      inputs = ops.convert_to_tensor(inputs, name="inputs")
      if not time_major:
        # Everything is handled time-major internally.
        inputs = nest.map_structure(_transpose_batch_time, inputs)
      self._input_tas = nest.map_structure(_unstack_ta, inputs)
      self._sequence_length = ops.convert_to_tensor(
          sequence_length, name="sequence_length")
      if self._sequence_length.get_shape().ndims != 1:
        raise ValueError(
            "Expected sequence_length to be a vector, but received shape: %s" %
            self._sequence_length.get_shape())
      # Fed back once every sequence in the batch has finished.
      self._zero_inputs = nest.map_structure(
          lambda tensor: array_ops.zeros_like(tensor[0, :]), inputs)
      self._batch_size = array_ops.size(sequence_length)

  @property
  def batch_size(self):
    return self._batch_size

  @property
  def sample_ids_shape(self):
    return tensor_shape.TensorShape([])

  @property
  def sample_ids_dtype(self):
    return dtypes.int32

  def initialize(self, name=None):
    with ops.name_scope(name, "TrainingHelperInitialize"):
      # Sequences of length zero are finished before the first step.
      finished = math_ops.equal(0, self._sequence_length)
      everyone_done = math_ops.reduce_all(finished)
      first_inputs = control_flow_ops.cond(
          everyone_done, lambda: self._zero_inputs,
          lambda: nest.map_structure(lambda ta: ta.read(0), self._input_tas))
      return (finished, first_inputs)

  def sample(self, time, outputs, name=None, **unused_kwargs):
    with ops.name_scope(name, "TrainingHelperSample", [time, outputs]):
      # Outputs are logits; report the argmax as the "sampled" id.
      return math_ops.cast(math_ops.argmax(outputs, axis=-1), dtypes.int32)

  def next_inputs(self, time, outputs, state, name=None, **unused_kwargs):
    """next_inputs_fn for TrainingHelper."""
    with ops.name_scope(name, "TrainingHelperNextInputs",
                        [time, outputs, state]):
      next_time = time + 1
      finished = (next_time >= self._sequence_length)
      everyone_done = math_ops.reduce_all(finished)

      def _read_next(ta):
        return ta.read(next_time)

      next_inputs = control_flow_ops.cond(
          everyone_done, lambda: self._zero_inputs,
          lambda: nest.map_structure(_read_next, self._input_tas))
      return (finished, next_inputs, state)
class ScheduledEmbeddingTrainingHelper(TrainingHelper):
  """A training helper that adds scheduled sampling.

  Returns -1s for sample_ids where no sampling took place; valid sample id
  values elsewhere.
  """

  def __init__(self, inputs, sequence_length, embedding, sampling_probability,
               time_major=False, seed=None, scheduling_seed=None, name=None):
    """Initializer.

    Args:
      inputs: A (structure of) input tensors.
      sequence_length: An int32 vector tensor.
      embedding: A callable that takes a vector tensor of `ids` (argmax ids),
        or the `params` argument for `embedding_lookup`.
      sampling_probability: A 0D `float32` tensor: the probability of
        sampling categorically from the output ids instead of reading
        directly from the inputs.
      time_major: Python bool.  Whether the tensors in `inputs` are time
        major.  If `False` (default), they are assumed to be batch major.
      seed: The sampling seed.
      scheduling_seed: The schedule decision rule sampling seed.
      name: Name scope for any created operations.

    Raises:
      ValueError: if `sampling_probability` is not a scalar or vector.
    """
    with ops.name_scope(name, "ScheduledEmbeddingSamplingWrapper",
                        [embedding, sampling_probability]):
      if callable(embedding):
        self._embedding_fn = embedding
      else:
        # Treat `embedding` as the params matrix for embedding_lookup.
        self._embedding_fn = (
            lambda ids: embedding_ops.embedding_lookup(embedding, ids))
      self._sampling_probability = ops.convert_to_tensor(
          sampling_probability, name="sampling_probability")
      if self._sampling_probability.get_shape().ndims not in (0, 1):
        raise ValueError(
            "sampling_probability must be either a scalar or a vector. "
            "saw shape: %s" % (self._sampling_probability.get_shape()))
      self._seed = seed
      self._scheduling_seed = scheduling_seed
      super(ScheduledEmbeddingTrainingHelper, self).__init__(
          inputs=inputs,
          sequence_length=sequence_length,
          time_major=time_major,
          name=name)

  def initialize(self, name=None):
    return super(ScheduledEmbeddingTrainingHelper, self).initialize(name=name)

  def sample(self, time, outputs, state, name=None):
    with ops.name_scope(name, "ScheduledEmbeddingTrainingHelperSample",
                        [time, outputs, state]):
      # Per-sequence coin flip: True means "feed back a sampled id".
      coin_flips = bernoulli.Bernoulli(
          probs=self._sampling_probability, dtype=dtypes.bool).sample(
              sample_shape=self.batch_size, seed=self._scheduling_seed)
      drawn_ids = categorical.Categorical(logits=outputs).sample(
          seed=self._seed)
      # -1 marks positions where we did not sample.
      return array_ops.where(
          coin_flips,
          drawn_ids,
          gen_array_ops.fill([self.batch_size], -1))

  def next_inputs(self, time, outputs, state, sample_ids, name=None):
    # NOTE(review): this scope name reuses "...Sample" rather than
    # "...NextInputs" (matches upstream); kept for op-name compatibility.
    with ops.name_scope(name, "ScheduledEmbeddingTrainingHelperSample",
                        [time, outputs, state, sample_ids]):
      (finished, base_next_inputs, state) = (
          super(ScheduledEmbeddingTrainingHelper, self).next_inputs(
              time=time,
              outputs=outputs,
              state=state,
              sample_ids=sample_ids,
              name=name))

      def maybe_sample():
        """Perform scheduled sampling."""
        # Batch indices that sampled this step vs. those that did not.
        sampled_idx = math_ops.cast(
            array_ops.where(sample_ids > -1), dtypes.int32)
        passthrough_idx = math_ops.cast(
            array_ops.where(sample_ids <= -1), dtypes.int32)
        ids_to_embed = array_ops.gather_nd(sample_ids, sampled_idx)
        passthrough_inputs = array_ops.gather_nd(
            base_next_inputs, passthrough_idx)
        embedded_inputs = self._embedding_fn(ids_to_embed)
        base_shape = array_ops.shape(base_next_inputs)
        # Stitch the two disjoint row sets back into a full batch.
        return (array_ops.scatter_nd(indices=sampled_idx,
                                     updates=embedded_inputs,
                                     shape=base_shape)
                + array_ops.scatter_nd(indices=passthrough_idx,
                                       updates=passthrough_inputs,
                                       shape=base_shape))

      all_finished = math_ops.reduce_all(finished)
      next_inputs = control_flow_ops.cond(
          all_finished, lambda: base_next_inputs, maybe_sample)
      return (finished, next_inputs, state)
class ScheduledOutputTrainingHelper(TrainingHelper):
  """A training helper that adds scheduled sampling directly to outputs.

  Returns False for sample_ids where no sampling took place; True elsewhere.
  """

  def __init__(self, inputs, sequence_length, sampling_probability,
               time_major=False, seed=None, next_inputs_fn=None,
               auxiliary_inputs=None, name=None):
    """Initializer.

    Args:
      inputs: A (structure) of input tensors.
      sequence_length: An int32 vector tensor.
      sampling_probability: A 0D `float32` tensor: the probability of sampling
        from the outputs instead of reading directly from the inputs.
      time_major: Python bool.  Whether the tensors in `inputs` are time major.
        If `False` (default), they are assumed to be batch major.
      seed: The sampling seed.
      next_inputs_fn: (Optional) callable to apply to the RNN outputs to create
        the next input when sampling. If `None` (default), the RNN outputs will
        be used as the next inputs.
      auxiliary_inputs: An optional (structure of) auxiliary input tensors with
        a shape that matches `inputs` in all but (potentially) the final
        dimension. These tensors will be concatenated to the sampled output or
        the `inputs` when not sampling for use as the next input.
      name: Name scope for any created operations.

    Raises:
      ValueError: if `sampling_probability` is not a scalar or vector.
    """
    with ops.name_scope(name, "ScheduledOutputTrainingHelper",
                        [inputs, auxiliary_inputs, sampling_probability]):
      self._sampling_probability = ops.convert_to_tensor(
          sampling_probability, name="sampling_probability")
      if self._sampling_probability.get_shape().ndims not in (0, 1):
        raise ValueError(
            "sampling_probability must be either a scalar or a vector. "
            "saw shape: %s" % (self._sampling_probability.get_shape()))
      if auxiliary_inputs is None:
        maybe_concatenated_inputs = inputs
      else:
        # Auxiliary inputs are concatenated onto the last dimension of the
        # main inputs; the concatenated structure is what TrainingHelper
        # reads when not sampling.
        inputs = ops.convert_to_tensor(inputs, name="inputs")
        auxiliary_inputs = ops.convert_to_tensor(
            auxiliary_inputs, name="auxiliary_inputs")
        maybe_concatenated_inputs = nest.map_structure(
            lambda x, y: array_ops.concat((x, y), -1),
            inputs, auxiliary_inputs)
        if not time_major:
          auxiliary_inputs = nest.map_structure(
              _transpose_batch_time, auxiliary_inputs)
      # Per-time-step TensorArrays of the auxiliary inputs (or None), read
      # again in next_inputs() to concatenate onto sampled outputs.
      self._auxiliary_input_tas = (
          nest.map_structure(_unstack_ta, auxiliary_inputs)
          if auxiliary_inputs is not None else None)
      self._seed = seed
      self._next_inputs_fn = next_inputs_fn
      super(ScheduledOutputTrainingHelper, self).__init__(
          inputs=maybe_concatenated_inputs,
          sequence_length=sequence_length,
          time_major=time_major,
          name=name)

  def initialize(self, name=None):
    return super(ScheduledOutputTrainingHelper, self).initialize(name=name)

  def sample(self, time, outputs, state, name=None):
    # sample_ids here are per-sequence booleans: True means "feed back the
    # (transformed) output", False means "read the next ground-truth input".
    with ops.name_scope(name, "ScheduledOutputTrainingHelperSample",
                        [time, outputs, state]):
      sampler = bernoulli.Bernoulli(probs=self._sampling_probability)
      return sampler.sample(sample_shape=self.batch_size, seed=self._seed)

  def next_inputs(self, time, outputs, state, sample_ids, name=None):
    with ops.name_scope(name, "ScheduledOutputTrainingHelperNextInputs",
                        [time, outputs, state, sample_ids]):
      # Ground-truth next inputs as computed by TrainingHelper.
      (finished, base_next_inputs, state) = (
          super(ScheduledOutputTrainingHelper, self).next_inputs(
              time=time,
              outputs=outputs,
              state=state,
              sample_ids=sample_ids,
              name=name))
      sample_ids = math_ops.cast(sample_ids, dtypes.bool)

      def maybe_sample():
        """Perform scheduled sampling."""

        def maybe_concatenate_auxiliary_inputs(outputs_, indices=None):
          """Concatenate outputs with auxiliary inputs, if they exist."""
          if self._auxiliary_input_tas is None:
            return outputs_
          next_time = time + 1
          auxiliary_inputs = nest.map_structure(
              lambda ta: ta.read(next_time), self._auxiliary_input_tas)
          if indices is not None:
            # Only the rows that sampled need their auxiliary inputs.
            auxiliary_inputs = array_ops.gather_nd(auxiliary_inputs, indices)
          return nest.map_structure(
              lambda x, y: array_ops.concat((x, y), -1),
              outputs_, auxiliary_inputs)

        if self._next_inputs_fn is None:
          # No output transform: select row-wise between raw outputs
          # (plus auxiliaries) and the ground-truth inputs.
          return array_ops.where(
              sample_ids, maybe_concatenate_auxiliary_inputs(outputs),
              base_next_inputs)
        # With a transform, apply it only to the sampled rows, then stitch
        # the transformed rows and the passthrough rows back together.
        where_sampling = math_ops.cast(
            array_ops.where(sample_ids), dtypes.int32)
        where_not_sampling = math_ops.cast(
            array_ops.where(math_ops.logical_not(sample_ids)), dtypes.int32)
        outputs_sampling = array_ops.gather_nd(outputs, where_sampling)
        inputs_not_sampling = array_ops.gather_nd(base_next_inputs,
                                                  where_not_sampling)
        sampled_next_inputs = maybe_concatenate_auxiliary_inputs(
            self._next_inputs_fn(outputs_sampling), where_sampling)
        base_shape = array_ops.shape(base_next_inputs)
        return (array_ops.scatter_nd(indices=where_sampling,
                                     updates=sampled_next_inputs,
                                     shape=base_shape)
                + array_ops.scatter_nd(indices=where_not_sampling,
                                       updates=inputs_not_sampling,
                                       shape=base_shape))

      all_finished = math_ops.reduce_all(finished)
      # Skip the sampling graph entirely when done or when no row sampled.
      no_samples = math_ops.logical_not(math_ops.reduce_any(sample_ids))
      next_inputs = control_flow_ops.cond(
          math_ops.logical_or(all_finished, no_samples),
          lambda: base_next_inputs, maybe_sample)
      return (finished, next_inputs, state)
class GreedyEmbeddingHelper(Helper):
  """A helper for use during inference.

  Uses the argmax of the output (treated as logits) and passes the
  result through an embedding layer to get the next input.
  """

  def __init__(self, embedding, start_tokens, end_token):
    """Initializer.

    Args:
      embedding: A callable that takes a vector tensor of `ids` (argmax ids),
        or the `params` argument for `embedding_lookup`. The returned tensor
        will be passed to the decoder input.
      start_tokens: `int32` vector shaped `[batch_size]`, the start tokens.
      end_token: `int32` scalar, the token that marks end of decoding.

    Raises:
      ValueError: if `start_tokens` is not a 1D tensor or `end_token` is not
        a scalar.
    """
    if callable(embedding):
      self._embedding_fn = embedding
    else:
      # Treat `embedding` as the params matrix for embedding_lookup.
      self._embedding_fn = (
          lambda ids: embedding_ops.embedding_lookup(embedding, ids))
    self._start_tokens = ops.convert_to_tensor(
        start_tokens, dtype=dtypes.int32, name="start_tokens")
    self._end_token = ops.convert_to_tensor(
        end_token, dtype=dtypes.int32, name="end_token")
    if self._start_tokens.get_shape().ndims != 1:
      raise ValueError("start_tokens must be a vector")
    self._batch_size = array_ops.size(start_tokens)
    if self._end_token.get_shape().ndims != 0:
      raise ValueError("end_token must be a scalar")
    self._start_inputs = self._embedding_fn(self._start_tokens)

  @property
  def batch_size(self):
    return self._batch_size

  @property
  def sample_ids_shape(self):
    return tensor_shape.TensorShape([])

  @property
  def sample_ids_dtype(self):
    return dtypes.int32

  def initialize(self, name=None):
    # Nothing has finished yet; feed the embedded start tokens first.
    not_done = array_ops.tile([False], [self._batch_size])
    return (not_done, self._start_inputs)

  def sample(self, time, outputs, state, name=None):
    """sample for GreedyEmbeddingHelper."""
    del time, state  # unused by sample_fn
    if not isinstance(outputs, ops.Tensor):
      raise TypeError("Expected outputs to be a single Tensor, got: %s" %
                      type(outputs))
    # Outputs are logits; greedily pick the most probable id.
    return math_ops.cast(math_ops.argmax(outputs, axis=-1), dtypes.int32)

  def next_inputs(self, time, outputs, state, sample_ids, name=None):
    """next_inputs_fn for GreedyEmbeddingHelper."""
    del time, outputs  # unused by next_inputs_fn
    finished = math_ops.equal(sample_ids, self._end_token)
    all_finished = math_ops.reduce_all(finished)
    # Once everything is finished the fed-back value is irrelevant, so avoid
    # an extra embedding lookup and reuse the start inputs.
    next_inputs = control_flow_ops.cond(
        all_finished,
        lambda: self._start_inputs,
        lambda: self._embedding_fn(sample_ids))
    return (finished, next_inputs, state)
class SampleEmbeddingHelper(GreedyEmbeddingHelper):
  """A helper for use during inference.

  Uses sampling (from a distribution) instead of argmax and passes the
  result through an embedding layer to get the next input.
  """

  def __init__(self, embedding, start_tokens, end_token,
               softmax_temperature=None, seed=None):
    """Initializer.

    Args:
      embedding: A callable that takes a vector tensor of `ids` (argmax ids),
        or the `params` argument for `embedding_lookup`. The returned tensor
        will be passed to the decoder input.
      start_tokens: `int32` vector shaped `[batch_size]`, the start tokens.
      end_token: `int32` scalar, the token that marks end of decoding.
      softmax_temperature: (Optional) `float32` scalar, value to divide the
        logits by before computing the softmax. Larger values (above 1.0)
        result in more random samples, while smaller values push the sampling
        distribution towards the argmax. Must be strictly greater than 0.
        Defaults to 1.0.
      seed: (Optional) The sampling seed.

    Raises:
      ValueError: if `start_tokens` is not a 1D tensor or `end_token` is not
        a scalar.
    """
    super(SampleEmbeddingHelper, self).__init__(
        embedding, start_tokens, end_token)
    self._softmax_temperature = softmax_temperature
    self._seed = seed

  def sample(self, time, outputs, state, name=None):
    """sample for SampleEmbeddingHelper."""
    del time, state  # unused by sample_fn
    # Outputs are logits; sample from the categorical distribution instead
    # of taking the argmax (greedy).
    if not isinstance(outputs, ops.Tensor):
      raise TypeError("Expected outputs to be a single Tensor, got: %s" %
                      type(outputs))
    logits = (outputs if self._softmax_temperature is None
              else outputs / self._softmax_temperature)
    return categorical.Categorical(logits=logits).sample(seed=self._seed)
class InferenceHelper(Helper):
  """A helper to use during inference with a custom sampling function."""

  def __init__(self, sample_fn, sample_shape, sample_dtype,
               start_inputs, end_fn, next_inputs_fn=None):
    """Initializer.

    Args:
      sample_fn: A callable that takes `outputs` and emits tensor
        `sample_ids`.
      sample_shape: Either a list of integers, or a 1-D Tensor of type
        `int32`, the shape of the each sample in the batch returned by
        `sample_fn`.
      sample_dtype: the dtype of the sample returned by `sample_fn`.
      start_inputs: The initial batch of inputs.
      end_fn: A callable that takes `sample_ids` and emits a `bool` vector
        shaped `[batch_size]` indicating whether each sample is an end token.
      next_inputs_fn: (Optional) A callable that takes `sample_ids` and
        returns the next batch of inputs. If not provided, `sample_ids` is
        used as the next batch of inputs.
    """
    self._sample_fn = sample_fn
    self._end_fn = end_fn
    self._sample_shape = tensor_shape.TensorShape(sample_shape)
    self._sample_dtype = sample_dtype
    self._next_inputs_fn = next_inputs_fn
    # Batch size is taken from the leading dimension of the start inputs.
    self._batch_size = array_ops.shape(start_inputs)[0]
    self._start_inputs = ops.convert_to_tensor(
        start_inputs, name="start_inputs")

  @property
  def batch_size(self):
    return self._batch_size

  @property
  def sample_ids_shape(self):
    return self._sample_shape

  @property
  def sample_ids_dtype(self):
    return self._sample_dtype

  def initialize(self, name=None):
    not_done = array_ops.tile([False], [self._batch_size])
    return (not_done, self._start_inputs)

  def sample(self, time, outputs, state, name=None):
    del time, state  # unused by sample
    return self._sample_fn(outputs)

  def next_inputs(self, time, outputs, state, sample_ids, name=None):
    del time, outputs  # unused by next_inputs
    next_inputs = (sample_ids if self._next_inputs_fn is None
                   else self._next_inputs_fn(sample_ids))
    finished = self._end_fn(sample_ids)
    return (finished, next_inputs, state)
| apache-2.0 |
YolandaMDavis/nifi | nifi-nar-bundles/nifi-azure-bundle/nifi-azure-processors/src/test/java/org/apache/nifi/processors/azure/storage/ITFetchAzureDataLakeStorage.java | 17042 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.processors.azure.storage;
import com.azure.storage.file.datalake.models.DataLakeStorageException;
import com.google.common.collect.Sets;
import org.apache.nifi.processor.Processor;
import org.apache.nifi.processor.exception.ProcessException;
import org.apache.nifi.provenance.ProvenanceEventRecord;
import org.apache.nifi.provenance.ProvenanceEventType;
import org.apache.nifi.util.MockFlowFile;
import org.junit.Ignore;
import org.junit.Test;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import static org.junit.Assert.assertEquals;
public class ITFetchAzureDataLakeStorage extends AbstractAzureDataLakeStorageIT {
@Override
protected Class<? extends Processor> getProcessorClass() {
return FetchAzureDataLakeStorage.class;
}
@Test
public void testFetchFileFromDirectory() {
// GIVEN
String directory = "TestDirectory";
String filename = "testFile.txt";
String fileContent = "AzureFileContent";
String inputFlowFileContent = "InputFlowFileContent";
createDirectoryAndUploadFile(directory, filename, fileContent);
// WHEN
// THEN
testSuccessfulFetch(fileSystemName, directory, filename, inputFlowFileContent, fileContent);
}
@Test
public void testFetchFileFromRoot() {
// GIVEN
String directory= "";
String filename = "testFile.txt";
String fileContent = "AzureFileContent";
String inputFlowFileContent = "InputFlowFileContent";
uploadFile(directory, filename, fileContent);
// WHEN
// THEN
testSuccessfulFetch(fileSystemName, directory, filename, inputFlowFileContent, fileContent);
}
@Test
public void testFetchFileFromDirectoryWithWhitespace() {
// GIVEN
String directory= "A Test Directory";
String filename = "testFile.txt";
String fileContent = "AzureFileContent";
String inputFlowFileContent = "InputFlowFileContent";
createDirectoryAndUploadFile(directory, filename, fileContent);
// WHEN
// THEN
testSuccessfulFetch(fileSystemName, directory, filename, inputFlowFileContent, fileContent);
}
@Test
public void testFetchFileWithWhitespaceFromDirectory() {
// GIVEN
String directory= "TestDirectory";
String filename = "A test file.txt";
String fileContent = "AzureFileContent";
String inputFlowFileContent = "InputFlowFileContent";
createDirectoryAndUploadFile(directory, filename, fileContent);
// WHEN
// THEN
testSuccessfulFetch(fileSystemName, directory, filename, inputFlowFileContent, fileContent);
}
@Test
public void testFetchFileCaseSensitiveFilename() {
// GIVEN
String directory = "TestDirectory";
String filename1 = "testFile.txt";
String filename2 = "testfile.txt";
String fileContent1 = "ContentOfFile1";
String fileContent2 = "ContentOfFile2";
String inputFlowFileContent = "InputFlowFileContent";
createDirectoryAndUploadFile(directory, filename1, fileContent1);
uploadFile(directory, filename2, fileContent2);
// WHEN
// THEN
testSuccessfulFetch(fileSystemName, directory, filename1, inputFlowFileContent, fileContent1);
runner.clearProvenanceEvents();
runner.clearTransferState();
testSuccessfulFetch(fileSystemName, directory, filename2, inputFlowFileContent, fileContent2);
}
@Test
public void testFetchFileCaseSensitiveDirectoryName() {
// GIVEN
String directory1 = "TestDirectory";
String directory2 = "Testdirectory";
String filename1 = "testFile1.txt";
String filename2 = "testFile2.txt";
String fileContent1 = "ContentOfFile1";
String fileContent2 = "ContentOfFile2";
String inputFlowFileContent = "InputFlowFileContent";
createDirectoryAndUploadFile(directory1, filename1, fileContent1);
createDirectoryAndUploadFile(directory2, filename2, fileContent2);
// WHEN
// THEN
testSuccessfulFetch(fileSystemName, directory1, filename1, inputFlowFileContent, fileContent1);
runner.clearProvenanceEvents();
runner.clearTransferState();
testSuccessfulFetch(fileSystemName, directory2, filename2, inputFlowFileContent, fileContent2);
}
@Test
public void testFetchFileFromDeepDirectoryStructure() {
// GIVEN
String directory= "Directory01/Directory02/Directory03/Directory04/Directory05/Directory06/Directory07/"
+ "Directory08/Directory09/Directory10/Directory11/Directory12/Directory13/Directory14/Directory15/"
+ "Directory16/Directory17/Directory18/Directory19/Directory20/TestDirectory";
String filename = "testFile.txt";
String fileContent = "AzureFileContent";
String inputFlowFileContent = "InputFlowFileContent";
createDirectoryAndUploadFile(directory, filename, fileContent);
// WHEN
// THEN
testSuccessfulFetch(fileSystemName, directory, filename, inputFlowFileContent, fileContent);
}
@Test
public void testFetchDirectory() {
// GIVEN
String parentDirectory = "ParentDirectory";
String childDirectory = "ChildDirectory";
String filename = "testFile.txt";
String fileContent = "AzureFileContent";
String inputFlowFileContent = "InputFlowFileContent";
createDirectoryAndUploadFile(parentDirectory + "/" + childDirectory, filename, fileContent);
// WHEN
// THEN
testFailedFetchWithProcessException(fileSystemName, parentDirectory, childDirectory, inputFlowFileContent, inputFlowFileContent);
}
@Test
public void testFetchNonExistentFileSystem() {
// GIVEN
String fileSystem = "NonExistentFileSystem";
String directory = "TestDirectory";
String filename = "testFile.txt";
String inputFlowFileContent = "InputFlowFileContent";
// WHEN
// THEN
testFailedFetch(fileSystem, directory, filename, inputFlowFileContent, inputFlowFileContent, 400);
}
@Test
public void testFetchNonExistentDirectory() {
// GIVEN
String directory = "TestDirectory";
String filename = "testFile.txt";
String inputFlowFileContent = "InputFlowFileContent";
// WHEN
// THEN
testFailedFetch(fileSystemName, directory, filename, inputFlowFileContent, inputFlowFileContent, 404);
}
@Test
public void testFetchNonExistentFile() {
// GIVEN
String directory = "TestDirectory";
String filename = "testFile.txt";
String inputFlowFileContent = "InputFlowFileContent";
fileSystemClient.createDirectory(directory);
// WHEN
// THEN
testFailedFetch(fileSystemName, directory, filename, inputFlowFileContent, inputFlowFileContent, 404);
}
@Ignore("Takes some time, only recommended for manual testing.")
@Test
public void testFetchLargeFile() {
// GIVEN
String directory = "TestDirectory";
String filename = "testFile.txt";
byte[] fileContentBytes = new byte[100_000_000];
String fileContent = new String(fileContentBytes);
String inputFlowFileContent = "InputFlowFileContent";
createDirectoryAndUploadFile(directory, filename, fileContent);
// WHEN
// THEN
testSuccessfulFetch(fileSystemName, directory, filename, inputFlowFileContent, fileContent);
}
@Test
public void testFetchInvalidDirectoryName() {
// GIVEN
String directory = "TestDirectory";
String invalidDirectoryName = "Test/\\Directory";
String filename = "testFile.txt";
String fileContent = "AzureFileContent";
String inputFlowFileContent = "InputFlowFileContent";
createDirectoryAndUploadFile(directory, filename, fileContent);
// WHEN
// THEN
testFailedFetch(fileSystemName, invalidDirectoryName, filename, inputFlowFileContent, inputFlowFileContent, 404);
}
@Test
public void testFetchInvalidFilename() {
// GIVEN
String directory = "TestDirectory";
String filename = "testFile.txt";
String invalidFilename = "test/\\File.txt";
String fileContent = "AzureFileContent";
String inputFlowFileContent = "InputFlowFileContent";
createDirectoryAndUploadFile(directory, filename, fileContent);
// WHEN
// THEN
testFailedFetch(fileSystemName, directory, invalidFilename, inputFlowFileContent, inputFlowFileContent, 404);
}
@Test
public void testFetchUsingExpressionLanguage() {
// GIVEN
String expLangFileSystem = "az.filesystem";
String expLangDirectory = "az.directory";
String expLangFilename = "az.filename";
String directory = "TestDirectory";
String filename = "testFile.txt";
String fileContent = "AzureFileContent";
String inputFlowFileContent = "InputFlowFileContent";
Map<String, String> attributes = new HashMap<>();
attributes.put(expLangFileSystem, fileSystemName);
attributes.put(expLangDirectory, directory);
attributes.put(expLangFilename, filename);
createDirectoryAndUploadFile(directory, filename, fileContent);
// WHEN
// THEN
testSuccessfulFetch("${" + expLangFileSystem + "}",
"${" + expLangDirectory + "}",
"${" + expLangFilename + "}",
attributes,
inputFlowFileContent,
fileContent);
}
@Test
public void testFetchUsingExpressionLanguageFileSystemIsNotSpecified() {
// GIVEN
String expLangFileSystem = "az.filesystem";
String expLangDirectory = "az.directory";
String expLangFilename = "az.filename";
String directory = "TestDirectory";
String filename = "testFile.txt";
String fileContent = "AzureFileContent";
String inputFlowFileContent = "InputFlowFileContent";
Map<String, String> attributes = new HashMap<>();
attributes.put(expLangDirectory, directory);
attributes.put(expLangFilename, filename);
createDirectoryAndUploadFile(directory, filename, fileContent);
// WHEN
// THEN
testFailedFetchWithProcessException("${" + expLangFileSystem + "}",
"${" + expLangDirectory + "}",
"${" + expLangFilename + "}",
attributes,
inputFlowFileContent,
inputFlowFileContent);
}
@Test
public void testFetchUsingExpressionLanguageFilenameIsNotSpecified() {
// GIVEN
String expLangFileSystem = "az.filesystem";
String expLangDirectory = "az.directory";
String expLangFilename = "az.filename";
String directory = "TestDirectory";
String filename = "testFile.txt";
String fileContent = "AzureFileContent";
String inputFlowFileContent = "InputFlowFileContent";
Map<String, String> attributes = new HashMap<>();
attributes.put(expLangFileSystem, fileSystemName);
attributes.put(expLangDirectory, directory);
createDirectoryAndUploadFile(directory, filename, fileContent);
// WHEN
// THEN
testFailedFetchWithProcessException("${" + expLangFileSystem + "}",
"${" + expLangDirectory + "}",
"${" + expLangFilename + "}",
attributes,
inputFlowFileContent,
inputFlowFileContent);
}
private void testSuccessfulFetch(String fileSystem, String directory, String filename, String inputFlowFileContent, String expectedFlowFileContent) {
testSuccessfulFetch(fileSystem, directory, filename, Collections.emptyMap(), inputFlowFileContent, expectedFlowFileContent);
}
private void testSuccessfulFetch(String fileSystem, String directory, String filename, Map<String, String> attributes, String inputFlowFileContent, String expectedFlowFileContent) {
// GIVEN
Set<ProvenanceEventType> expectedEventTypes = Sets.newHashSet(ProvenanceEventType.CONTENT_MODIFIED, ProvenanceEventType.FETCH);
setRunnerProperties(fileSystem, directory, filename);
// WHEN
startRunner(inputFlowFileContent, attributes);
// THEN
assertSuccess(expectedFlowFileContent, expectedEventTypes);
}
private void testFailedFetch(String fileSystem, String directory, String filename, String inputFlowFileContent, String expectedFlowFileContent, int expectedErrorCode) {
testFailedFetch(fileSystem, directory, filename, Collections.emptyMap(), inputFlowFileContent, expectedFlowFileContent, expectedErrorCode);
}
private void testFailedFetch(String fileSystem, String directory, String filename, Map<String, String> attributes,
String inputFlowFileContent, String expectedFlowFileContent, int expectedErrorCode) {
// GIVEN
setRunnerProperties(fileSystem, directory, filename);
// WHEN
startRunner(inputFlowFileContent, attributes);
// THEN
DataLakeStorageException e = (DataLakeStorageException)runner.getLogger().getErrorMessages().get(0).getThrowable();
assertEquals(expectedErrorCode, e.getStatusCode());
assertFailure(expectedFlowFileContent);
}
private void testFailedFetchWithProcessException(String fileSystem, String directory, String filename, String inputFlowFileContent, String expectedFlowFileContent) {
testFailedFetchWithProcessException(fileSystem, directory, filename, Collections.emptyMap(), inputFlowFileContent, expectedFlowFileContent);
}
private void testFailedFetchWithProcessException(String fileSystem, String directory, String filename, Map<String, String> attributes,
String inputFlowFileContent, String expectedFlowFileContent) {
// GIVEN
setRunnerProperties(fileSystem, directory, filename);
// WHEN
startRunner(inputFlowFileContent, attributes);
// THEN
Throwable exception = runner.getLogger().getErrorMessages().get(0).getThrowable();
assertEquals(ProcessException.class, exception.getClass());
assertFailure(expectedFlowFileContent);
}
private void setRunnerProperties(String fileSystem, String directory, String filename) {
runner.setProperty(FetchAzureDataLakeStorage.FILESYSTEM, fileSystem);
runner.setProperty(FetchAzureDataLakeStorage.DIRECTORY, directory);
runner.setProperty(FetchAzureDataLakeStorage.FILE, filename);
runner.assertValid();
}
private void startRunner(String inputFlowFileContent, Map<String, String> attributes) {
runner.enqueue(inputFlowFileContent, attributes);
runner.run();
}
// Asserts that exactly one flow file reached REL_SUCCESS with the expected content,
// and that the set of emitted provenance event types equals expectedEventTypes
// (compared as sets, so ordering and duplicates are ignored).
private void assertSuccess(String expectedFlowFileContent, Set<ProvenanceEventType> expectedEventTypes) {
        runner.assertAllFlowFilesTransferred(FetchAzureDataLakeStorage.REL_SUCCESS, 1);
        MockFlowFile flowFile = runner.getFlowFilesForRelationship(FetchAzureDataLakeStorage.REL_SUCCESS).get(0);
        flowFile.assertContentEquals(expectedFlowFileContent);
        // Collapse the recorded provenance events into their distinct event types.
        Set<ProvenanceEventType> actualEventTypes = runner.getProvenanceEvents().stream()
                .map(ProvenanceEventRecord::getEventType)
                .collect(Collectors.toSet());
        assertEquals(expectedEventTypes, actualEventTypes);
    }
// Asserts that exactly one flow file was routed to REL_FAILURE and that its content
// matches the expected payload.
private void assertFailure(String expectedFlowFileContent) {
        runner.assertAllFlowFilesTransferred(FetchAzureDataLakeStorage.REL_FAILURE, 1);
        MockFlowFile flowFile = runner.getFlowFilesForRelationship(FetchAzureDataLakeStorage.REL_FAILURE).get(0);
        flowFile.assertContentEquals(expectedFlowFileContent);
    }
}
| apache-2.0 |
mingderwang/angr | angr/variableseekr.py | 13439 | import itertools
import logging
from collections import defaultdict
from simuvex import SimIRSB, SimProcedure
l = logging.getLogger(name="angr.variableseekr")
class Variable(object):
    """
    Base class for a variable recovered from a binary.

    A variable is identified by the IRSB (basic block) address and statement
    index where it was first seen; ``idx`` is a sequential counter used to
    build the default display name.
    """

    def __init__(self, idx, size, irsb_addr, stmt_id, ins_addr, custom_name=None, type_=None):
        """
        :param idx:         Sequential index; used to build the default name.
        :param size:        Size of the variable in bytes.
        :param irsb_addr:   Address of the IRSB in which the variable was created.
        :param stmt_id:     Statement index inside that IRSB.
        :param ins_addr:    Address of the instruction that created it.
        :param custom_name: Optional display name overriding the default one.
        :param type_:       Optional type string for the variable.
        """
        self._idx = idx
        self._irsb_addr = irsb_addr
        self._ins_addr = ins_addr
        self._stmt_id = stmt_id
        self._size = size
        self._custom_name = custom_name
        self._type = type_

        # A model used to model the possible value range of this variable
        self._model = None

    @property
    def irsb_addr(self):
        return self._irsb_addr

    @property
    def stmt_id(self):
        return self._stmt_id

    @property
    def name(self):
        # Default display name; subclasses override with their own prefix.
        return "var_%d" % self._idx

    @property
    def type(self):
        return self._type

    @property
    def decl(self):
        # C-style declaration, e.g. "int var_3", or just the name if untyped.
        if self._type is not None:
            return "{} {}".format(self._type, self)
        else:
            return str(self)

    @property
    def model(self):
        return self._model

    @model.setter
    def model(self, value):
        self._model = value

    @property
    def upper_bound(self):
        # Bounds/stride are delegated to the value-range model; None when no
        # model has been attached yet.
        if self._model is not None:
            return self._model.upper_bound
        else:
            return None

    @property
    def lower_bound(self):
        if self._model is not None:
            return self._model.lower_bound
        else:
            return None

    @property
    def stride(self):
        if self._model is not None:
            return self._model.stride
        else:
            return None

    def __repr__(self):
        if self._custom_name is not None:
            return self._custom_name
        else:
            return self.name


class DummyVariable(Variable):
    """Placeholder variable used where a real variable cannot be determined."""

    def __init__(self, idx, size, irsb_addr, stmt_id, ins_addr):
        Variable.__init__(self, idx, size, irsb_addr, stmt_id, ins_addr)

    @property
    def name(self):
        return "dummy_var_%d" % self._idx


class RegisterVariable(Variable):
    """A variable that lives in a register, identified by its register offset."""

    def __init__(self, idx, size, irsb_addr, stmt_id, ins_addr, offset, custom_name=None, type_=None):
        Variable.__init__(self, idx, size, irsb_addr, stmt_id, ins_addr, custom_name, type_)
        # Architecture-specific register-file offset.
        self._offset = offset

    @property
    def name(self):
        return 'reg_var_%d' % self._idx


class StackVariable(Variable):
    '''
    A variable that lives on the stack.
    _offset refers to the offset from stack base
    '''

    def __init__(self, idx, size, irsb_addr, stmt_id, ins_addr, offset, addr, type_=None):
        import numbers
        if not isinstance(size, numbers.Integral):
            # BUG FIX: a leftover `import ipdb; ipdb.set_trace()` debugging hook
            # used to sit here (related to the simuvex Issue #31 FIXME). Fail
            # loudly instead of dropping into an interactive debugger.
            raise TypeError('StackVariable size must be an integer, got %r' % (size,))
        # BUG FIX: `type_` was previously passed positionally and landed in
        # Variable's `custom_name` slot, so the type leaked into repr() while
        # `type`/`decl` were never populated. Pass it by keyword instead.
        Variable.__init__(self, idx, size, irsb_addr, stmt_id, ins_addr, type_=type_)
        self._offset = offset
        self._addr = addr

    @property
    def idx(self):
        return self._idx

    @property
    def offset(self):
        return self._offset

    @offset.setter
    def offset(self, value):
        self._offset = value

    def detail_str(self):
        """Return a verbose one-line description for debugging output."""
        # The size is guaranteed integral by __init__, so a single format string
        # suffices (the original had two near-identical branches here).
        s = 'StackVar %d [%s|%d] <irsb 0x%x, stmt %d> (%s-%s,%s)' % (self._idx, hex(self._offset), self._size, self._irsb_addr, self._stmt_id, self.lower_bound, self.upper_bound, self.stride)
        return s
class VariableManager(object):
    """
    Book-keeping for all variables recovered for a single function.

    Variables are indexed both by the (irsb_addr, stmt_id) location that
    references them and, for stack variables, by their stack offset.
    """

    def __init__(self, func_addr):
        self._func_addr = func_addr
        self._var_list = []
        # (irsb_addr, stmt_id) -> variable referenced by that statement
        self._stmt_to_var_map = {}
        # stack offset -> { (irsb_addr, stmt_id): StackVariable }
        self._stack_variable_map = defaultdict(dict)

    def add(self, var):
        """Register a newly created variable and index it by its location."""
        key = (var.irsb_addr, var.stmt_id)
        self._var_list.append(var)
        self._stmt_to_var_map[key] = var
        if isinstance(var, StackVariable):
            self._stack_variable_map[var.offset][key] = var

    def add_ref(self, var, irsb_addr, stmt_id):
        """Record that the given statement also references an existing variable."""
        self._stmt_to_var_map[(irsb_addr, stmt_id)] = var

    def get(self, irsb_addr, stmt_id):
        """Return the variable referenced at the given statement, or None."""
        return self._stmt_to_var_map.get((irsb_addr, stmt_id))

    def get_stack_variable(self, offset, irsb_addr, stmt_id):
        """Return the stack variable created at offset by the given statement, or None."""
        # Plain .get() avoids the defaultdict auto-insertion a [] access would cause.
        per_offset = self._stack_variable_map.get(offset)
        if per_offset is None:
            return None
        return per_offset.get((irsb_addr, stmt_id))

    def get_stack_variables(self, offset):
        """Return all stack variables at the given offset, or None if there are none."""
        per_offset = self._stack_variable_map.get(offset)
        if per_offset:
            return per_offset.values()
        return None

    @property
    def variables(self):
        return self._var_list
class VariableSeekr(object):
    """
    Recovers stack variables and function arguments for functions in a binary,
    driven by a CFG (for the function list) and a VFG (for SimRuns and their
    abstract memory states). Results are stored per function in VariableManager
    instances, retrievable via get_variable_manager().
    """

    def __init__(self, project, cfg, vfg):
        """
        :param project: The angr project being analyzed.
        :param cfg:     Control-flow graph; its function_manager lists the functions.
        :param vfg:     Value-flow graph providing SimRuns and memory regions.
        """
        self._cfg = cfg
        self._vfg = vfg
        self._project = project

        # A shortcut to arch
        self._arch = project.arch

        # Maps function address -> VariableManager for that function.
        self._variable_managers = {}

    def construct(self, func_start=None):
        """Public entry point: recover variables for the function at func_start."""
        self._do_work(func_start)

    def _variable_manager(self, function_start):
        """Return the VariableManager for function_start, creating it on first use."""
        if function_start not in self._variable_managers:
            variable_manager = VariableManager(function_start)
            self._variable_managers[function_start] = variable_manager
            return variable_manager
        else:
            return self._variable_managers[function_start]

    def _do_work(self, func_start):
        """
        Traverse the VFG of the function at func_start depth-first and create a
        StackVariable for every abstract location (aloc) found in stack regions.

        :raises AngrInvalidArgumentError: if func_start is unknown to the CFG's
            function manager, or no SimRun exists for it in the VFG.
        """
        function_manager = self._cfg.function_manager
        functions = function_manager.functions

        if func_start not in functions:
            raise AngrInvalidArgumentError('No such function exists in FunctionManager: function 0x%x.', func_start)

        func = functions[func_start]
        var_idx = itertools.count()

        initial_run = self._vfg.get_any_irsb(func_start) # FIXME: This is buggy
        if initial_run is None:
            raise AngrInvalidArgumentError('No such SimRun exists in VFG: 0x%x.', func_start)

        # Iterative DFS over the function's SimRuns, restricted to this
        # function's own basic blocks.
        run_stack = [initial_run]
        processed_runs = set()
        processed_runs.add(initial_run)

        while len(run_stack) > 0:
            current_run = run_stack.pop()

            if isinstance(current_run, SimIRSB):
                irsb = current_run

                memory = irsb.exits()[0].state.memory
                events = memory.state.log.events_of_type('uninitialized')
                # BUG FIX: this used to be a bare debugging `print events`
                # statement (also invalid syntax under Python 3); route it
                # through the module logger instead.
                l.debug('Uninitialized-memory events in IRSB: %s', events)

                for _, region in memory.regions.iteritems():
                    if region.is_stack:
                        for tpl, aloc in region.alocs.items():
                            irsb_addr, stmt_id = tpl
                            # Alocs are attributed to the function they belong
                            # to, which is not necessarily func_start.
                            variable_manager = self._variable_manager(region.related_function_addr)
                            if variable_manager.get_stack_variable(aloc.offset, irsb_addr, stmt_id) is None:
                                stack_var = StackVariable(var_idx.next(), aloc.size, irsb_addr, stmt_id, 0, aloc.offset, 0)
                                variable_manager.add(stack_var)

            elif isinstance(current_run, SimProcedure):
                # Reference-based recovery for SimProcedures is currently
                # disabled; the handlers below are kept for when it returns.
                pass
                # simproc = current_run
                # for ref in simproc.refs():
                #     handler_name = '_handle_reference_%s' % type(ref).__name__
                #     if hasattr(self, handler_name):
                #         getattr(self, handler_name)(func, var_idx,
                #                                     variable_manager,
                #                                     regmap,
                #                                     temp_var_map,
                #                                     simproc,
                #                                     simproc.addr, -1,
                #                                     concrete_sp, ref)

            # Successors
            successors = self._vfg.get_successors(current_run, excluding_fakeret=False)
            for suc in successors:
                if suc not in processed_runs and suc.addr in func.basic_blocks:
                    run_stack.append(suc)
                    processed_runs.add(suc)

        # Post-processing
        if func.bp_on_stack:
            # base pointer is pushed on the stack. To be consistent with IDA,
            # we wanna adjust the offset of each stack variable.
            # BUG FIX: `variable_manager` used to leak out of the traversal
            # loop above and could be unbound (or point at an unrelated
            # function's manager); look up this function's manager explicitly.
            variable_manager = self._variable_managers.get(func_start)
            if variable_manager is not None:
                for var in variable_manager.variables:
                    if isinstance(var, StackVariable):
                        var.offset += self._arch.bits / 8

    def _collect_tmp_deps(self, tmp_tuple, temp_var_map):
        '''
        Return all registers that a tmp relies on
        '''
        reg_deps = set()

        for tmp_dep in tmp_tuple:
            if tmp_dep in temp_var_map:
                reg_deps |= temp_var_map[tmp_dep]

        return reg_deps

    def _try_concretize(self, se, exp):
        '''
        Concretize exp with the solver engine when it is not symbolic;
        otherwise return it unchanged.
        FIXME: This is a temporary fix for simuvex Issue #31
        '''
        if type(exp) in [int, long]:
            return exp
        else:
            # Try to concretize it
            if se.symbolic(exp):
                return exp
            else:
                return se.any_int(exp)

    def _handle_reference_SimMemRead(self, func, var_idx, variable_manager, regmap, temp_var_map, current_run, ins_addr, stmt_id, concrete_sp, ref):
        """Record a stack read: reference an existing variable or create a new one."""
        addr = ref.addr
        if not addr.symbolic:
            concrete_addr = current_run.initial_state.se.any_int(addr)
            offset = concrete_addr - concrete_sp
            # NOTE(review): STACK_SIZE is not defined anywhere in this module;
            # these handlers are only reachable through code that is currently
            # commented out in _do_work. Confirm before re-enabling.
            if abs(offset) < STACK_SIZE:
                # Let's see if a variable already exists at this stack offset.
                # BUG FIX: get_stack_variable() takes (offset, irsb_addr,
                # stmt_id); the old single-argument call would have raised a
                # TypeError. Use the offset-indexed lookup instead.
                existing_vars = variable_manager.get_stack_variables(offset)
                if existing_vars:
                    # We found it! Add a reference to that variable.
                    existing_var = next(iter(existing_vars))
                    variable_manager.add_ref(existing_var, current_run.addr, stmt_id)
                else:
                    # This is a variable that is read before created/written
                    l.debug("Stack variable %d has never been written before.", offset)
                    size_ = self._try_concretize(current_run.initial_state.se, ref.size)
                    stack_var = StackVariable(var_idx.next(), size_, current_run.addr, stmt_id, ins_addr, offset, concrete_addr)
                    variable_manager.add(stack_var)
                    if offset > 0:
                        # Positive offsets lie above the stack pointer: arguments.
                        func.add_argument_stack_variable(offset)

    def _handle_reference_SimTmpWrite(self, func, var_idx, variable_manager, regmap, temp_var_map, current_run, ins_addr, stmt_id, concrete_sp, ref):
        """Track which registers each tmp (transitively) depends on."""
        tmp_var_id = ref.tmp
        temp_var_map[tmp_var_id] = set(ref.data_reg_deps)
        for tmp_dep in ref.data_tmp_deps:
            if tmp_dep in temp_var_map:
                temp_var_map[tmp_var_id] |= temp_var_map[tmp_dep]

    def _handle_reference_SimMemWrite(self, func, var_idx, variable_manager, regmap, temp_var_map, current_run, ins_addr, stmt_id, concrete_sp, ref):
        """Record a stack write: create a new stack variable, unless the value
        being written is the saved base pointer."""
        addr = ref.addr
        if not addr.symbolic:
            concrete_addr = current_run.initial_state.se.any_int(addr)
            offset = concrete_addr - concrete_sp
            # NOTE(review): STACK_SIZE is undefined in this module -- see above.
            if abs(offset) < STACK_SIZE:
                # What will be written?
                # If the value is (only) the base pointer, we don't treat it as
                # a variable. Instead, we'll report this fact to func.
                reg_deps = self._collect_tmp_deps(ref.data_tmp_deps, temp_var_map)
                reg_deps |= set(ref.data_reg_deps)

                if len(reg_deps) == 1 and self._arch.bp_offset in reg_deps:
                    # Report to func
                    func.bp_on_stack = True
                    return

                # As this is a write, it must be a new
                # variable (although it might be
                # overlapping with another variable).
                size_ = self._try_concretize(current_run.initial_state.se, ref.size)
                stack_var = StackVariable(var_idx.next(), size_, current_run.addr, stmt_id, ins_addr, offset, concrete_addr)
                variable_manager.add(stack_var)

    def _handle_reference_SimRegRead(self, func, var_idx, variable_manager, regmap, temp_var_map, current_run, ins_addr, stmt_id, concrete_sp, ref):
        """A register read before any write marks a register-passed argument."""
        if ref.offset in [self._arch.sp_offset, self._arch.ip_offset]:
            # Ignore stack pointer
            return

        if not regmap.contains(ref.offset):
            # The register has never been written before
            func.add_argument_register(ref.offset)

    def _handle_reference_SimRegWrite(self, func, var_idx, variable_manager, regmap, temp_var_map, current_run, ins_addr, stmt_id, concrete_sp, ref):
        """Mark a register as written so later reads are not counted as arguments."""
        if ref.offset in [self._arch.sp_offset, self._arch.ip_offset]:
            # Ignore stack pointers and program counter
            return

        regmap.assign(ref.offset, 1)

    def get_variable_manager(self, func_addr):
        """Return the VariableManager built for func_addr, or None if none exists."""
        if func_addr in self._variable_managers:
            return self._variable_managers[func_addr]
        else:
            return None
from .errors import AngrInvalidArgumentError
| bsd-2-clause |
koala-framework/koala-framework | Kwc/Directories/List/EventItemInserted.php | 102 | <?php
/**
 * Marker event class: adds no members of its own beyond
 * Kwc_Directories_List_EventItemAbstract. Judging by its name it is fired
 * when a directory-list item has been inserted -- confirm against the
 * code that triggers this event.
 */
class Kwc_Directories_List_EventItemInserted extends Kwc_Directories_List_EventItemAbstract
{
}
| bsd-2-clause |
Ant59/Elkarte | attachments/index.php | 331 | <?php
/**
 * This file is here solely to protect your attachments directory.
 */

// Look for Settings.php....
if (file_exists(dirname(dirname(__FILE__)) . '/Settings.php'))
{
	// Found it!
	// Settings.php defines $boardurl; redirect direct visitors to the forum's
	// front page instead of exposing the attachments directory.
	// NOTE(review): execution continues after header() -- an explicit exit
	// here would be the usual hardening. Left unchanged on purpose.
	require(dirname(dirname(__FILE__)) . '/Settings.php');
	header('Location: ' . $boardurl);
}
// Can't find it... just forget it.
else
	exit;
ecalvovi/AliRoot | PYTHIA8/pythia8205/share/Pythia8/phpdoc/ColourReconnection.php | 21946 | <html>
<head>
<title>Colour reconnection</title>
<link rel="stylesheet" type="text/css" href="pythia.css"/>
<link rel="shortcut icon" href="pythia32.gif"/>
</head>
<body>
<script language=javascript type=text/javascript>
function stopRKey(evt) {
var evt = (evt) ? evt : ((event) ? event : null);
var node = (evt.target) ? evt.target :((evt.srcElement) ? evt.srcElement : null);
if ((evt.keyCode == 13) && (node.type=="text"))
{return false;}
}
document.onkeypress = stopRKey;
</script>
<?php
// Post-back status banner: confirm the save, or warn when no output file has
// been chosen yet (the filepath is still the "files/" placeholder default).
if($_POST['saved'] == 1) {
if($_POST['filepath'] != "files/") {
echo "<font color='red'>SETTINGS SAVED TO FILE</font><br/><br/>"; }
else {
echo "<font color='red'>NO FILE SELECTED YET.. PLEASE DO SO </font><a href='SaveSettings.php'>HERE</a><br/><br/>"; }
}
?>
<form method='post' action='ColourReconnection.php'>
<h2>Colour Reconnection</h2>
The colour flows in the separate subprocesses defined in the
multiparton-interactions scenario are tied together via the assignment
of colour flow in the beam remnant. This is not an unambiguous
procedure, and currently two different methods are implemented. In the first
model the colour flow is reconstructed by how a PS could have
constructed the configuration. In the second model, the full QCD colour
calculation is taken into account, however the dynamical effects are modeled
loosely, only an overall saturation is taken into account. The idea is to
later account for other dynamical effects through colour reconnections.
<p/>
A simple "minimal" procedure of colour flow only via the beam remnants
does not result in a scenario in agreement with data, however,
notably not a sufficiently steep rise of
<i><pT>(n_ch)</i>. The true origin of this behaviour and the
correct mechanism to reproduce it remains one of the big unsolved issues
at the borderline between perturbative and nonperturbative QCD. Since no final
answer is known, several models are implemented. The different models also
rely on the two different colour assignments in the beam remnant. There are
two, somewhat motivated, models implemented: the original PYTHIA scheme and
a new scheme that tries to incorporate more of the colour knowledge from QCD.
<p/>
The original PYTHIA scheme relies on the PS-like colour configuration of the
beam remnant. This is combined with an additional step, wherein the gluons
of a lower-<i>pT</i> MPI system are merged with the ones in a higher-pT MPI.
A more detailed description of the merging can be found below.
Relative to the other models it tests fewer reconnection possibilities,
and therefore tends to be reasonably fast.
<p/>
The new scheme [<a href="Bibliography.php" target="page">Chr14a</a>] relies on the full QCD colour configuration
in the beam remnant. This is followed up by a colour reconnection, where the
potential string energy is minimized (ie. the <i>lambda</i> measure is
minimized). The QCD colour rules are also incorporated in the colour
reconnection, and determine the probability that a reconnection is allowed.
The model also allows the creation of junction structures.
<p/>
In addition to the two models described above, a simple model is implemented,
wherein gluons can be moved from one location to another so as to reduce the
total string length. This is one out of a range of simple models developed
to study potential colour reconnection effects e.g. on top mass
[<a href="Bibliography.php" target="page">Arg14</a>], not from the point of view of having the most realistic
description, but in order to probe the potential worst-case spread of
predictions. All of these models are made available separately in
<code>include/Pythia8Plugins/ColourReconnectionHooks.h</code>, with the
setup illustrated in <code>examples/main29.cc</code>, but only the
gluon-move one is sufficiently general and realistic that it has been
included among the standard options here.
<br/><br/><strong>ColourReconnection:reconnect</strong> <input type="radio" name="1" value="on" checked="checked"><strong>On</strong>
<input type="radio" name="1" value="off"><strong>Off</strong>
(<code>default = <strong>on</strong></code>)<br/>
Allow or not a system to be merged with another one.
<br/><br/><table><tr><td><strong>ColourReconnection:mode </td><td> (<code>default = <strong>0</strong></code>; <code>minimum = 0</code>; <code>maximum = 2</code>)</td></tr></table>
Determine which model is used for colour reconnection. Beware that
different <code>BeamRemnants:remnantMode</code> should be used for
different reconnection schemes.
<br/>
<input type="radio" name="2" value="0" checked="checked"><strong>0 </strong>: The MPI-based original Pythia 8 scheme. <br/>
<input type="radio" name="2" value="1"><strong>1 </strong>: The new more QCD based scheme. <br/>
<input type="radio" name="2" value="2"><strong>2 </strong>: The new gluon-move model. <br/>
<h3>The MPI-based scheme</h3>
In this scheme partons are classified by which MPI system they belong to.
The colour flow of two such systems can be fused, and if so the partons
of the lower-<i>pT</i> system are added to the strings defined by the
higher-<i>pT</i> system in such a way as to give the smallest total
string length. The bulk of these lower-<i>pT</i> partons are gluons,
and this is what the scheme is optimized to handle.
<p/>
In more detail, an MPI system with a scale <i>pT</i> of the hard
interaction (normally <i>2 → 2</i>) can be merged with one of
a harder scale with a probability that is
<i>pT0_Rec^2 / (pT0_Rec^2 + pT^2)</i>, where <i>pT0_Rec</i> is
<code>range</code> times <i>pT0</i>, the latter being the same
energy-dependent dampening parameter as used for MPIs.
Thus it is easy to merge a low-<i>pT</i> system with any other,
but difficult to merge two high-<i>pT</i> ones with each other.
<br/><br/><table><tr><td><strong>ColourReconnection:range </td><td></td><td> <input type="text" name="3" value="1.8" size="20"/> (<code>default = <strong>1.8</strong></code>; <code>minimum = 0.</code>; <code>maximum = 10.</code>)</td></tr></table>
The <code>range</code> parameter defined above. The higher this number is
the more reconnections can occur. For values above unity the reconnection
rate tends to saturate, since then most systems are already connected with
each other. This is why 10 is set as an effective upper limit, beyond
which it is not meaningful to let the parameter go.
<p/>
The reconnection procedure is applied iteratively. Thus first the
reconnection probability <i>P = pT0_Rec^2 / (pT0_Rec^2 + pT^2)</i> of the
lowest-<i>pT</i> system is found, and gives the probability for merger with
the second-lowest one. If not merged, it is tested with the third-lowest
one, and so on. For the <i>m</i>'th higher system the reconnection
probability thus becomes <i>(1 - P)^(m-1) P</i>. That is, there is
no explicit dependence on the higher <i>pT</i> scale, but implicitly
there is via the survival probability of not already having been merged
with a lower-<i>pT</i> system. Also note that the total reconnection
probability for the lowest-<i>pT</i> system in an event with <i>n</i>
systems becomes <i>1 - (1 - P)^(n-1)</i>. Once the fate of the
lowest-<i>pT</i> system has been decided, the second-lowest is considered
with respect to the ones above it, then the third-lowest, and so on.
<p/>
Once it has been decided which systems should be joined, the actual merging
is carried out in the opposite direction. That is, first the hardest
system is studied, and all colour dipoles in it are found (including to
the beam remnants, as defined by the holes of the incoming partons).
Next each softer system to be merged is studied in turn. Its gluons are,
in decreasing <i>pT</i> order, inserted on the colour dipole <i>i,j</i>
that gives the smallest <i>(p_g p_i)(p_g p_j)/(p_i p_j)</i>, i.e.
minimizes the "disturbance" on the existing dipole, in terms of
<i>pT^2</i> or <i>Lambda</i> measure (string length). The insertion
of the gluon means that the old dipole is replaced by two new ones.
Also the (rather few) quark-antiquark pairs that can be traced back to
a gluon splitting are treated in close analogy with the gluon case.
Quark lines that attach directly to the beam remnants cannot be merged
but are left behind.
<p/>
The joining procedure can be viewed as a more sophisticated variant of
the one introduced already in [<a href="Bibliography.php" target="page">Sjo87</a>]. Clearly it is ad hoc.
It hopefully captures some elements of truth. The lower <i>pT</i> scale
a system has the larger its spatial extent and therefore the larger its
overlap with other systems. It could be argued that one should classify
individual initial-state partons by <i>pT</i> rather than the system
as a whole. However, for final-state radiation, a soft gluon radiated off
a hard parton is actually produced at late times and therefore probably
less likely to reconnect. In the balance, a classification by system
<i>pT</i> scale appears sensible as a first try.
<p/>
Note that the reconnection is carried out before resonance decays are
considered by default. Colour inside a resonance therefore is not
reconnected. The
<code><?php $filepath = $_GET["filepath"];
echo "<a href='MasterSwitches.php?filepath=".$filepath."' target='page'>";?>PartonLevel:earlyResDec</a></code>
can be switched on to perform resonance decays before colour reconnection,
and then the partons from resonance decays could be affected.
Ideally the time scales of resonance decays and of colour reconnection
should be picked dynamically, but this is not yet the case. Notably
the <i>W</i>, <i>Z</i> and <i>t</i> have intermediate decay time
scales, somewhat but not much shorter than typical hadronization times,
whereas the <i>H</i> is much more long-lived.
<h3>The newer scheme</h3>
<br/><br/><table><tr><td><strong>ColourReconnection:m0 </td><td></td><td> <input type="text" name="4" value="0.5" size="20"/> (<code>default = <strong>0.5</strong></code>; <code>minimum = 0.1</code>; <code>maximum = 5.</code>)</td></tr></table>
This is the variable used in the lambda measure for the string length.
See the different choices of lambda measure for the exact formulae. This variable
is in the new model also used as a cut for forming pseudo particles that are
not colour reconnected.
<br/><br/><table><tr><td><strong>ColourReconnection:nColours </td><td></td><td> <input type="text" name="5" value="9" size="20"/> (<code>default = <strong>9</strong></code>; <code>minimum = 1</code>; <code>maximum = 30</code>)</td></tr></table>
The number of reconnection colours, this should not be confused with the
standard number of QCD colours. Each string is given an integer number between
0 and <code>nColours - 1</code>. Only strings with the same number are allowed
to do a normal string reconnection. The default value provides
the standard QCD probability that a triplet and an anti-triplet is in a
singlet state. The probability for two strings to form a junction structure is
in QCD given by the product of two triplets, which gives a probability of 1/3.
Therefore the number of reconnection colours for junction formation is
<code>iColours % 3</code>, where iColours refer to the integer of the string.
The behaviour of junction formation therefore only changes slightly with this
variable.
<br/><br/><strong>ColourReconnection:sameNeighbourColours</strong> <input type="radio" name="6" value="on"><strong>On</strong>
<input type="radio" name="6" value="off" checked="checked"><strong>Off</strong>
(<code>default = <strong>off</strong></code>)<br/>
In the normal colour reconnection two neighbouring strings are not allowed
to have the same colour. Similarly, two strings originating from a gluon split are
not allowed to reconnect. The physics motivation for this is that it would
require colour singlet gluons, and therefore for ordinary physics studies this
should be turned off. But for testing of extreme scenarios (i.e. 1 colour),
this variable needs to be turned on, since it is not possible to have
different neighbouring colours.
<br/><br/><strong>ColourReconnection:allowJunctions</strong> <input type="radio" name="7" value="on" checked="checked"><strong>On</strong>
<input type="radio" name="7" value="off"><strong>Off</strong>
(<code>default = <strong>on</strong></code>)<br/>
This switch disables the formation of junctions in the colour reconnection.
<br/><br/><table><tr><td><strong>ColourReconnection:lambdaForm </td><td> (<code>default = <strong>0</strong></code>; <code>minimum = 0</code>; <code>maximum = 2</code>)</td></tr></table>
This allows to switch between different options for what
<ei>lambda</ei>-measure to use.
The formula shown are how much each end of a dipole or junction contribute to
the total <ei>lambda</ei>-measure. The energies are defined in respectively
the dipole or junction restframe.
<br/>
<input type="radio" name="8" value="0" checked="checked"><strong>0 </strong>: <ei>lambda = ln (1 + sqrt(2) E/m0)</ei> <br/>
<input type="radio" name="8" value="1"><strong>1 </strong>: <ei>lambda = ln (1 + 2 E/m0)</ei> <br/>
<input type="radio" name="8" value="2"><strong>2 </strong>: <ei>lambda = ln (2 E/m0)</ei> <br/>
<br/><br/><table><tr><td><strong>ColourReconnection:minimumGainJun </td><td></td><td> <input type="text" name="9" value="1" size="20"/> (<code>default = <strong>1</strong></code>; <code>minimum = -100</code>; <code>maximum = 100</code>)</td></tr></table>
The minimum <i>lambda</i> has to decrease in order to create a junction
antijunction pair.
<br/><br/><strong>ColourReconnection:allowDoubleJunRem</strong> <input type="radio" name="10" value="on" checked="checked"><strong>On</strong>
<input type="radio" name="10" value="off"><strong>Off</strong>
(<code>default = <strong>on</strong></code>)<br/>
This parameter tells whether or not to allow a directly connected
junction-antijunction pair to split into two strings. The lambda measure of
the junction system is compared to that of the two possible string
configurations. If the chosen configuration is the junction system, a q-qbar
system is inserted between the junctions by removing some energy/momentum from
the other legs.
<h3>The gluon-move scheme</h3>
This approach contains two steps, a first "move" one and an optional
second "flip" one. Both are intended to reduce the total "string length"
<i>lambda</i> measure of an event. For multiparton topologies the
correct <i>lambda</i> measure can become quite cumbersome, so here it
is approximated by the sum of <i>lambda</i> contributions from each pair
of partons connected by a colour string piece. For two partons <i>i</i>
and <i>j</i> with invariant mass <i>m_ij</i> this contribution
is defined as <i>lambda_ij = ln(1 + m^2_ij / m2Lambda)</i>.
The 1 is added ad hoc to avoid problems in the <i>m_ij → 0</i>
limit, problems which mainly comes from the approximate treatment,
and <i>m2Lambda</i> is a parameter set below.
<p/>
In the move step all final gluons are identified, alternatively only a
fraction <i>fracGluon</i> of them, and also all colour-connected
parton pairs. For each gluon and each pair it is calculated how the total
<i>lambda</i> would shift if the gluon would be removed from its current
location and inserted in between the pair. The gluon move that gives the
largest negative shift, if any, is then carried out. Alternatively, only
shifts more negative than <i>dLambdaCut</i> are considered. The procedure
is iterated so long as allowed moves can be found.
<p/>
There is some fine print. If a colour singlet subsystem consists of two
gluons only then it is not allowed to move any of them, since that then
would result in in a colour singlet gluon. Also, at most as many moves
are made as there are gluons, which normally should be enough. A specific
gluon may be moved more than once, however. Finally, a gluon directly
connected to a junction cannot be moved, and also no gluon can be inserted
between it and the junction. This is entirely for practical reasons, but
should not be a problem, since junctions are rare in this model.
<p/>
The gluon-move steps will not break the connection between string endpoints,
in the sense that a quark and an antiquark that are colour-connected via
a number of gluons will remain so, only the number and identity of the
intermediate gluons may change. Such a scenario may be too restrictive.
Therefore an optional second flip step is introduced. In it all such
colour chains are identified, omitting closed gluon loops. The lambda
change is defined by what happens if the two colour lines are crossed
somewhere, e.g. such that two systems <i>q1 - g1 - qbar1</i> and
<i>q2 - g2 - qbar2</i> are flipped to <i>q1 - g1 - g2 - qbar2</i>
and <i>q2 - qbar1</i>. The flip that gives the largest <i>lambda</i>
reduction is carried out, again with <i>dLambdaCut</i> offering a
possibility to restrict the options. As with the move step, the procedure
is repeated so long as it is allowed. An important restriction is
imposed, however, that a given system is only allowed to flip once,
and not with itself. The practical reason is that repeated flips could
split off closed gluon loops quite easily, which tends to result in
bad agreement with data.
<p/>
As an option, singlet subsystems containing a junction may or may not
be allowed to take part in the flip step. Since the number of junction
systems is limited in this model the differences are not so important.
<br/><br/><table><tr><td><strong>ColourReconnection:m2Lambda </td><td></td><td> <input type="text" name="11" value="1." size="20"/> (<code>default = <strong>1.</strong></code>; <code>minimum = 0.25</code>; <code>maximum = 16.</code>)</td></tr></table>
The <i>m2Lambda</i> parameter used in the definition of the approximate
<i>lambda</i> measure above. It represents an approximate hadronic
mass-square scale, cf. <i>m0</i> in the previous model. Its value is
uncertain up to factors of 2, but the <i>lambda</i> change induced by
a potential move or flip is rather insensitive to the precise value,
owing to large cancellations.
<br/><br/><table><tr><td><strong>ColourReconnection:fracGluon </td><td></td><td> <input type="text" name="12" value="1." size="20"/> (<code>default = <strong>1.</strong></code>; <code>minimum = 0.</code>; <code>maximum = 1.</code>)</td></tr></table>
The probability that a given gluon will be considered for being moved.
It thus gives the average fraction of gluons being considered.
<br/><br/><table><tr><td><strong>ColourReconnection:dLambdaCut </td><td></td><td> <input type="text" name="13" value="0." size="20"/> (<code>default = <strong>0.</strong></code>; <code>minimum = 0.</code>; <code>maximum = 10.</code>)</td></tr></table>
Restrict gluon moves and colour flips to those that reduce <i>lambda</i>
by more than this amount. The larger this number, the fewer moves and flips
will be performed, but those that remain are the ones most likely to produce
large effects.
<br/><br/><table><tr><td><strong>ColourReconnection:flipMode </td><td> (<code>default = <strong>0</strong></code>; <code>minimum = 0</code>; <code>maximum = 2</code>)</td></tr></table>
Performing the flip step or not.
<br/>
<input type="radio" name="14" value="0" checked="checked"><strong>0 </strong>: No flip handling. <br/>
<input type="radio" name="14" value="1"><strong>1 </strong>: Allow flips, but not for strings in junction topologies. <br/>
<input type="radio" name="14" value="2"><strong>2 </strong>: Allow flips, including for strings in junction topologies. <br/>
<input type="hidden" name="saved" value="1"/>
<?php
echo "<input type='hidden' name='filepath' value='".$_GET["filepath"]."'/>"?>
<table width="100%"><tr><td align="right"><input type="submit" value="Save Settings" /></td></tr></table>
</form>
<?php
// Save handler for this (generated) settings page: for every form field whose
// submitted value differs from its documented default, append a
// "Key = value" line to the chosen card file. Fields are numbered "1".."14"
// to match the form inputs above.
if($_POST["saved"] == 1)
{
$filepath = $_POST["filepath"];
// Append mode: repeated saves accumulate lines in the same file.
$handle = fopen($filepath, 'a');

if($_POST["1"] != "on")
{
$data = "ColourReconnection:reconnect = ".$_POST["1"]."\n";
fwrite($handle,$data);
}
if($_POST["2"] != "0")
{
$data = "ColourReconnection:mode = ".$_POST["2"]."\n";
fwrite($handle,$data);
}
if($_POST["3"] != "1.8")
{
$data = "ColourReconnection:range = ".$_POST["3"]."\n";
fwrite($handle,$data);
}
if($_POST["4"] != "0.5")
{
$data = "ColourReconnection:m0 = ".$_POST["4"]."\n";
fwrite($handle,$data);
}
if($_POST["5"] != "9")
{
$data = "ColourReconnection:nColours = ".$_POST["5"]."\n";
fwrite($handle,$data);
}
if($_POST["6"] != "off")
{
$data = "ColourReconnection:sameNeighbourColours = ".$_POST["6"]."\n";
fwrite($handle,$data);
}
if($_POST["7"] != "on")
{
$data = "ColourReconnection:allowJunctions = ".$_POST["7"]."\n";
fwrite($handle,$data);
}
if($_POST["8"] != "0")
{
$data = "ColourReconnection:lambdaForm = ".$_POST["8"]."\n";
fwrite($handle,$data);
}
if($_POST["9"] != "1")
{
$data = "ColourReconnection:minimumGainJun = ".$_POST["9"]."\n";
fwrite($handle,$data);
}
if($_POST["10"] != "on")
{
$data = "ColourReconnection:allowDoubleJunRem = ".$_POST["10"]."\n";
fwrite($handle,$data);
}
if($_POST["11"] != "1.")
{
$data = "ColourReconnection:m2Lambda = ".$_POST["11"]."\n";
fwrite($handle,$data);
}
if($_POST["12"] != "1.")
{
$data = "ColourReconnection:fracGluon = ".$_POST["12"]."\n";
fwrite($handle,$data);
}
if($_POST["13"] != "0.")
{
$data = "ColourReconnection:dLambdaCut = ".$_POST["13"]."\n";
fwrite($handle,$data);
}
if($_POST["14"] != "0")
{
$data = "ColourReconnection:flipMode = ".$_POST["14"]."\n";
fwrite($handle,$data);
}
fclose($handle);
}
?>
</body>
</html>
<!-- Copyright (C) 2015 Torbjorn Sjostrand -->
| bsd-3-clause |
UCL-ShippingGroup/cartodb-1 | lib/assets/javascripts/cartodb/table/basemap/basemap_dropdown/pattern_basemap.js | 1729 |
/**
* Pattern image base layer
*
* - It extends from color basemap
*/
cdb.admin.BackgroundMapImageView = cdb.admin.BackgroundMapColorView.extend({

  // Clicking the swatch opens the image-picker dialog.
  events: {
    'click': '_openImagePicker'
  },

  initialize: function() {
    _.bindAll(this, 'setPattern');
    this.map = this.options.map;
    this.user = this.options.user;
    this.template = cdb.templates.getTemplate('table/views/basemap/pattern_basemap');
    // bindMap/_initBinds/_changeModel are not defined in this view --
    // presumably inherited from BackgroundMapColorView; TODO confirm.
    this.bindMap(this.map);
    this.map.bind('savingLayersFinish', this._changeModel, this);
    this._initBinds();
  },

  // Swap the map's base layer for a PlainLayer that tiles the given image URL.
  // Always returns false (callers appear to use it as a click handler).
  activate: function(url) {
    var lyr = new cdb.admin.PlainLayer({
      color: '',
      image: url,
      maxZoom: 28 //allow the user to zoom to the atom
    });
    this.model = lyr;
    this.map.changeProvider('leaflet', lyr);
    return false;
  },

  // Open the image-picker modal and apply the chosen file as the pattern.
  _openImagePicker: function(e) {
    this.killEvent(e);
    var dialog = new cdb.editor.ImagePickerView({
      user: this.user,
      kind: 'pattern'
    });
    dialog.appendToBody();
    dialog.bind('fileChosen', this.setPattern, this);
  },

  // Apply the chosen image URL: close open dialogs, activate the new layer,
  // re-render, update the dropdown thumbnail, and report a metrics event.
  setPattern: function(url) {
    cdb.god.trigger("closeDialogs");
    // Set new model
    this.activate(url);
    // Render color
    this.render();
    // Set general thumb
    //$("ul.options .basemap_dropdown .info strong").text("Image pattern");
    $("ul.options .basemap_dropdown a.thumb")
      .css({
        "background-image": "url(" + url + ")",
        "background-size": "34px",
        "background-position": "50% 50%",
        "background-color": "transparent"
      });
    this.selectButton();
    // event tracking "Applied pattern as basemap"
    cdb.god.trigger('metrics', 'pattern_basemap', {
      email: window.user_data.email
    });
  }

});
| bsd-3-clause |
chromium/chromium | third_party/blink/web_tests/external/wpt_automation/file-system-access/showDirectoryPicker-manual.https-automation.sub.js | 138 | const path =
String.raw`{{fs_path(resources/data)}}`.replace('wpt_automation', 'wpt');
testRunner.setFilePathForMockFileDialog(path);
| bsd-3-clause |
chromium/chromium | third_party/blink/renderer/core/layout/ng/inline/ng_physical_line_box_fragment_test.cc | 2844 | // Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "third_party/blink/renderer/core/layout/ng/inline/ng_physical_line_box_fragment.h"
#include "third_party/blink/renderer/core/layout/ng/inline/ng_inline_cursor.h"
#include "third_party/blink/renderer/core/layout/ng/ng_layout_test.h"
#include "third_party/blink/renderer/core/layout/ng/ng_physical_box_fragment.h"
namespace blink {
// Fixture with helpers to fetch the physical line box fragments generated
// for the block container with id="root" in the test document.
class NGPhysicalLineBoxFragmentTest : public NGLayoutTest {
 public:
  NGPhysicalLineBoxFragmentTest() : NGLayoutTest() {}

 protected:
  // Collects every line box fragment of #root, in line order.
  // #root must exist and its layout object must be a LayoutBlockFlow.
  Vector<const NGPhysicalLineBoxFragment*> GetLineBoxes() const {
    const Element* container = GetElementById("root");
    DCHECK(container);
    const LayoutObject* layout_object = container->GetLayoutObject();
    DCHECK(layout_object) << container;
    DCHECK(layout_object->IsLayoutBlockFlow()) << container;
    NGInlineCursor cursor(*To<LayoutBlockFlow>(layout_object));
    Vector<const NGPhysicalLineBoxFragment*> lines;
    for (cursor.MoveToFirstLine(); cursor; cursor.MoveToNextLine())
      lines.push_back(cursor.Current()->LineBoxFragment());
    return lines;
  }

  // Returns the first line box of #root, or nullptr if it has no lines.
  const NGPhysicalLineBoxFragment* GetLineBox() const {
    Vector<const NGPhysicalLineBoxFragment*> lines = GetLineBoxes();
    if (!lines.IsEmpty())
      return lines.front();
    return nullptr;
  }
};
// Asserts that |fragment| is a non-null box fragment generated by the
// element with the given |id|.
// NOTE(review): currently unused within this file — confirm it is still
// needed before removing.
#define EXPECT_BOX_FRAGMENT(id, fragment) \
  { \
    EXPECT_TRUE(fragment); \
    EXPECT_TRUE(fragment->IsBox()); \
    EXPECT_TRUE(fragment->GetNode()); \
    EXPECT_EQ(GetElementById(id), fragment->GetNode()); \
  }
// With a float appearing after the line break, only the second line box
// should report propagated descendants.
TEST_F(NGPhysicalLineBoxFragmentTest, HasPropagatedDescendantsFloat) {
  SetBodyInnerHTML(R"HTML(
    <!DOCTYPE html>
    <style>
    div {
      font-size: 10px;
      width: 10ch;
    }
    .float { float: left; }
    </style>
    <div id=root>12345678 12345<div class=float>float</div></div>
  )HTML");
  Vector<const NGPhysicalLineBoxFragment*> lines = GetLineBoxes();
  EXPECT_EQ(lines.size(), 2u);
  EXPECT_FALSE(lines[0]->HasPropagatedDescendants());
  EXPECT_TRUE(lines[1]->HasPropagatedDescendants());
}
// Same as the float case, but with an absolutely-positioned (out-of-flow)
// descendant: only the line containing it reports propagated descendants.
TEST_F(NGPhysicalLineBoxFragmentTest, HasPropagatedDescendantsOOF) {
  SetBodyInnerHTML(R"HTML(
    <!DOCTYPE html>
    <style>
    div {
      font-size: 10px;
      width: 10ch;
    }
    .abspos { position: absolute; }
    </style>
    <div id=root>12345678 12345<div class=abspos>abspos</div></div>
  )HTML");
  Vector<const NGPhysicalLineBoxFragment*> lines = GetLineBoxes();
  EXPECT_EQ(lines.size(), 2u);
  EXPECT_FALSE(lines[0]->HasPropagatedDescendants());
  EXPECT_TRUE(lines[1]->HasPropagatedDescendants());
}
} // namespace blink
| bsd-3-clause |
yusuke2255/dotty | src/dotty/tools/dotc/transform/Pickler.scala | 3886 | package dotty.tools.dotc
package transform
import core._
import Contexts.Context
import Decorators._
import tasty._
import config.Printers.{noPrinter, pickling}
import java.io.PrintStream
import Periods._
import Phases._
import Symbols._
import Flags.Module
import util.SourceFile
import collection.mutable
/** This phase pickles trees into TASTY sections and, when `-Ytest-pickler`
 *  is set, round-trips them through the unpickler and reports any
 *  difference as an error.
 */
class Pickler extends Phase {
  import ast.tpd._

  override def phaseName: String = "pickler"

  /** Dump `msg` into a file named `name` (used for pickling-diff files). */
  private def output(name: String, msg: String) = {
    val s = new PrintStream(name)
    s.print(msg)
    s.close
  }

  /** Printed form of each class's tree before pickling, kept so
   *  `testUnpickler` can diff against it afterwards.
   */
  private val beforePickling = new mutable.HashMap[ClassSymbol, String]

  /** Drop any elements of this list that are linked module classes of other elements in the list */
  private def dropCompanionModuleClasses(clss: List[ClassSymbol])(implicit ctx: Context): List[ClassSymbol] = {
    val companionModuleClasses =
      clss.filterNot(_ is Module).map(_.linkedClass).filterNot(_.isAbsent)
    clss.filterNot(companionModuleClasses.contains)
  }

  /** Pickle each top-level class slice of the current compilation unit. */
  override def run(implicit ctx: Context): Unit = {
    val unit = ctx.compilationUnit
    pickling.println(i"unpickling in run ${ctx.runId}")
    for { cls <- dropCompanionModuleClasses(topLevelClasses(unit.tpdTree))
          tree <- sliceTopLevel(unit.tpdTree, cls) } {
      // Remember the tree's printed form so testUnpickler can diff it later.
      if (ctx.settings.YtestPickler.value) beforePickling(cls) = tree.show
      val pickler = new TastyPickler()
      unit.picklers += (cls -> pickler)
      val treePkl = new TreePickler(pickler)
      treePkl.pickle(tree :: Nil)
      pickler.addrOfTree = treePkl.buf.addrOfTree
      pickler.addrOfSym = treePkl.addrOfSym
      if (unit.source.exists)
        pickleSourcefile(pickler, unit.source)
      if (tree.pos.exists)
        new PositionPickler(pickler, treePkl.buf.addrOfTree).picklePositions(tree :: Nil, tree.pos)

      def rawBytes = // not needed right now, but useful to print raw format.
        pickler.assembleParts().iterator.grouped(10).toList.zipWithIndex.map {
          case (row, i) => s"${i}0: ${row.mkString(" ")}"
        }
      // println(i"rawBytes = \n$rawBytes%\n%") // DEBUG
      if (pickling ne noPrinter) {
        println(i"**** pickled info of $cls")
        new TastyPrinter(pickler.assembleParts()).printContents()
      }
    }
  }

  /** Record the source file path in its own "Sourcefile" TASTY section. */
  private def pickleSourcefile(pickler: TastyPickler, source: SourceFile): Unit = {
    val buf = new TastyBuffer(10)
    pickler.newSection("Sourcefile", buf)
    buf.writeNat(pickler.nameBuffer.nameIndex(source.file.path).index)
  }

  override def runOn(units: List[CompilationUnit])(implicit ctx: Context): List[CompilationUnit] = {
    val result = super.runOn(units)
    // Verify pickling by unpickling in a fresh (next-run) context, if requested.
    if (ctx.settings.YtestPickler.value)
      testUnpickler(units)(ctx.fresh.setPeriod(Period(ctx.runId + 1, FirstPhaseId)))
    result
  }

  /** Unpickle what was just pickled and compare against the saved printouts. */
  private def testUnpickler(units: List[CompilationUnit])(implicit ctx: Context): Unit = {
    pickling.println(i"testing unpickler at run ${ctx.runId}")
    ctx.definitions.init
    val unpicklers =
      for (unit <- units; (cls, pickler) <- unit.picklers) yield {
        val unpickler = new DottyUnpickler(pickler.assembleParts())
        unpickler.enter(roots = Set())
        cls -> unpickler
      }
    pickling.println("************* entered toplevel ***********")
    for ((cls, unpickler) <- unpicklers) {
      val (unpickled, source) = unpickler.body(readPositions = true)
      testSame(i"$unpickled%\n%", beforePickling(cls), cls, source)
    }
  }

  /** Report an error (and dump both sides to files) when the unpickled
   *  printout differs from the pre-pickling one.
   */
  private def testSame(unpickled: String, previous: String, cls: ClassSymbol, source: SourceFile)(implicit ctx: Context) =
    if (previous != unpickled) {
      output("before-pickling.txt", previous)
      output("after-pickling.txt", unpickled)
      ctx.error(s"""pickling difference for ${cls.fullName} in $source, for details:
                   |
                   | diff before-pickling.txt after-pickling.txt""".stripMargin)
    }
}
| bsd-3-clause |
dim-dimus/hozdvor | protected/modules/rbac/components/ModuleManager.php | 3816 | <?php
/**
 * Class ModuleManager
 *
 * RBAC-aware module manager: filters the backend navigation menu and the
 * module list according to the current user's auth items.
 */
class ModuleManager extends \yupe\components\ModuleManager
{
    /**
     * Converts a route into the expected auth-item (role) name.
     * For the automatic filtering to work, auth items should follow the
     * Module.ControllerBackend.Action naming convention.
     *
     * @param string $route route such as "user/userBackend/create"
     * @return string role name such as "User.UserBackend.Create"
     */
    private function getRoleByRoute($route)
    {
        $segments = preg_split('/\//', trim($route, '/'), -1, PREG_SPLIT_NO_EMPTY);

        $capitalized = array();
        foreach ($segments as $segment) {
            $capitalized[] = ucfirst($segment);
        }

        return implode('.', $capitalized);
    }

    /**
     * Walks the menu tree and computes each item's visibility for the
     * current user. Items that already define "visible" are left untouched.
     *
     * @param array $menu menu tree
     * @return array menu tree with the "visible" attribute filled in
     */
    public function filterMenuVisibilityByUserRoles(array $menu)
    {
        foreach ($menu as $index => $entry) {
            $entryVisible = true;

            if (isset($entry['url']) && is_array($entry['url'])) {
                $role = $this->getRoleByRoute($entry['url'][0]);

                if (!isset($menu[$index]['visible'])) {
                    $user = Yii::app()->getUser();
                    $menu[$index]['visible'] = $user->checkAccess(AuthItem::ROLE_ADMIN)
                        || $user->checkAccess($role);
                }

                $entryVisible = $menu[$index]['visible'];
            }

            // Only recurse into submenus of items the user can actually see.
            if (isset($entry['items']) && is_array($entry['items']) && $entryVisible) {
                $menu[$index]['items'] = $this->filterMenuVisibilityByUserRoles($menu[$index]['items']);
            }
        }

        return $menu;
    }

    /**
     * Checks whether the current user may access at least one auth item
     * (task or nested operation) declared by the given module.
     * Administrators and modules without auth items always pass.
     *
     * @param CWebModule $module
     * @return bool
     */
    public function checkModuleRights(CWebModule $module)
    {
        $authItems = $module->getAuthItems();

        if (empty($authItems) || Yii::app()->getUser()->checkAccess(AuthItem::ROLE_ADMIN)) {
            return true;
        }

        foreach ($authItems as $task) {
            if (Yii::app()->getUser()->checkAccess($task['name'])) {
                return true;
            }

            foreach ($task['items'] as $operation) {
                if (Yii::app()->getUser()->checkAccess($operation['name'])) {
                    return true;
                }
            }
        }

        return false;
    }

    /**
     * Removes modules the current user has no rights to from the list,
     * preserving keys and order.
     *
     * @param array $modules module id => module instance
     * @return array filtered list
     */
    public function filterModulesListByUserRoles(array $modules)
    {
        $allowed = array();

        foreach ($modules as $id => $module) {
            if ($this->checkModuleRights($module)) {
                $allowed[$id] = $module;
            }
        }

        return $allowed;
    }

    /**
     * Returns the module list/navigation with RBAC filtering applied.
     *
     * @param bool $navigationOnly
     * @param bool $disableModule
     * @return array|mixed
     */
    public function getModules($navigationOnly = false, $disableModule = false)
    {
        $modules = parent::getModules($navigationOnly, $disableModule);

        if ($navigationOnly === true) {
            return $this->filterMenuVisibilityByUserRoles($modules);
        }

        $modules['modulesNavigation'] = $this->filterMenuVisibilityByUserRoles($modules['modulesNavigation']);
        $modules['modules'] = $this->filterModulesListByUserRoles($modules['modules']);

        return $modules;
    }
}
| bsd-3-clause |
eider-i128/fullserver | panelwww/player/engine/fsdomy/p/libs/plugins/modifier.markdown.php | 50497 | <?php
#
# Markdown - A text-to-HTML conversion tool for web writers
#
# PHP Markdown
# Copyright (c) 2004-2009 Michel Fortin
# <http://michelf.com/projects/php-markdown/>
#
# Original Markdown
# Copyright (c) 2004-2006 John Gruber
# <http://daringfireball.net/projects/markdown/>
#
define( 'MARKDOWN_VERSION', "1.0.1n" ); # Sat 10 Oct 2009

#
# Global default settings:
#

# Change to ">" for HTML output
@define( 'MARKDOWN_EMPTY_ELEMENT_SUFFIX', " />");

# Define the width of a tab for code blocks.
@define( 'MARKDOWN_TAB_WIDTH', 4 );

#
# WordPress settings:
#

# Set to true to let Markdown process posts and/or comments.
# (Disabled in this copy; the upstream default is true.)
@define( 'MARKDOWN_WP_POSTS', false );
@define( 'MARKDOWN_WP_COMMENTS', false );

### Standard Function Interface ###

# Name of the parser class instantiated by Markdown().
@define( 'MARKDOWN_PARSER_CLASS', 'Markdown_Parser' );
function Markdown($text) {
#
# Transform Markdown-formatted $text into HTML.
#
# A single parser instance is lazily created on first use and reused for
# every subsequent call.
#
	static $parser;

	if ($parser === null) {
		$class = MARKDOWN_PARSER_CLASS;
		$parser = new $class;
	}

	# Delegate the actual work to the shared parser.
	return $parser->transform($text);
}
### WordPress Plugin Interface ###
/*
Plugin Name: Markdown
Plugin URI: http://michelf.com/projects/php-markdown/
Description: <a href="http://daringfireball.net/projects/markdown/syntax">Markdown syntax</a> allows you to write using an easy-to-read, easy-to-write plain text format. Based on the original Perl version by <a href="http://daringfireball.net/">John Gruber</a>. <a href="http://michelf.com/projects/php-markdown/">More...</a>
Version: 1.0.1n
Author: Michel Fortin
Author URI: http://michelf.com/
*/
# Hook into WordPress only when loaded inside WordPress.
if (isset($wp_version)) {
	# More details about how it works here:
	# <http://michelf.com/weblog/2005/wordpress-text-flow-vs-markdown/>

	# Post content and excerpts
	# - Remove WordPress paragraph generator.
	# - Run Markdown on excerpt, then remove all tags.
	# - Add paragraph tag around the excerpt, but remove it for the excerpt rss.
	if (MARKDOWN_WP_POSTS) {
		remove_filter('the_content', 'wpautop');
		remove_filter('the_content_rss', 'wpautop');
		remove_filter('the_excerpt', 'wpautop');
		add_filter('the_content', 'Markdown', 6);
		add_filter('the_content_rss', 'Markdown', 6);
		add_filter('get_the_excerpt', 'Markdown', 6);
		add_filter('get_the_excerpt', 'trim', 7);
		add_filter('the_excerpt', 'mdwp_add_p');
		add_filter('the_excerpt_rss', 'mdwp_strip_p');

		remove_filter('content_save_pre', 'balanceTags', 50);
		remove_filter('excerpt_save_pre', 'balanceTags', 50);
		add_filter('the_content', 'balanceTags', 50);
		add_filter('get_the_excerpt', 'balanceTags', 9);
	}

	# Comments
	# - Remove WordPress paragraph generator.
	# - Remove WordPress auto-link generator.
	# - Scramble important tags before passing them to the kses filter.
	# - Run Markdown on excerpt then remove paragraph tags.
	if (MARKDOWN_WP_COMMENTS) {
		remove_filter('comment_text', 'wpautop', 30);
		remove_filter('comment_text', 'make_clickable');
		add_filter('pre_comment_content', 'Markdown', 6);
		add_filter('pre_comment_content', 'mdwp_hide_tags', 8);
		add_filter('pre_comment_content', 'mdwp_show_tags', 12);
		add_filter('get_comment_text', 'Markdown', 6);
		add_filter('get_comment_excerpt', 'Markdown', 6);
		add_filter('get_comment_excerpt', 'mdwp_strip_p', 7);

		global $mdwp_hidden_tags, $mdwp_placeholders;
		# Tags to protect from kses, and the rot13-scrambled placeholders
		# they are temporarily replaced with.
		$mdwp_hidden_tags = explode(' ',
			'<p> </p> <pre> </pre> <ol> </ol> <ul> </ul> <li> </li>');
		$mdwp_placeholders = explode(' ', str_rot13(
			'pEj07ZbbBZ U1kqgh4w4p pre2zmeN6K QTi31t9pre ol0MP1jzJR '.
			'ML5IjmbRol ulANi1NsGY J7zRLJqPul liA8ctl16T K9nhooUHli'));
	}

	# Wrap the excerpt in <p> tags unless it already starts with a block tag.
	function mdwp_add_p($text) {
		if (!preg_match('{^$|^<(p|ul|ol|dl|pre|blockquote)>}i', $text)) {
			$text = '<p>'.$text.'</p>';
			$text = preg_replace('{\n{2,}}', "</p>\n\n<p>", $text);
		}
		return $text;
	}

	# Strip paragraph tags (used for RSS excerpts).
	function mdwp_strip_p($t) { return preg_replace('{</?p>}i', '', $t); }

	# Swap protected tags for placeholders before kses runs...
	function mdwp_hide_tags($text) {
		global $mdwp_hidden_tags, $mdwp_placeholders;
		return str_replace($mdwp_hidden_tags, $mdwp_placeholders, $text);
	}
	# ...and swap them back afterwards.
	function mdwp_show_tags($text) {
		global $mdwp_hidden_tags, $mdwp_placeholders;
		return str_replace($mdwp_placeholders, $mdwp_hidden_tags, $text);
	}
}
### bBlog Plugin Info ###
function identify_modifier_markdown() {
#
# Describe this modifier to the bBlog plugin system.
#
	$info = array();
	$info['name']        = 'markdown';
	$info['type']        = 'modifier';
	$info['nicename']    = 'Markdown';
	$info['description'] = 'A text-to-HTML conversion tool for web writers';
	$info['authors']     = 'Michel Fortin and John Gruber';
	$info['licence']     = 'BSD-like';
	$info['version']     = MARKDOWN_VERSION;
	$info['help']        = '<a href="http://daringfireball.net/projects/markdown/syntax">Markdown syntax</a> allows you to write using an easy-to-read, easy-to-write plain text format. Based on the original Perl version by <a href="http://daringfireball.net/">John Gruber</a>. <a href="http://michelf.com/projects/php-markdown/">More...</a>';
	return $info;
}
### Smarty Modifier Interface ###
# Smarty modifier entry point: delegates straight to Markdown().
function smarty_modifier_markdown($text) {
	return Markdown($text);
}
### Textile Compatibility Mode ###
# Rename this file to "classTextile.php" and it can replace Textile everywhere:
# when loaded under that name, a stand-in Textile class is defined that runs
# Markdown (and SmartyPants, if available) instead of Textile.
if (strcasecmp(substr(__FILE__, -16), "classTextile.php") == 0) {
	# Try to include PHP SmartyPants. Should be in the same directory.
	@include_once 'smartypants.php';
	# Fake Textile class. It calls Markdown instead.
	class Textile {
		function TextileThis($text, $lite='', $encode='') {
			if ($lite == '' && $encode == '') $text = Markdown($text);
			if (function_exists('SmartyPants')) $text = SmartyPants($text);
			return $text;
		}
		# Fake restricted version: restrictions are not supported for now.
		function TextileRestricted($text, $lite='', $noimage='') {
			return $this->TextileThis($text, $lite);
		}
		# Workaround to ensure compatibility with TextPattern 4.0.3.
		function blockLite($text) { return $text; }
	}
}
#
# Markdown Parser Class
#
class Markdown_Parser {
	# Regex to match balanced [brackets].
	# Needed to insert a maximum bracket depth while converting to PHP.
	var $nested_brackets_depth = 6;
	var $nested_brackets_re;        # built in the constructor

	var $nested_url_parenthesis_depth = 4;
	var $nested_url_parenthesis_re;  # built in the constructor

	# Table of hash values for escaped characters:
	var $escape_chars = '\`*_{}[]()>#+-.!';
	var $escape_chars_re;            # built in the constructor

	# Change to ">" for HTML output.
	var $empty_element_suffix = MARKDOWN_EMPTY_ELEMENT_SUFFIX;
	var $tab_width = MARKDOWN_TAB_WIDTH;

	# Change to `true` to disallow markup or entities.
	var $no_markup = false;
	var $no_entities = false;

	# Predefined urls and titles for reference links and images.
	var $predef_urls = array();
	var $predef_titles = array();
	function Markdown_Parser() {
	#
	# Constructor function. Initialize appropriate member variables.
	#
		$this->_initDetab();
		$this->prepareItalicsAndBold();

		# Pre-build the bounded-depth regexes for balanced [brackets] and
		# balanced (parentheses) in URLs.
		$this->nested_brackets_re =
			str_repeat('(?>[^\[\]]+|\[', $this->nested_brackets_depth).
			str_repeat('\])*', $this->nested_brackets_depth);

		$this->nested_url_parenthesis_re =
			str_repeat('(?>[^()\s]+|\(', $this->nested_url_parenthesis_depth).
			str_repeat('(?>\)))*', $this->nested_url_parenthesis_depth);

		# Character class matching any escapable character.
		$this->escape_chars_re = '['.preg_quote($this->escape_chars).']';

		# Sort document, block, and span gamut in ascendent priority order.
		asort($this->document_gamut);
		asort($this->block_gamut);
		asort($this->span_gamut);
	}
	# Internal hashes used during transformation; reset by setup()/teardown().
	var $urls = array();
	var $titles = array();
	var $html_hashes = array();

	# Status flag to avoid invalid nesting of anchors.
	var $in_anchor = false;
function setup() {
#
# Called before the transformation process starts to setup parser
# states.
#
# Clear global hashes.
$this->urls = $this->predef_urls;
$this->titles = $this->predef_titles;
$this->html_hashes = array();
$in_anchor = false;
}
function teardown() {
#
# Called after the transformation process to clear any variable
# which may be taking up memory unnecessarly.
#
$this->urls = array();
$this->titles = array();
$this->html_hashes = array();
}
	function transform($text) {
	#
	# Main function. Performs some preprocessing on the input text
	# and pass it through the document gamut.
	#
	# Returns the rendered HTML followed by a trailing newline.
	#
		$this->setup();

		# Remove UTF-8 BOM and marker character in input, if present.
		# (\x1A is also the internal hash-token marker, so it must not
		# survive in user input.)
		$text = preg_replace('{^\xEF\xBB\xBF|\x1A}', '', $text);

		# Standardize line endings:
		#   DOS to Unix and Mac to Unix
		$text = preg_replace('{\r\n?}', "\n", $text);

		# Make sure $text ends with a couple of newlines:
		$text .= "\n\n";

		# Convert all tabs to spaces.
		$text = $this->detab($text);

		# Turn block-level HTML blocks into hash entries
		$text = $this->hashHTMLBlocks($text);

		# Strip any lines consisting only of spaces and tabs.
		# This makes subsequent regexen easier to write, because we can
		# match consecutive blank lines with /\n+/ instead of something
		# contorted like /[ ]*\n+/ .
		$text = preg_replace('/^[ ]+$/m', '', $text);

		# Run document gamut methods.
		foreach ($this->document_gamut as $method => $priority) {
			$text = $this->$method($text);
		}

		$this->teardown();

		return $text . "\n";
	}
	# Document-level transformations, run in ascending priority order.
	var $document_gamut = array(
		# Strip link definitions, store in hashes.
		"stripLinkDefinitions" => 20,
		"runBasicBlockGamut"   => 30,
		);
	function stripLinkDefinitions($text) {
	#
	# Strips link definitions from text, stores the URLs and titles in
	# hash references ($this->urls / $this->titles), and removes the
	# definition lines from the returned text.
	#
		$less_than_tab = $this->tab_width - 1;

		# Link defs are in the form: ^[id]: url "optional title"
		$text = preg_replace_callback('{
							^[ ]{0,'.$less_than_tab.'}\[(.+)\][ ]?:	# id = $1
							  [ ]*
							  \n?				# maybe *one* newline
							  [ ]*
							(?:
							  <(.+?)>			# url = $2
							|
							  (\S+?)			# url = $3
							)
							  [ ]*
							  \n?				# maybe one newline
							  [ ]*
							(?:
								(?<=\s)			# lookbehind for whitespace
								["(]
								(.*?)			# title = $4
								[")]
								[ ]*
							)?	# title is optional
							(?:\n+|\Z)
			}xm',
			array(&$this, '_stripLinkDefinitions_callback'),
			$text);
		return $text;
	}
	# Record one link definition ([id]: url "title") and drop it from the text.
	function _stripLinkDefinitions_callback($matches) {
		$link_id = strtolower($matches[1]);
		# $2 is the <bracketed> url form, $3 the bare form.
		$url = $matches[2] == '' ? $matches[3] : $matches[2];
		$this->urls[$link_id] = $url;
		$this->titles[$link_id] =& $matches[4];
		return ''; # String that will replace the block
	}
	function hashHTMLBlocks($text) {
	#
	# Replace block-level HTML in $text with opaque hash tokens so that
	# later span/block transformations leave the raw HTML untouched.
	# Returns the text with each matched block swapped for its token.
	#
		if ($this->no_markup)	return $text;

		$less_than_tab = $this->tab_width - 1;

		# Hashify HTML blocks:
		# We only want to do this for block-level HTML tags, such as headers,
		# lists, and tables. That's because we still want to wrap <p>s around
		# "paragraphs" that are wrapped in non-block-level tags, such as anchors,
		# phrase emphasis, and spans. The list of tags we're looking for is
		# hard-coded:
		#
		# *  List "a" is made of tags which can be both inline or block-level.
		#    These will be treated block-level when the start tag is alone on
		#    its line, otherwise they're not matched here and will be taken as
		#    inline later.
		# *  List "b" is made of tags which are always block-level;
		#
		$block_tags_a_re = 'ins|del';
		$block_tags_b_re = 'p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|address|'.
						   'script|noscript|form|fieldset|iframe|math';

		# Regular expression for the content of a block tag.
		$nested_tags_level = 4;
		$attr = '
			(?>				# optional tag attributes
			  \s			# starts with whitespace
			  (?>
				[^>"/]+		# text outside quotes
			  |
				/+(?!>)		# slash not followed by ">"
			  |
				"[^"]*"		# text inside double quotes (tolerate ">")
			  |
				\'[^\']*\'	# text inside single quotes (tolerate ">")
			  )*
			)?
			';
		$content =
			str_repeat('
				(?>
				  [^<]+			# content without tag
				|
				  <\2			# nested opening tag
					'.$attr.'	# attributes
					(?>
					  />
					|
					  >', $nested_tags_level).	# end of opening tag
					  '.*?'.					# last level nested tag content
			str_repeat('
					  </\2\s*>	# closing nested tag
					)
				  |
					<(?!/\2\s*>	# other tags with a different name
				  )
				)*',
				$nested_tags_level);
		$content2 = str_replace('\2', '\3', $content);

		# First, look for nested blocks, e.g.:
		# 	<div>
		# 		<div>
		# 		tags for inner block must be indented.
		# 		</div>
		# 	</div>
		#
		# The outermost tags must start at the left margin for this to match, and
		# the inner nested divs must be indented.
		# We need to do this before the next, more liberal match, because the next
		# match will start at the first `<div>` and stop at the first `</div>`.
		$text = preg_replace_callback('{(?>
			(?>
				(?<=\n\n)		# Starting after a blank line
				|				# or
				\A\n?			# the beginning of the doc
			)
			(						# save in $1

			  # Match from `\n<tag>` to `</tag>\n`, handling nested tags
			  # in between.

						[ ]{0,'.$less_than_tab.'}
						<('.$block_tags_b_re.')# start tag = $2
						'.$attr.'>			# attributes followed by > and \n
						'.$content.'		# content, support nesting
						</\2>				# the matching end tag
						[ ]*				# trailing spaces/tabs
						(?=\n+|\Z)	# followed by a newline or end of document

			| # Special version for tags of group a.

						[ ]{0,'.$less_than_tab.'}
						<('.$block_tags_a_re.')# start tag = $3
						'.$attr.'>[ ]*\n	# attributes followed by >
						'.$content2.'		# content, support nesting
						</\3>				# the matching end tag
						[ ]*				# trailing spaces/tabs
						(?=\n+|\Z)	# followed by a newline or end of document

			| # Special case just for <hr />. It was easier to make a special
			  # case than to make the other regex more complicated.

						[ ]{0,'.$less_than_tab.'}
						<(hr)				# start tag = $2
						'.$attr.'			# attributes
						/?>					# the matching end tag
						[ ]*
						(?=\n{2,}|\Z)		# followed by a blank line or end of document

			| # Special case for standalone HTML comments:

					[ ]{0,'.$less_than_tab.'}
					(?s:
						<!-- .*? -->
					)
					[ ]*
					(?=\n{2,}|\Z)		# followed by a blank line or end of document

			| # PHP and ASP-style processor instructions (<? and <%)

					[ ]{0,'.$less_than_tab.'}
					(?s:
						<([?%])			# $2
						.*?
						\2>
					)
					[ ]*
					(?=\n{2,}|\Z)		# followed by a blank line or end of document

			)
			)}Sxmi',
			array(&$this, '_hashHTMLBlocks_callback'),
			$text);

		return $text;
	}
	# Replace one matched HTML block with its hash token, padded with blank
	# lines so it stays a standalone block.
	function _hashHTMLBlocks_callback($matches) {
		$text = $matches[1];
		$key  = $this->hashBlock($text);
		return "\n\n$key\n\n";
	}
	function hashPart($text, $boundary = 'X') {
	#
	# Called whenever a tag must be hashed when a function inserts an atomic
	# element in the text stream. Passing $text through this function gives
	# a unique text token which will be reverted back when calling unhash.
	#
	# The $boundary argument specifies what character should be used to
	# surround the token. By convention, "B" is used for block elements that
	# need not be wrapped into paragraph tags at the end, ":" is used for
	# elements that are word separators and "X" is used in the general case.
	#
		# Swap back any tag hash found in $text so we do not have to `unhash`
		# multiple times at the end.
		$text = $this->unhash($text);

		# Then hash the block. The counter keeps every token unique for the
		# lifetime of the process.
		static $i = 0;
		$key = "$boundary\x1A" . ++$i . $boundary;
		$this->html_hashes[$key] = $text;
		return $key; # String that will replace the tag.
	}
	function hashBlock($text) {
	#
	# Shortcut function for hashPart with block-level boundaries.
	#
		return $this->hashPart($text, 'B');
	}
	var $block_gamut = array(
		#
		# These are all the transformations that form block-level
		# tags like paragraphs, headers, and list items.
		# Run in ascending priority order.
		#
		"doHeaders"         => 10,
		"doHorizontalRules" => 20,
		"doLists"           => 40,
		"doCodeBlocks"      => 50,
		"doBlockQuotes"     => 60,
		);
	function runBlockGamut($text) {
	#
	# Run block gamut tranformations.
	#
		# We need to escape raw HTML in Markdown source before doing anything
		# else. This need to be done for each block, and not only at the
		# begining in the Markdown function since hashed blocks can be part of
		# list items and could have been indented. Indented blocks would have
		# been seen as a code block in a previous pass of hashHTMLBlocks.
		$text = $this->hashHTMLBlocks($text);

		return $this->runBasicBlockGamut($text);
	}
	function runBasicBlockGamut($text) {
	#
	# Run block gamut tranformations, without hashing HTML blocks. This is
	# useful when HTML blocks are known to be already hashed, like in the first
	# whole-document pass.
	#
		foreach ($this->block_gamut as $method => $priority) {
			$text = $this->$method($text);
		}

		# Finally form paragraph and restore hashed blocks.
		$text = $this->formParagraphs($text);

		return $text;
	}
	function doHorizontalRules($text) {
		# Do Horizontal Rules: a line of three or more -, * or _ markers
		# (optionally space-separated) becomes a hashed <hr /> block.
		return preg_replace(
			'{
				^[ ]{0,3}	# Leading space
				([-*_])		# $1: First marker
				(?>			# Repeated marker group
					[ ]{0,2}	# Zero, one, or two spaces.
					\1			# Marker character
				){2,}		# Group repeated at least twice
				[ ]*		# Tailing spaces
				$			# End of line.
			}mx',
			"\n".$this->hashBlock("<hr$this->empty_element_suffix")."\n",
			$text);
	}
	var $span_gamut = array(
		#
		# These are all the transformations that occur *within* block-level
		# tags like paragraphs, headers, and list items.
		# Run in ascending priority order.
		#
		# Process character escapes, code spans, and inline HTML
		# in one shot.
		"parseSpan"           => -30,

		# Process anchor and image tags. Images must come first,
		# because ![foo][f] looks like an anchor.
		"doImages"            =>  10,
		"doAnchors"           =>  20,

		# Make links out of things like `<http://example.com/>`
		# Must come after doAnchors, because you can use < and >
		# delimiters in inline links like [this](<url>).
		"doAutoLinks"         =>  30,
		"encodeAmpsAndAngles" =>  40,

		"doItalicsAndBold"    =>  50,
		"doHardBreaks"        =>  60,
		);
	function runSpanGamut($text) {
	#
	# Run span gamut tranformations in ascending priority order.
	#
		foreach ($this->span_gamut as $method => $priority) {
			$text = $this->$method($text);
		}

		return $text;
	}
	function doHardBreaks($text) {
		# Do hard breaks: two or more trailing spaces before a newline
		# become a <br />.
		return preg_replace_callback('/ {2,}\n/',
			array(&$this, '_doHardBreaks_callback'), $text);
	}
	# Emit a hashed <br /> token for one hard-break match.
	function _doHardBreaks_callback($matches) {
		return $this->hashPart("<br$this->empty_element_suffix\n");
	}
	function doAnchors($text) {
	#
	# Turn Markdown link shortcuts into XHTML <a> tags.
	#
		# Guard against processing anchors inside anchors (links cannot nest).
		if ($this->in_anchor) return $text;
		$this->in_anchor = true;

		#
		# First, handle reference-style links: [link text] [id]
		#
		$text = preg_replace_callback('{
			(					# wrap whole match in $1
			  \[
				('.$this->nested_brackets_re.')	# link text = $2
			  \]

			  [ ]?				# one optional space
			  (?:\n[ ]*)?		# one optional newline followed by spaces

			  \[
				(.*?)		# id = $3
			  \]
			)
			}xs',
			array(&$this, '_doAnchors_reference_callback'), $text);

		#
		# Next, inline-style links: [link text](url "optional title")
		#
		$text = preg_replace_callback('{
			(				# wrap whole match in $1
			  \[
				('.$this->nested_brackets_re.')	# link text = $2
			  \]
			  \(			# literal paren
				[ \n]*
				(?:
					<(.+?)>	# href = $3
				|
					('.$this->nested_url_parenthesis_re.')	# href = $4
				)
				[ \n]*
				(			# $5
				  ([\'"])	# quote char = $6
				  (.*?)		# Title = $7
				  \6		# matching quote
				  [ \n]*	# ignore any spaces/tabs between closing quote and )
				)?			# title is optional
			  \)
			)
			}xs',
			array(&$this, '_doAnchors_inline_callback'), $text);

		#
		# Last, handle reference-style shortcuts: [link text]
		# These must come last in case you've also got [link text][1]
		# or [link text](/foo)
		#
		$text = preg_replace_callback('{
			(					# wrap whole match in $1
			  \[
				([^\[\]]+)		# link text = $2; can\'t contain [ or ]
			  \]
			)
			}xs',
			array(&$this, '_doAnchors_reference_callback'), $text);

		$this->in_anchor = false;
		return $text;
	}
	# Build an <a> tag for a reference-style link match; if the id is
	# unknown, the match is left untouched.
	function _doAnchors_reference_callback($matches) {
		$whole_match =  $matches[1];
		$link_text   =  $matches[2];
		$link_id     =& $matches[3];

		if ($link_id == "") {
			# for shortcut links like [this][] or [this].
			$link_id = $link_text;
		}

		# lower-case and turn embedded newlines into spaces
		$link_id = strtolower($link_id);
		$link_id = preg_replace('{[ ]?\n}', ' ', $link_id);

		if (isset($this->urls[$link_id])) {
			$url = $this->urls[$link_id];
			$url = $this->encodeAttribute($url);

			$result = "<a href=\"$url\"";
			if ( isset( $this->titles[$link_id] ) ) {
				$title = $this->titles[$link_id];
				$title = $this->encodeAttribute($title);
				$result .=  " title=\"$title\"";
			}

			$link_text = $this->runSpanGamut($link_text);
			$result .= ">$link_text</a>";
			# Hash the finished tag so later passes don't re-process it.
			$result = $this->hashPart($result);
		}
		else {
			$result = $whole_match;
		}
		return $result;
	}
	# Build an <a> tag for an inline link match: [text](url "title").
	function _doAnchors_inline_callback($matches) {
		$whole_match =  $matches[1];
		$link_text   =  $this->runSpanGamut($matches[2]);
		# $3 is the <bracketed> url form, $4 the bare form.
		$url         =  $matches[3] == '' ? $matches[4] : $matches[3];
		$title       =& $matches[7];

		$url = $this->encodeAttribute($url);

		$result = "<a href=\"$url\"";
		if (isset($title)) {
			$title = $this->encodeAttribute($title);
			$result .=  " title=\"$title\"";
		}

		# NOTE(review): the span gamut already ran on $link_text above; this
		# second pass is redundant but appears harmless since produced tags
		# are hashed — confirm before simplifying.
		$link_text = $this->runSpanGamut($link_text);
		$result .= ">$link_text</a>";

		return $this->hashPart($result);
	}
	function doImages($text) {
	#
	# Turn Markdown image shortcuts into <img> tags.
	#
		#
		# First, handle reference-style labeled images: ![alt text][id]
		#
		$text = preg_replace_callback('{
			(				# wrap whole match in $1
			  !\[
				('.$this->nested_brackets_re.')		# alt text = $2
			  \]

			  [ ]?				# one optional space
			  (?:\n[ ]*)?		# one optional newline followed by spaces

			  \[
				(.*?)		# id = $3
			  \]

			)
			}xs',
			array(&$this, '_doImages_reference_callback'), $text);

		#
		# Next, handle inline images:  ![alt text](url "optional title")
		# Don't forget: encode * and _
		#
		$text = preg_replace_callback('{
			(				# wrap whole match in $1
			  !\[
				('.$this->nested_brackets_re.')		# alt text = $2
			  \]
			  \s?			# One optional whitespace character
			  \(			# literal paren
				[ \n]*
				(?:
					<(\S*)>	# src url = $3
				|
					('.$this->nested_url_parenthesis_re.')	# src url = $4
				)
				[ \n]*
				(			# $5
				  ([\'"])	# quote char = $6
				  (.*?)		# title = $7
				  \6		# matching quote
				  [ \n]*
				)?			# title is optional
			  \)
			)
			}xs',
			array(&$this, '_doImages_inline_callback'), $text);

		return $text;
	}
	# Build an <img> tag for a reference-style image match; unknown link ids
	# leave the match untouched.
	function _doImages_reference_callback($matches) {
		$whole_match = $matches[1];
		$alt_text    = $matches[2];
		$link_id     = strtolower($matches[3]);

		if ($link_id == "") {
			$link_id = strtolower($alt_text); # for shortcut links like ![this][].
		}

		$alt_text = $this->encodeAttribute($alt_text);
		if (isset($this->urls[$link_id])) {
			$url = $this->encodeAttribute($this->urls[$link_id]);
			$result = "<img src=\"$url\" alt=\"$alt_text\"";
			if (isset($this->titles[$link_id])) {
				$title = $this->titles[$link_id];
				$title = $this->encodeAttribute($title);
				$result .=  " title=\"$title\"";
			}
			$result .= $this->empty_element_suffix;
			# Hash the finished tag so later passes don't re-process it.
			$result = $this->hashPart($result);
		}
		else {
			# If there's no such link ID, leave intact:
			$result = $whole_match;
		}

		return $result;
	}
	# Build an <img> tag for an inline image match: ![alt](url "title").
	function _doImages_inline_callback($matches) {
		$whole_match = $matches[1];
		$alt_text    = $matches[2];
		# $3 is the <bracketed> url form, $4 the bare form.
		$url         = $matches[3] == '' ? $matches[4] : $matches[3];
		$title       =& $matches[7];

		$alt_text = $this->encodeAttribute($alt_text);
		$url = $this->encodeAttribute($url);
		$result = "<img src=\"$url\" alt=\"$alt_text\"";
		if (isset($title)) {
			$title = $this->encodeAttribute($title);
			$result .=  " title=\"$title\""; # $title already quoted
		}
		$result .= $this->empty_element_suffix;

		return $this->hashPart($result);
	}
#
# Convert both setext-style (underlined) and atx-style (#-prefixed)
# Markdown headers in $text into hashed <h1>..<h6> blocks.
#
function doHeaders($text) {
# Setext-style headers:
# Header 1
# ========
#
# Header 2
# --------
#
$text = preg_replace_callback('{ ^(.+?)[ ]*\n(=+|-+)[ ]*\n+ }mx',
array(&$this, '_doHeaders_callback_setext'), $text);
# atx-style headers:
# # Header 1
# ## Header 2
# ## Header 2 with closing hashes ##
# ...
# ###### Header 6
#
$text = preg_replace_callback('{
^(\#{1,6}) # $1 = string of #\'s
[ ]*
(.+?) # $2 = Header text
[ ]*
\#* # optional closing #\'s (not counted)
\n+
}xm',
array(&$this, '_doHeaders_callback_atx'), $text);
return $text;
}
#
# doHeaders() callback for setext-style headers.
# $matches: [1] header text, [2] the underline run ('=' or '-').
#
function _doHeaders_callback_setext($matches) {
    # Terrible hack to check we haven't found an empty list item.
    if ($matches[2] == '-' && preg_match('{^-(?: |$)}', $matches[1]))
        return $matches[0];
    # '=' underline means <h1>, '-' means <h2>.
    # Use [0] for the string offset: the curly-brace form ($str{0}) was
    # deprecated in PHP 7.4 and removed in PHP 8.
    $level = $matches[2][0] == '=' ? 1 : 2;
    $block = "<h$level>".$this->runSpanGamut($matches[1])."</h$level>";
    return "\n" . $this->hashBlock($block) . "\n\n";
}
#
# doHeaders() callback for atx-style headers ("## Header").
# $matches: [1] run of leading '#' characters, [2] header text.
#
function _doHeaders_callback_atx($matches) {
    # The header level is simply how many leading '#' were matched.
    $level = strlen($matches[1]);
    $contents = $this->runSpanGamut($matches[2]);
    $block = sprintf('<h%1$d>%2$s</h%1$d>', $level, $contents);
    return "\n" . $this->hashBlock($block) . "\n\n";
}
#
# Form HTML ordered (numbered) and unordered (bulleted) lists.
# Builds one big regex per marker family (bullets vs. numbers) and hands
# each whole matched list to _doLists_callback.
#
function doLists($text) {
#
# Form HTML ordered (numbered) and unordered (bulleted) lists.
#
$less_than_tab = $this->tab_width - 1;
# Re-usable patterns to match list item bullets and number markers:
$marker_ul_re = '[*+-]';
$marker_ol_re = '\d+[.]';
$marker_any_re = "(?:$marker_ul_re|$marker_ol_re)";
# Each pass matches one marker family and uses the *other* family to
# detect where an adjacent list of the other kind begins.
$markers_relist = array(
$marker_ul_re => $marker_ol_re,
$marker_ol_re => $marker_ul_re,
);
foreach ($markers_relist as $marker_re => $other_marker_re) {
# Re-usable pattern to match any entirel ul or ol list:
$whole_list_re = '
( # $1 = whole list
( # $2
([ ]{0,'.$less_than_tab.'}) # $3 = number of spaces
('.$marker_re.') # $4 = first list item marker
[ ]+
)
(?s:.+?)
( # $5
\z
|
\n{2,}
(?=\S)
(?! # Negative lookahead for another list item marker
[ ]*
'.$marker_re.'[ ]+
)
|
(?= # Lookahead for another kind of list
\n
\3 # Must have the same indentation
'.$other_marker_re.'[ ]+
)
)
)
'; // mx
# We use a different prefix before nested lists than top-level lists.
# See extended comment in _ProcessListItems().
if ($this->list_level) {
$text = preg_replace_callback('{
^
'.$whole_list_re.'
}mx',
array(&$this, '_doLists_callback'), $text);
}
else {
$text = preg_replace_callback('{
(?:(?<=\n)\n|\A\n?) # Must eat the newline
'.$whole_list_re.'
}mx',
array(&$this, '_doLists_callback'), $text);
}
}
return $text;
}
#
# doLists() callback: wrap one matched list in <ul>/<ol> and process
# its items. $matches: [1] whole list text, [4] first item marker.
#
function _doLists_callback($matches) {
    # Re-usable patterns to match list item bullets and number markers:
    $marker_ul_re = '[*+-]';
    $marker_ol_re = '\d+[.]';
    $list = $matches[1];
    # The first marker decides whether this is a bulleted or numbered list.
    $list_type = preg_match("/$marker_ul_re/", $matches[4]) ? "ul" : "ol";
    # Items of this list must use the same marker family as the first one.
    # (The original combined ul|ol pattern assigned here was dead code:
    # it was always overwritten by this line.)
    $marker_any_re = ( $list_type == "ul" ? $marker_ul_re : $marker_ol_re );
    $list .= "\n";
    $result = $this->processListItems($list, $marker_any_re);
    $result = $this->hashBlock("<$list_type>\n" . $result . "</$list_type>");
    return "\n". $result ."\n\n";
}
# Current list nesting depth during doLists()/processListItems();
# zero when not inside any list (see comment in processListItems()).
var $list_level = 0;
#
# Split the text of one list into individual items and convert each to
# an <li> element via _processListItems_callback.
# $marker_any_re is the marker pattern for this list's family (ul or ol).
#
function processListItems($list_str, $marker_any_re) {
#
# Process the contents of a single ordered or unordered list, splitting it
# into individual list items.
#
# The $this->list_level global keeps track of when we're inside a list.
# Each time we enter a list, we increment it; when we leave a list,
# we decrement. If it's zero, we're not in a list anymore.
#
# We do this because when we're not inside a list, we want to treat
# something like this:
#
# I recommend upgrading to version
# 8. Oops, now this line is treated
# as a sub-list.
#
# As a single paragraph, despite the fact that the second line starts
# with a digit-period-space sequence.
#
# Whereas when we're inside a list (or sub-list), that line will be
# treated as the start of a sub-list. What a kludge, huh? This is
# an aspect of Markdown's syntax that's hard to parse perfectly
# without resorting to mind-reading. Perhaps the solution is to
# change the syntax rules such that sub-lists must start with a
# starting cardinal number; e.g. "1." or "a.".
$this->list_level++;
# trim trailing blank lines:
$list_str = preg_replace("/\n{2,}\\z/", "\n", $list_str);
$list_str = preg_replace_callback('{
(\n)? # leading line = $1
(^[ ]*) # leading whitespace = $2
('.$marker_any_re.' # list marker and space = $3
(?:[ ]+|(?=\n)) # space only required if item is not empty
)
((?s:.*?)) # list item text = $4
(?:(\n+(?=\n))|\n) # tailing blank line = $5
(?= \n* (\z | \2 ('.$marker_any_re.') (?:[ ]+|(?=\n))))
}xm',
array(&$this, '_processListItems_callback'), $list_str);
$this->list_level--;
return $list_str;
}
#
# processListItems() callback: turn one matched list item into <li>.
# Loose items (surrounded by blank lines) get full block processing;
# tight items only get span processing plus sub-list recursion.
#
function _processListItems_callback($matches) {
$item = $matches[4];
# By-reference so the optional groups can be tested without notices.
$leading_line =& $matches[1];
$leading_space =& $matches[2];
$marker_space = $matches[3];
$tailing_blank_line =& $matches[5];
if ($leading_line || $tailing_blank_line ||
preg_match('/\n{2,}/', $item))
{
# Replace marker with the appropriate whitespace indentation
$item = $leading_space . str_repeat(' ', strlen($marker_space)) . $item;
$item = $this->runBlockGamut($this->outdent($item)."\n");
}
else {
# Recursion for sub-lists:
$item = $this->doLists($this->outdent($item));
$item = preg_replace('/\n+$/', '', $item);
$item = $this->runSpanGamut($item);
}
return "<li>" . $item . "</li>\n";
}
#
# Find indented (one tab / tab-width spaces) blocks in $text and hand
# them to _doCodeBlocks_callback for conversion to <pre><code>.
#
function doCodeBlocks($text) {
#
# Process Markdown `<pre><code>` blocks.
#
$text = preg_replace_callback('{
(?:\n\n|\A\n?)
( # $1 = the code block -- one or more lines, starting with a space/tab
(?>
[ ]{'.$this->tab_width.'} # Lines must start with a tab or a tab-width of spaces
.*\n+
)+
)
((?=^[ ]{0,'.$this->tab_width.'}\S)|\Z) # Lookahead for non-space at line-start, or end of doc
}xm',
array(&$this, '_doCodeBlocks_callback'), $text);
return $text;
}
#
# doCodeBlocks() callback: outdent, HTML-escape and wrap one code block
# in a hashed <pre><code> element.
#
function _doCodeBlocks_callback($matches) {
$codeblock = $matches[1];
# Remove one level of indentation (the level that marked it as code).
$codeblock = $this->outdent($codeblock);
# Escape &, < and > but keep quotes as-is inside code.
$codeblock = htmlspecialchars($codeblock, ENT_NOQUOTES);
# trim leading newlines and trailing newlines
$codeblock = preg_replace('/\A\n+|\n+\z/', '', $codeblock);
$codeblock = "<pre><code>$codeblock\n</code></pre>";
return "\n\n".$this->hashBlock($codeblock)."\n\n";
}
#
# Create a code span markup for $code. Called from handleSpanToken.
#
function makeCodeSpan($code) {
    # Trim the delimiters' padding, escape HTML special characters
    # (keeping quotes), then hash the <code> element so later passes
    # leave it alone.
    $escaped = htmlspecialchars(trim($code), ENT_NOQUOTES);
    return $this->hashPart('<code>' . $escaped . '</code>');
}
# Regex fragments matching one-char emphasis markers (* or _), keyed by
# which marker is currently open ('' = none open, so match an opener).
var $em_relist = array(
'' => '(?:(?<!\*)\*(?!\*)|(?<!_)_(?!_))(?=\S|$)(?![.,:;]\s)',
'*' => '(?<=\S|^)(?<!\*)\*(?!\*)',
'_' => '(?<=\S|^)(?<!_)_(?!_)',
);
# Same, for two-char (strong) markers ** and __.
var $strong_relist = array(
'' => '(?:(?<!\*)\*\*(?!\*)|(?<!_)__(?!_))(?=\S|$)(?![.,:;]\s)',
'**' => '(?<=\S|^)(?<!\*)\*\*(?!\*)',
'__' => '(?<=\S|^)(?<!_)__(?!_)',
);
# Same, for three-char (em + strong) markers *** and ___.
var $em_strong_relist = array(
'' => '(?:(?<!\*)\*\*\*(?!\*)|(?<!_)___(?!_))(?=\S|$)(?![.,:;]\s)',
'***' => '(?<=\S|^)(?<!\*)\*\*\*(?!\*)',
'___' => '(?<=\S|^)(?<!_)___(?!_)',
);
# Filled by prepareItalicsAndBold(): combined token regexes, keyed by
# the concatenation of the currently open em and strong markers.
var $em_strong_prepared_relist;
#
# Precompute, for every (open em marker, open strong marker) state, the
# single alternation regex doItalicsAndBold() uses to find the next token.
#
function prepareItalicsAndBold() {
#
# Prepare regular expressions for searching emphasis tokens in any
# context.
#
foreach ($this->em_relist as $em => $em_re) {
foreach ($this->strong_relist as $strong => $strong_re) {
# Construct list of allowed token expressions.
$token_relist = array();
if (isset($this->em_strong_relist["$em$strong"])) {
# Three-char token must come first so it wins over */** alone.
$token_relist[] = $this->em_strong_relist["$em$strong"];
}
$token_relist[] = $em_re;
$token_relist[] = $strong_re;
# Construct master expression from list.
$token_re = '{('. implode('|', $token_relist) .')}';
$this->em_strong_prepared_relist["$em$strong"] = $token_re;
}
}
}
#
# Convert emphasis markers (*, _, **, __, ***, ___) in $text into
# <em>/<strong> spans. A token stack tracks the currently open markers
# so nested and three-character emphasis close correctly.
#
function doItalicsAndBold($text) {
    $token_stack = array('');   # markers currently open, innermost first
    $text_stack = array('');    # text accumulated for each open marker
    $em = '';                   # em marker currently open ('' if none)
    $strong = '';               # strong marker currently open ('' if none)
    $tree_char_em = false;      # true while inside a ***three-char*** span
    while (1) {
        #
        # Get prepared regular expression for searching emphasis tokens
        # in current context.
        #
        $token_re = $this->em_strong_prepared_relist["$em$strong"];
        #
        # Each loop iteration searches for the next emphasis token.
        # Each token is then passed to handleSpanToken.
        #
        $parts = preg_split($token_re, $text, 2, PREG_SPLIT_DELIM_CAPTURE);
        $text_stack[0] .= $parts[0];
        $token =& $parts[1];
        $text =& $parts[2];
        if (empty($token)) {
            # Reached end of text span: empty stack without emitting
            # any more emphasis.
            while ($token_stack[0]) {
                $text_stack[1] .= array_shift($token_stack);
                $text_stack[0] .= array_shift($text_stack);
            }
            break;
        }
        $token_len = strlen($token);
        if ($tree_char_em) {
            # Reached closing marker while inside a three-char emphasis.
            if ($token_len == 3) {
                # Three-char closing marker, close em and strong.
                array_shift($token_stack);
                $span = array_shift($text_stack);
                $span = $this->runSpanGamut($span);
                $span = "<strong><em>$span</em></strong>";
                $text_stack[0] .= $this->hashPart($span);
                $em = '';
                $strong = '';
            } else {
                # Other closing marker: close one em or strong and
                # change current token state to match the other.
                # ($token[0]: the curly-brace string offset syntax was
                # removed in PHP 8.)
                $token_stack[0] = str_repeat($token[0], 3-$token_len);
                $tag = $token_len == 2 ? "strong" : "em";
                $span = $text_stack[0];
                $span = $this->runSpanGamut($span);
                $span = "<$tag>$span</$tag>";
                $text_stack[0] = $this->hashPart($span);
                $$tag = ''; # $$tag stands for $em or $strong
            }
            $tree_char_em = false;
        } else if ($token_len == 3) {
            if ($em) {
                # Reached closing marker for both em and strong.
                # Closing strong marker:
                for ($i = 0; $i < 2; ++$i) {
                    $shifted_token = array_shift($token_stack);
                    $tag = strlen($shifted_token) == 2 ? "strong" : "em";
                    $span = array_shift($text_stack);
                    $span = $this->runSpanGamut($span);
                    $span = "<$tag>$span</$tag>";
                    $text_stack[0] .= $this->hashPart($span);
                    $$tag = ''; # $$tag stands for $em or $strong
                }
            } else {
                # Reached opening three-char emphasis marker. Push on token
                # stack; will be handled by the special condition above.
                $em = $token[0];
                $strong = "$em$em";
                array_unshift($token_stack, $token);
                array_unshift($text_stack, '');
                $tree_char_em = true;
            }
        } else if ($token_len == 2) {
            if ($strong) {
                # Unwind any dangling emphasis marker:
                if (strlen($token_stack[0]) == 1) {
                    $text_stack[1] .= array_shift($token_stack);
                    $text_stack[0] .= array_shift($text_stack);
                }
                # Closing strong marker:
                array_shift($token_stack);
                $span = array_shift($text_stack);
                $span = $this->runSpanGamut($span);
                $span = "<strong>$span</strong>";
                $text_stack[0] .= $this->hashPart($span);
                $strong = '';
            } else {
                array_unshift($token_stack, $token);
                array_unshift($text_stack, '');
                $strong = $token;
            }
        } else {
            # Here $token_len == 1
            if ($em) {
                if (strlen($token_stack[0]) == 1) {
                    # Closing emphasis marker:
                    array_shift($token_stack);
                    $span = array_shift($text_stack);
                    $span = $this->runSpanGamut($span);
                    $span = "<em>$span</em>";
                    $text_stack[0] .= $this->hashPart($span);
                    $em = '';
                } else {
                    $text_stack[0] .= $token;
                }
            } else {
                array_unshift($token_stack, $token);
                array_unshift($text_stack, '');
                $em = $token;
            }
        }
    }
    return $text_stack[0];
}
#
# Find '>'-prefixed blockquote runs in $text and hand each to
# _doBlockQuotes_callback for conversion to <blockquote>.
#
function doBlockQuotes($text) {
$text = preg_replace_callback('/
( # Wrap whole match in $1
(?>
^[ ]*>[ ]? # ">" at the start of a line
.+\n # rest of the first line
(.+\n)* # subsequent consecutive lines
\n* # blanks
)+
)
/xm',
array(&$this, '_doBlockQuotes_callback'), $text);
return $text;
}
#
# doBlockQuotes() callback: strip one level of '>' quoting, recurse into
# the quoted content, indent it with two spaces and wrap in <blockquote>.
#
function _doBlockQuotes_callback($matches) {
$bq = $matches[1];
# trim one level of quoting - trim whitespace-only lines
$bq = preg_replace('/^[ ]*>[ ]?|^[ ]+$/m', '', $bq);
$bq = $this->runBlockGamut($bq); # recurse
# Indent the whole quote body by two spaces.
$bq = preg_replace('/^/m', " ", $bq);
# These leading spaces cause problem with <pre> content,
# so we need to fix that:
$bq = preg_replace_callback('{(\s*<pre>.+?</pre>)}sx',
array(&$this, '_doBlockQuotes_callback2'), $bq);
return "\n". $this->hashBlock("<blockquote>\n$bq\n</blockquote>")."\n\n";
}
#
# Remove the two-space indentation that _doBlockQuotes_callback added
# from inside <pre> blocks, where leading whitespace is significant.
#
function _doBlockQuotes_callback2($matches) {
    $pre = $matches[1];
    # _doBlockQuotes_callback indents every line with two spaces
    # (preg_replace('/^/m', "  ", ...)), so strip exactly two spaces here;
    # stripping only one would leave stray indentation inside <pre>.
    $pre = preg_replace('/^  /m', '', $pre);
    return $pre;
}
#
# Split $text on blank lines and wrap each non-hashed chunk in <p> tags;
# chunks that are hash tokens (B\x1A<n>B, produced by hashBlock) are
# swapped back to their stored HTML instead.
#
function formParagraphs($text) {
#
# Params:
# $text - string to process with html <p> tags
#
# Strip leading and trailing lines:
$text = preg_replace('/\A\n+|\n+\z/', '', $text);
$grafs = preg_split('/\n{2,}/', $text, -1, PREG_SPLIT_NO_EMPTY);
#
# Wrap <p> tags and unhashify HTML blocks
#
foreach ($grafs as $key => $value) {
if (!preg_match('/^B\x1A[0-9]+B$/', $value)) {
# Is a paragraph.
$value = $this->runSpanGamut($value);
$value = preg_replace('/^([ ]*)/', "<p>", $value);
$value .= "</p>";
$grafs[$key] = $this->unhash($value);
}
else {
# Is a block.
# Modify elements of @grafs in-place...
$graf = $value;
$block = $this->html_hashes[$graf];
$graf = $block;
// if (preg_match('{
// \A
// ( # $1 = <div> tag
// <div \s+
// [^>]*
// \b
// markdown\s*=\s* ([\'"]) # $2 = attr quote char
// 1
// \2
// [^>]*
// >
// )
// ( # $3 = contents
// .*
// )
// (</div>) # $4 = closing tag
// \z
// }xs', $block, $matches))
// {
// list(, $div_open, , $div_content, $div_close) = $matches;
//
// # We can't call Markdown(), because that resets the hash;
// # that initialization code should be pulled into its own sub, though.
// $div_content = $this->hashHTMLBlocks($div_content);
//
// # Run document gamut methods on the content.
// foreach ($this->document_gamut as $method => $priority) {
// $div_content = $this->$method($div_content);
// }
//
// $div_open = preg_replace(
// '{\smarkdown\s*=\s*([\'"]).+?\1}', '', $div_open);
//
// $graf = $div_open . "\n" . $div_content . "\n" . $div_close;
// }
$grafs[$key] = $graf;
}
}
return implode("\n\n", $grafs);
}
#
# Encode text for a double-quoted HTML attribute. This function
# is *not* suitable for attributes enclosed in single quotes.
#
function encodeAttribute($text) {
    # Handle & and < first, then neutralize double quotes.
    $encoded = $this->encodeAmpsAndAngles($text);
    return str_replace('"', '&quot;', $encoded);
}
#
# Smart processing for ampersands and angle brackets that need to
# be encoded. Valid character entities are left alone unless the
# no-entities mode is set.
#
function encodeAmpsAndAngles($text) {
    if ($this->no_entities) {
        # Encode every ampersand, existing entities included.
        $text = str_replace('&', '&amp;', $text);
    } else {
        # Ampersand-encoding based entirely on Nat Irons's Amputator
        # MT plugin: <http://bumppo.net/projects/amputator/>
        # Only encode ampersands that do not start a valid-looking entity.
        $text = preg_replace('/&(?!#?[xX]?(?:[0-9a-fA-F]+|\w+);)/',
            '&amp;', $text);
    }
    # Encode remaining <'s
    $text = str_replace('<', '&lt;', $text);
    return $text;
}
#
# Convert automatic links: <http://url> and <address@domain> forms.
#
function doAutoLinks($text) {
# URL autolinks for a fixed set of schemes.
$text = preg_replace_callback('{<((https?|ftp|dict):[^\'">\s]+)>}i',
array(&$this, '_doAutoLinks_url_callback'), $text);
# Email addresses: <address@domain.foo>
$text = preg_replace_callback('{
<
(?:mailto:)?
(
(?:
[-!#$%&\'*+/=?^_`.{|}~\w\x80-\xFF]+
|
".*?"
)
\@
(?:
[-a-z0-9\x80-\xFF]+(\.[-a-z0-9\x80-\xFF]+)*\.[a-z]+
|
\[[\d.a-fA-F:]+\] # IPv4 & IPv6
)
)
>
}xi',
array(&$this, '_doAutoLinks_email_callback'), $text);
return $text;
}
#
# doAutoLinks() callback for bare <http://...> style URLs.
#
function _doAutoLinks_url_callback($matches) {
    # For autolinks the link text and the target are the same URL.
    $href = $this->encodeAttribute($matches[1]);
    return $this->hashPart(sprintf('<a href="%1$s">%1$s</a>', $href));
}
#
# doAutoLinks() callback for <address@domain> autolinks: obfuscate the
# address and hash the resulting mailto link.
#
function _doAutoLinks_email_callback($matches) {
    return $this->hashPart($this->encodeEmailAddress($matches[1]));
}
function encodeEmailAddress($addr) {
#
# Input: an email address, e.g. "foo@example.com"
#
# Output: the email address as a mailto link, with each character
# of the address encoded as either a decimal or hex entity, in
# the hopes of foiling most address harvesting spam bots. E.g.:
#
# <p><a href="mailto:foo
# @example.co
# m">foo@exampl
# e.com</a></p>
#
# Based on a filter by Matthew Wickline, posted to BBEdit-Talk.
# With some optimizations by Milian Wolff.
#
$addr = "mailto:" . $addr;
# Split into individual characters (UTF-8 safe: splits between bytes
# only at character boundaries of the original string positions).
$chars = preg_split('/(?<!^)(?!$)/', $addr);
$seed = (int)abs(crc32($addr) / strlen($addr)); # Deterministic seed.
foreach ($chars as $key => $char) {
$ord = ord($char);
# Ignore non-ascii chars.
if ($ord < 128) {
$r = ($seed * (1 + $key)) % 100; # Pseudo-random function.
# roughly 10% raw, 45% hex, 45% dec
# '@' *must* be encoded. I insist.
if ($r > 90 && $char != '@') /* do nothing */;
else if ($r < 45) $chars[$key] = '&#x'.dechex($ord).';';
else $chars[$key] = '&#'.$ord.';';
}
}
$addr = implode('', $chars);
# Skip the first 7 characters: strlen("mailto:") == 7.
$text = implode('', array_slice($chars, 7)); # text without `mailto:`
$addr = "<a href=\"$addr\">$text</a>";
return $addr;
}
#
# Tokenize span-level text: hash embedded HTML tags, escaped characters
# and code spans, passing each found token to handleSpanToken.
#
function parseSpan($str) {
#
# Take the string $str and parse it into tokens, hashing embeded HTML,
# escaped characters and handling code spans.
#
$output = '';
$span_re = '{
(
\\\\'.$this->escape_chars_re.'
|
(?<![`\\\\])
`+ # code span marker
'.( $this->no_markup ? '' : '
|
<!-- .*? --> # comment
|
<\?.*?\?> | <%.*?%> # processing instruction
|
<[/!$]?[-a-zA-Z0-9:_]+ # regular tags
(?>
\s
(?>[^"\'>]+|"[^"]*"|\'[^\']*\')*
)?
>
').'
)
}xs';
while (1) {
#
# Each loop iteration search for either the next tag, the next
# openning code span marker, or the next escaped character.
# Each token is then passed to handleSpanToken.
#
$parts = preg_split($span_re, $str, 2, PREG_SPLIT_DELIM_CAPTURE);
# Create token from text preceding tag.
if ($parts[0] != "") {
$output .= $parts[0];
}
# Check if we reach the end.
if (isset($parts[1])) {
# handleSpanToken may consume more of $parts[2] (e.g. a code span).
$output .= $this->handleSpanToken($parts[1], $parts[2]);
$str = $parts[2];
}
else {
break;
}
}
return $output;
}
#
# Handle $token provided by parseSpan by determining its nature and
# returning the corresponding value that should replace it.
# $str is the remaining text after the token; it is passed by reference
# and consumed when a code span's closing marker is found in it.
#
function handleSpanToken($token, &$str) {
    # String offsets use []: the curly-brace form ($token{0}) was
    # deprecated in PHP 7.4 and removed in PHP 8.
    switch ($token[0]) {
        case "\\":
            # Escaped character: emit it as a numeric entity so later
            # span passes cannot reinterpret it.
            return $this->hashPart("&#". ord($token[1]). ";");
        case "`":
            # Search for end marker in remaining text.
            if (preg_match('/^(.*?[^`])'.preg_quote($token).'(?!`)(.*)$/sm',
                $str, $matches))
            {
                $str = $matches[2];
                $codespan = $this->makeCodeSpan($matches[1]);
                return $this->hashPart($codespan);
            }
            return $token; // return as text since no ending marker found.
        default:
            return $this->hashPart($token);
    }
}
#
# Remove one level of line-leading tabs or spaces
#
function outdent($text) {
    # One "level" is a single tab or up to tab_width leading spaces.
    $indent_re = '/^(\t|[ ]{1,' . $this->tab_width . '})/m';
    return preg_replace($indent_re, '', $text);
}
# String length function for detab. `_initDetab` will create a function to
# handle UTF-8 if the default function does not exist.
var $utf8_strlen = 'mb_strlen';
#
# Replace tabs with the appropriate amount of space.
# Only lines that actually contain a tab are handed to the callback.
#
function detab($text) {
#
# Replace tabs with the appropriate amount of space.
#
# For each line we separate the line in blocks delimited by
# tab characters. Then we reconstruct every line by adding the
# appropriate number of spaces between blocks.
$text = preg_replace_callback('/^.*\t.*$/m',
array(&$this, '_detab_callback'), $text);
return $text;
}
#
# detab() callback: expand the tabs of one line so each block starts at
# the next tab stop (multiple of tab_width), measured in characters.
#
function _detab_callback($matches) {
$line = $matches[0];
$strlen = $this->utf8_strlen; # strlen function for UTF-8.
# Split in blocks.
$blocks = explode("\t", $line);
# Add each block to the line.
$line = $blocks[0];
unset($blocks[0]); # Do not add first block twice.
foreach ($blocks as $block) {
# Calculate amount of space, insert spaces, insert block.
$amount = $this->tab_width -
$strlen($line, 'UTF-8') % $this->tab_width;
$line .= str_repeat(" ", $amount) . $block;
}
return $line;
}
#
# Check for the availability of the function in the `utf8_strlen` property
# (initially `mb_strlen`). If the function is not available, create a
# closure that loosely counts the number of UTF-8 characters with a
# regular expression.
#
function _initDetab() {
    # Nothing to do when a closure was already installed or the named
    # function (mb_strlen by default) exists.
    if ($this->utf8_strlen instanceof Closure) return;
    if (function_exists($this->utf8_strlen)) return;
    # create_function() was deprecated in PHP 7.2 and removed in PHP 8;
    # use an anonymous function instead. Counting matches of
    # "one lead byte plus its continuation bytes" approximates strlen
    # for UTF-8 input.
    $this->utf8_strlen = function ($text) {
        return preg_match_all(
            '/[\x00-\xBF]|[\xC0-\xFF][\x80-\xBF]*/',
            $text, $m);
    };
}
#
# Swap back in all the tags hashed by _HashHTMLBlocks.
# Hash tokens look like <char>\x1A<number><same char>; the whole token
# is the key into $this->html_hashes.
#
function unhash($text) {
#
# Swap back in all the tags hashed by _HashHTMLBlocks.
#
return preg_replace_callback('/(.)\x1A[0-9]+\1/',
array(&$this, '_unhash_callback'), $text);
}
# unhash() callback: look up one hash token's stored HTML.
function _unhash_callback($matches) {
return $this->html_hashes[$matches[0]];
}
}
/*
PHP Markdown
============
Description
-----------
This is a PHP translation of the original Markdown formatter written in
Perl by John Gruber.
Markdown is a text-to-HTML filter; it translates an easy-to-read /
easy-to-write structured text format into HTML. Markdown's text format
is most similar to that of plain text email, and supports features such
as headers, *emphasis*, code blocks, blockquotes, and links.
Markdown's syntax is designed not as a generic markup language, but
specifically to serve as a front-end to (X)HTML. You can use span-level
HTML tags anywhere in a Markdown document, and you can use block level
HTML tags (like <div> and <table> as well).
For more information about Markdown's syntax, see:
<http://daringfireball.net/projects/markdown/>
Bugs
----
To file bug reports please send email to:
<[email protected]>
Please include with your report: (1) the example input; (2) the output you
expected; (3) the output Markdown actually produced.
Version History
---------------
See the readme file for detailed release notes for this version.
Copyright and License
---------------------
PHP Markdown
Copyright (c) 2004-2009 Michel Fortin
<http://michelf.com/>
All rights reserved.
Based on Markdown
Copyright (c) 2003-2006 John Gruber
<http://daringfireball.net/>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name "Markdown" nor the names of its contributors may
be used to endorse or promote products derived from this software
without specific prior written permission.
This software is provided by the copyright holders and contributors "as
is" and any express or implied warranties, including, but not limited
to, the implied warranties of merchantability and fitness for a
particular purpose are disclaimed. In no event shall the copyright owner
or contributors be liable for any direct, indirect, incidental, special,
exemplary, or consequential damages (including, but not limited to,
procurement of substitute goods or services; loss of use, data, or
profits; or business interruption) however caused and on any theory of
liability, whether in contract, strict liability, or tort (including
negligence or otherwise) arising in any way out of the use of this
software, even if advised of the possibility of such damage.
*/
?> | mit |
hotchandanisagar/odata.net | src/Microsoft.Spatial/OrcasExtensions.cs | 1108 | //---------------------------------------------------------------------
// <copyright file="OrcasExtensions.cs" company="Microsoft">
// Copyright (C) Microsoft Corporation. All rights reserved. See License.txt in the project root for license information.
// </copyright>
//---------------------------------------------------------------------
namespace Microsoft.Spatial
{
using System.Text;
/// <summary>
/// This class holds extension methods for objects that have new capabilities
/// in newer versions of .net, and this lets us make the calls look the same and reduces the #if noise
/// </summary>
internal static class OrcasExtensions
{
/// <summary>
/// StringBuilder didn't have a clear method in Orcas, so we added an extension method to give it one.
/// </summary>
/// <param name="builder">The StringBuilder instance to clear.</param>
/// <remarks>
/// Resets both Length and Capacity, allowing the internal buffer to be reclaimed.
/// </remarks>
internal static void Clear(this StringBuilder builder)
{
builder.Length = 0;
builder.Capacity = 0;
}
}
}
| mit |
microlv/vscode | src/typings/windows-process-tree.d.ts | 2208 | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
declare module 'windows-process-tree' {
// Bit flags selecting which optional fields are populated on each process.
export enum ProcessDataFlag {
None = 0,
Memory = 1,
CommandLine = 2
}
export interface IProcessInfo {
// Process identifier.
pid: number;
// Parent process identifier.
ppid: number;
// Executable name.
name: string;
/**
* The working set size of the process, in bytes.
* Populated only when ProcessDataFlag.Memory was requested.
*/
memory?: number;
/**
* The string returned is at most 512 chars, strings exceeding this length are truncated.
* Populated only when ProcessDataFlag.CommandLine was requested.
*/
commandLine?: string;
}
export interface IProcessCpuInfo extends IProcessInfo {
// CPU usage as computed by getProcessCpuUsage.
cpu?: number;
}
// A process plus its child processes, as returned by getProcessTree.
export interface IProcessTreeNode {
pid: number;
name: string;
memory?: number;
commandLine?: string;
children: IProcessTreeNode[];
}
/**
* Returns a tree of processes with the rootPid process as the root.
* @param rootPid - The pid of the process that will be the root of the tree.
* @param callback - The callback to use with the returned list of processes.
* @param flags - The flags for what process data should be included.
*/
export function getProcessTree(rootPid: number, callback: (tree: IProcessTreeNode) => void, flags?: ProcessDataFlag): void;
/**
* Returns a list of processes containing the rootPid process and all of its descendants.
* @param rootPid - The pid of the process of interest.
* @param callback - The callback to use with the returned set of processes.
* @param flags - The flags for what process data should be included.
*/
export function getProcessList(rootPid: number, callback: (processList: IProcessInfo[]) => void, flags?: ProcessDataFlag): void;
/**
* Returns the list of processes annotated with cpu usage information.
* @param processList - The list of processes.
* @param callback - The callback to use with the returned list of processes.
*/
export function getProcessCpuUsage(processList: IProcessInfo[], callback: (processListWithCpu: IProcessCpuInfo[]) => void): void;
}
| mit |
ycsoft/FatCat-Server | LIBS/boost_1_58_0/libs/unordered/doc/src_code/point1.cpp | 1197 |
// Copyright 2006-2009 Daniel James.
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#include <boost/unordered_set.hpp>
#include <boost/detail/lightweight_test.hpp>
//[point_example1
// Minimal aggregate type used to demonstrate a user-defined hash function.
struct point {
int x;
int y;
};
// Two points are equal when both coordinates match; required so the
// unordered container can resolve hash collisions.
bool operator==(point const& p1, point const& p2)
{
return p1.x == p2.x && p1.y == p2.y;
}
// Hash functor for point: folds both members into one seed with
// boost::hash_combine (order-dependent, so member order matters).
struct point_hash
: std::unary_function<point, std::size_t>
{
std::size_t operator()(point const& p) const
{
std::size_t seed = 0;
boost::hash_combine(seed, p.x);
boost::hash_combine(seed, p.y);
return seed;
}
};
boost::unordered_multiset<point, point_hash> points;
//]
int main() {
    // Insert four points; x[0] and x[3] are duplicates, so the multiset
    // should report a count of 2 for that value.
    point x[] = {{1,2}, {3,4}, {1,5}, {1,2}};
    const std::size_t count = sizeof(x) / sizeof(x[0]);
    for (std::size_t i = 0; i < count; ++i) {
        points.insert(x[i]);
    }
    BOOST_TEST(points.count(x[0]) == 2);
    BOOST_TEST(points.count(x[1]) == 1);
    // A point never inserted must not be found.
    point y = {10, 2};
    BOOST_TEST(points.count(y) == 0);
    return boost::report_errors();
}
| mit |
PacktPublishing/Mastering-Mesos | Chapter4/Singularity/SingularityClient/src/main/java/com/hubspot/singularity/client/ProviderUtils.java | 337 | package com.hubspot.singularity.client;
import javax.inject.Provider;
/**
 * Static helpers for working with {@code javax.inject.Provider} instances.
 */
class ProviderUtils
{
/**
 * Wraps an existing instance in a constant {@link Provider} whose
 * {@code get()} always returns that same instance.
 *
 * @param instance the value every call to {@code get()} will return
 * @return a provider yielding {@code instance}
 */
public static <T> Provider<T> of(final T instance) { // XXX: this seems like it should be in a base library somewhere?
return new Provider<T>() {
@Override
public T get()
{
return instance;
}
};
}
}
| mit |
alecspopa/rails | actionview/test/template/text_helper_test.rb | 22043 | # frozen_string_literal: true
require "abstract_unit"
class TextHelperTest < ActionView::TestCase
tests ActionView::Helpers::TextHelper
# Reset per-render helper state before each test so helpers that memoize
# in instance variables (like cycle) start fresh.
def setup
super
# This simulates the fact that instance variables are reset every time
# a view is rendered. The cycle helper depends on this behavior.
@_cycles = nil if (defined? @_cycles)
end
# concat appends to the current output buffer and returns the buffer.
def test_concat
self.output_buffer = "foo".dup
assert_equal "foobar", concat("bar")
assert_equal "foobar", output_buffer
end
# --- simple_format: output safety, line-break/paragraph conversion,
# --- sanitization options, custom wrappers, and non-mutation guarantees.
def test_simple_format_should_be_html_safe
assert simple_format("<b> test with html tags </b>").html_safe?
end
def test_simple_format_included_in_isolation
helper_klass = Class.new { include ActionView::Helpers::TextHelper }
assert helper_klass.new.simple_format("<b> test with html tags </b>").html_safe?
end
# Single newlines become <br />, double newlines split paragraphs, and
# html_options are applied to every generated <p>.
def test_simple_format
assert_equal "<p></p>", simple_format(nil)
assert_equal "<p>crazy\n<br /> cross\n<br /> platform linebreaks</p>", simple_format("crazy\r\n cross\r platform linebreaks")
assert_equal "<p>A paragraph</p>\n\n<p>and another one!</p>", simple_format("A paragraph\n\nand another one!")
assert_equal "<p>A paragraph\n<br /> With a newline</p>", simple_format("A paragraph\n With a newline")
text = "A\nB\nC\nD".freeze
assert_equal "<p>A\n<br />B\n<br />C\n<br />D</p>", simple_format(text)
text = "A\r\n \nB\n\n\r\n\t\nC\nD".freeze
assert_equal "<p>A\n<br /> \n<br />B</p>\n\n<p>\t\n<br />C\n<br />D</p>", simple_format(text)
assert_equal '<p class="test">This is a classy test</p>', simple_format("This is a classy test", class: "test")
assert_equal %Q(<p class="test">para 1</p>\n\n<p class="test">para 2</p>), simple_format("para 1\n\npara 2", class: "test")
end
# Sanitization is on by default and strips <script> while keeping <b>.
def test_simple_format_should_sanitize_input_when_sanitize_option_is_not_false
assert_equal "<p><b> test with unsafe string </b>code!</p>", simple_format("<b> test with unsafe string </b><script>code!</script>")
end
def test_simple_format_should_sanitize_input_when_sanitize_option_is_true
assert_equal "<p><b> test with unsafe string </b>code!</p>",
simple_format("<b> test with unsafe string </b><script>code!</script>", {}, { sanitize: true })
end
def test_simple_format_should_not_sanitize_input_when_sanitize_option_is_false
assert_equal "<p><b> test with unsafe string </b><script>code!</script></p>", simple_format("<b> test with unsafe string </b><script>code!</script>", {}, { sanitize: false })
end
def test_simple_format_with_custom_wrapper
assert_equal "<div></div>", simple_format(nil, {}, { wrapper_tag: "div" })
end
def test_simple_format_with_custom_wrapper_and_multi_line_breaks
assert_equal "<div>We want to put a wrapper...</div>\n\n<div>...right there.</div>", simple_format("We want to put a wrapper...\n\n...right there.", {}, { wrapper_tag: "div" })
end
# The helper must not mutate its arguments.
def test_simple_format_should_not_change_the_text_passed
text = "<b>Ok</b><script>code!</script>"
text_clone = text.dup
simple_format(text)
assert_equal text_clone, text
end
def test_simple_format_does_not_modify_the_html_options_hash
options = { class: "foobar" }
passed_options = options.dup
simple_format("some text", passed_options)
assert_equal options, passed_options
end
def test_simple_format_does_not_modify_the_options_hash
options = { wrapper_tag: :div, sanitize: false }
passed_options = options.dup
simple_format("some text", {}, passed_options)
assert_equal options, passed_options
end
# --- truncate: length handling, omission/separator options, multibyte
# --- input, escaping behavior, and the block (link) form.
def test_truncate
assert_equal "Hello World!", truncate("Hello World!", length: 12)
assert_equal "Hello Wor...", truncate("Hello World!!", length: 12)
end
def test_truncate_should_use_default_length_of_30
str = "This is a string that will go longer then the default truncate length of 30"
assert_equal str[0...27] + "...", truncate(str)
end
# :omission replaces the default "...", and :separator truncates at a
# natural break instead of mid-word.
def test_truncate_with_options_hash
assert_equal "This is a string that wil[...]", truncate("This is a string that will go longer then the default truncate length of 30", omission: "[...]")
assert_equal "Hello W...", truncate("Hello World!", length: 10)
assert_equal "Hello[...]", truncate("Hello World!", omission: "[...]", length: 10)
assert_equal "Hello[...]", truncate("Hello Big World!", omission: "[...]", length: 13, separator: " ")
assert_equal "Hello Big[...]", truncate("Hello Big World!", omission: "[...]", length: 14, separator: " ")
assert_equal "Hello Big[...]", truncate("Hello Big World!", omission: "[...]", length: 15, separator: " ")
end
# Length is counted in characters, not bytes (UTF-8 input).
def test_truncate_multibyte
assert_equal "\354\225\204\353\246\254\353\236\221 \354\225\204\353\246\254 ...".dup.force_encoding(Encoding::UTF_8),
truncate("\354\225\204\353\246\254\353\236\221 \354\225\204\353\246\254 \354\225\204\353\235\274\353\246\254\354\230\244".dup.force_encoding(Encoding::UTF_8), length: 10)
end
def test_truncate_does_not_modify_the_options_hash
options = { length: 10 }
passed_options = options.dup
truncate("some text", passed_options)
assert_equal options, passed_options
end
# The block form appends its (unescaped, trusted) result after the omission.
def test_truncate_with_link_options
assert_equal "Here is a long test and ...<a href=\"#\">Continue</a>",
truncate("Here is a long test and I need a continue to read link", length: 27) { link_to "Continue", "#" }
end
def test_truncate_should_be_html_safe
assert truncate("Hello World!", length: 12).html_safe?
end
def test_truncate_should_escape_the_input
assert_equal "Hello <sc...", truncate("Hello <script>code!</script>World!!", length: 12)
end
def test_truncate_should_not_escape_the_input_with_escape_false
assert_equal "Hello <sc...", truncate("Hello <script>code!</script>World!!", length: 12, escape: false)
end
def test_truncate_with_escape_false_should_be_html_safe
truncated = truncate("Hello <script>code!</script>World!!", length: 12, escape: false)
assert truncated.html_safe?
end
def test_truncate_with_block_should_be_html_safe
truncated = truncate("Here's a long test and I need a continue to read link", length: 27) { link_to "Continue", "#" }
assert truncated.html_safe?
end
def test_truncate_with_block_should_escape_the_input
assert_equal "<script>code!</script>He...<a href=\"#\">Continue</a>",
truncate("<script>code!</script>Here's a long test and I need a continue to read link", length: 27) { link_to "Continue", "#" }
end
def test_truncate_with_block_should_not_escape_the_input_with_escape_false
assert_equal "<script>code!</script>He...<a href=\"#\">Continue</a>",
truncate("<script>code!</script>Here's a long test and I need a continue to read link", length: 27, escape: false) { link_to "Continue", "#" }
end
def test_truncate_with_block_with_escape_false_should_be_html_safe
truncated = truncate("<script>code!</script>Here's a long test and I need a continue to read link", length: 27, escape: false) { link_to "Continue", "#" }
assert truncated.html_safe?
end
# The block's result itself is NOT escaped - callers must pass safe markup.
def test_truncate_with_block_should_escape_the_block
assert_equal "Here is a long test and ...<script>alert('foo');</script>",
truncate("Here is a long test and I need a continue to read link", length: 27) { "<script>alert('foo');</script>" }
end
def test_highlight_should_be_html_safe
assert highlight("This is a beautiful morning", "beautiful").html_safe?
end
def test_highlight
assert_equal(
"This is a <mark>beautiful</mark> morning",
highlight("This is a beautiful morning", "beautiful")
)
assert_equal(
"This is a <mark>beautiful</mark> morning, but also a <mark>beautiful</mark> day",
highlight("This is a beautiful morning, but also a beautiful day", "beautiful")
)
assert_equal(
"This is a <b>beautiful</b> morning, but also a <b>beautiful</b> day",
highlight("This is a beautiful morning, but also a beautiful day", "beautiful", highlighter: '<b>\1</b>')
)
assert_equal(
"This text is not changed because we supplied an empty phrase",
highlight("This text is not changed because we supplied an empty phrase", nil)
)
end
def test_highlight_pending
assert_equal " ", highlight(" ", "blank text is returned verbatim")
end
def test_highlight_should_return_blank_string_for_nil
assert_equal "", highlight(nil, "blank string is returned for nil")
end
def test_highlight_should_sanitize_input
assert_equal(
"This is a <mark>beautiful</mark> morningcode!",
highlight("This is a beautiful morning<script>code!</script>", "beautiful")
)
end
def test_highlight_should_not_sanitize_if_sanitize_option_if_false
assert_equal(
"This is a <mark>beautiful</mark> morning<script>code!</script>",
highlight("This is a beautiful morning<script>code!</script>", "beautiful", sanitize: false)
)
end
def test_highlight_with_regexp
assert_equal(
"This is a <mark>beautiful!</mark> morning",
highlight("This is a beautiful! morning", "beautiful!")
)
assert_equal(
"This is a <mark>beautiful! morning</mark>",
highlight("This is a beautiful! morning", "beautiful! morning")
)
assert_equal(
"This is a <mark>beautiful? morning</mark>",
highlight("This is a beautiful? morning", "beautiful? morning")
)
end
def test_highlight_accepts_regexp
assert_equal("This day was challenging for judge <mark>Allen</mark> and his colleagues.",
highlight("This day was challenging for judge Allen and his colleagues.", /\ballen\b/i))
end
def test_highlight_with_multiple_phrases_in_one_pass
assert_equal %(<em>wow</em> <em>em</em>), highlight("wow em", %w(wow em), highlighter: '<em>\1</em>')
end
def test_highlight_with_html
assert_equal(
"<p>This is a <mark>beautiful</mark> morning, but also a <mark>beautiful</mark> day</p>",
highlight("<p>This is a beautiful morning, but also a beautiful day</p>", "beautiful")
)
assert_equal(
"<p>This is a <em><mark>beautiful</mark></em> morning, but also a <mark>beautiful</mark> day</p>",
highlight("<p>This is a <em>beautiful</em> morning, but also a beautiful day</p>", "beautiful")
)
assert_equal(
"<p>This is a <em class=\"error\"><mark>beautiful</mark></em> morning, but also a <mark>beautiful</mark> <span class=\"last\">day</span></p>",
highlight("<p>This is a <em class=\"error\">beautiful</em> morning, but also a beautiful <span class=\"last\">day</span></p>", "beautiful")
)
assert_equal(
"<p class=\"beautiful\">This is a <mark>beautiful</mark> morning, but also a <mark>beautiful</mark> day</p>",
highlight("<p class=\"beautiful\">This is a beautiful morning, but also a beautiful day</p>", "beautiful")
)
assert_equal(
"<p>This is a <mark>beautiful</mark> <a href=\"http://example.com/beautiful#top?what=beautiful%20morning&when=now+then\">morning</a>, but also a <mark>beautiful</mark> day</p>",
highlight("<p>This is a beautiful <a href=\"http://example.com/beautiful\#top?what=beautiful%20morning&when=now+then\">morning</a>, but also a beautiful day</p>", "beautiful")
)
assert_equal(
"<div>abc <b>div</b></div>",
highlight("<div>abc div</div>", "div", highlighter: '<b>\1</b>')
)
end
def test_highlight_does_not_modify_the_options_hash
options = { highlighter: '<b>\1</b>', sanitize: false }
passed_options = options.dup
highlight("<div>abc div</div>", "div", passed_options)
assert_equal options, passed_options
end
def test_highlight_with_block
assert_equal(
"<b>one</b> <b>two</b> <b>three</b>",
highlight("one two three", ["one", "two", "three"]) { |word| "<b>#{word}</b>" }
)
end
def test_excerpt
assert_equal("...is a beautiful morn...", excerpt("This is a beautiful morning", "beautiful", radius: 5))
assert_equal("This is a...", excerpt("This is a beautiful morning", "this", radius: 5))
assert_equal("...iful morning", excerpt("This is a beautiful morning", "morning", radius: 5))
assert_nil excerpt("This is a beautiful morning", "day")
end
def test_excerpt_with_regex
assert_equal("...is a beautiful! mor...", excerpt("This is a beautiful! morning", "beautiful", radius: 5))
assert_equal("...is a beautiful? mor...", excerpt("This is a beautiful? morning", "beautiful", radius: 5))
assert_equal("...is a beautiful? mor...", excerpt("This is a beautiful? morning", /\bbeau\w*\b/i, radius: 5))
assert_equal("...is a beautiful? mor...", excerpt("This is a beautiful? morning", /\b(beau\w*)\b/i, radius: 5))
assert_equal("...udge Allen and...", excerpt("This day was challenging for judge Allen and his colleagues.", /\ballen\b/i, radius: 5))
assert_equal("...judge Allen and...", excerpt("This day was challenging for judge Allen and his colleagues.", /\ballen\b/i, radius: 1, separator: " "))
assert_equal("...was challenging for...", excerpt("This day was challenging for judge Allen and his colleagues.", /\b(\w*allen\w*)\b/i, radius: 5))
end
def test_excerpt_should_not_be_html_safe
assert !excerpt("This is a beautiful! morning", "beautiful", radius: 5).html_safe?
end
def test_excerpt_in_borderline_cases
assert_equal("", excerpt("", "", radius: 0))
assert_equal("a", excerpt("a", "a", radius: 0))
assert_equal("...b...", excerpt("abc", "b", radius: 0))
assert_equal("abc", excerpt("abc", "b", radius: 1))
assert_equal("abc...", excerpt("abcd", "b", radius: 1))
assert_equal("...abc", excerpt("zabc", "b", radius: 1))
assert_equal("...abc...", excerpt("zabcd", "b", radius: 1))
assert_equal("zabcd", excerpt("zabcd", "b", radius: 2))
# excerpt strips the resulting string before ap-/prepending excerpt_string.
# whether this behavior is meaningful when excerpt_string is not to be
# appended is questionable.
assert_equal("zabcd", excerpt(" zabcd ", "b", radius: 4))
assert_equal("...abc...", excerpt("z abc d", "b", radius: 1))
end
def test_excerpt_with_omission
assert_equal("[...]is a beautiful morn[...]", excerpt("This is a beautiful morning", "beautiful", omission: "[...]", radius: 5))
assert_equal(
"This is the ultimate supercalifragilisticexpialidoceous very looooooooooooooooooong looooooooooooong beautiful morning with amazing sunshine and awesome tempera[...]",
excerpt("This is the ultimate supercalifragilisticexpialidoceous very looooooooooooooooooong looooooooooooong beautiful morning with amazing sunshine and awesome temperatures. So what are you gonna do about it?", "very",
omission: "[...]")
)
end
def test_excerpt_with_utf8
assert_equal("...\357\254\203ciency could not be...".dup.force_encoding(Encoding::UTF_8), excerpt("That's why e\357\254\203ciency could not be helped".dup.force_encoding(Encoding::UTF_8), "could", radius: 8))
end
def test_excerpt_does_not_modify_the_options_hash
options = { omission: "[...]", radius: 5 }
passed_options = options.dup
excerpt("This is a beautiful morning", "beautiful", passed_options)
assert_equal options, passed_options
end
def test_excerpt_with_separator
options = { separator: " ", radius: 1 }
assert_equal("...a very beautiful...", excerpt("This is a very beautiful morning", "very", options))
assert_equal("This is...", excerpt("This is a very beautiful morning", "this", options))
assert_equal("...beautiful morning", excerpt("This is a very beautiful morning", "morning", options))
options = { separator: "\n", radius: 0 }
assert_equal("...very long...", excerpt("my very\nvery\nvery long\nstring", "long", options))
options = { separator: "\n", radius: 1 }
assert_equal("...very\nvery long\nstring", excerpt("my very\nvery\nvery long\nstring", "long", options))
assert_equal excerpt("This is a beautiful morning", "a"),
excerpt("This is a beautiful morning", "a", separator: nil)
end
def test_word_wrap
assert_equal("my very very\nvery long\nstring", word_wrap("my very very very long string", line_width: 15))
end
def test_word_wrap_with_extra_newlines
assert_equal("my very very\nvery long\nstring\n\nwith another\nline", word_wrap("my very very very long string\n\nwith another line", line_width: 15))
end
def test_word_wrap_does_not_modify_the_options_hash
options = { line_width: 15 }
passed_options = options.dup
word_wrap("some text", passed_options)
assert_equal options, passed_options
end
def test_word_wrap_with_custom_break_sequence
assert_equal("1234567890\r\n1234567890\r\n1234567890", word_wrap("1234567890 " * 3, line_width: 2, break_sequence: "\r\n"))
end
def test_pluralization
assert_equal("1 count", pluralize(1, "count"))
assert_equal("2 counts", pluralize(2, "count"))
assert_equal("1 count", pluralize("1", "count"))
assert_equal("2 counts", pluralize("2", "count"))
assert_equal("1,066 counts", pluralize("1,066", "count"))
assert_equal("1.25 counts", pluralize("1.25", "count"))
assert_equal("1.0 count", pluralize("1.0", "count"))
assert_equal("1.00 count", pluralize("1.00", "count"))
assert_equal("2 counters", pluralize(2, "count", "counters"))
assert_equal("0 counters", pluralize(nil, "count", "counters"))
assert_equal("2 counters", pluralize(2, "count", plural: "counters"))
assert_equal("0 counters", pluralize(nil, "count", plural: "counters"))
assert_equal("2 people", pluralize(2, "person"))
assert_equal("10 buffaloes", pluralize(10, "buffalo"))
assert_equal("1 berry", pluralize(1, "berry"))
assert_equal("12 berries", pluralize(12, "berry"))
end
def test_localized_pluralization
old_locale = I18n.locale
begin
I18n.locale = :de
ActiveSupport::Inflector.inflections(:de) do |inflect|
inflect.irregular "region", "regionen"
end
assert_equal("1 region", pluralize(1, "region"))
assert_equal("2 regionen", pluralize(2, "region"))
assert_equal("2 regions", pluralize(2, "region", locale: :en))
ensure
I18n.locale = old_locale
end
end
def test_cycle_class
value = Cycle.new("one", 2, "3")
assert_equal("one", value.to_s)
assert_equal("2", value.to_s)
assert_equal("3", value.to_s)
assert_equal("one", value.to_s)
value.reset
assert_equal("one", value.to_s)
assert_equal("2", value.to_s)
assert_equal("3", value.to_s)
end
def test_cycle_class_with_no_arguments
assert_raise(ArgumentError) { Cycle.new }
end
def test_cycle
assert_equal("one", cycle("one", 2, "3"))
assert_equal("2", cycle("one", 2, "3"))
assert_equal("3", cycle("one", 2, "3"))
assert_equal("one", cycle("one", 2, "3"))
assert_equal("2", cycle("one", 2, "3"))
assert_equal("3", cycle("one", 2, "3"))
end
def test_cycle_with_array
array = [1, 2, 3]
assert_equal("1", cycle(array))
assert_equal("2", cycle(array))
assert_equal("3", cycle(array))
end
def test_cycle_with_no_arguments
assert_raise(ArgumentError) { cycle }
end
def test_cycle_resets_with_new_values
assert_equal("even", cycle("even", "odd"))
assert_equal("odd", cycle("even", "odd"))
assert_equal("even", cycle("even", "odd"))
assert_equal("1", cycle(1, 2, 3))
assert_equal("2", cycle(1, 2, 3))
assert_equal("3", cycle(1, 2, 3))
assert_equal("1", cycle(1, 2, 3))
end
def test_named_cycles
assert_equal("1", cycle(1, 2, 3, name: "numbers"))
assert_equal("red", cycle("red", "blue", name: "colors"))
assert_equal("2", cycle(1, 2, 3, name: "numbers"))
assert_equal("blue", cycle("red", "blue", name: "colors"))
assert_equal("3", cycle(1, 2, 3, name: "numbers"))
assert_equal("red", cycle("red", "blue", name: "colors"))
end
def test_current_cycle_with_default_name
cycle("even", "odd")
assert_equal "even", current_cycle
cycle("even", "odd")
assert_equal "odd", current_cycle
cycle("even", "odd")
assert_equal "even", current_cycle
end
def test_current_cycle_with_named_cycles
cycle("red", "blue", name: "colors")
assert_equal "red", current_cycle("colors")
cycle("red", "blue", name: "colors")
assert_equal "blue", current_cycle("colors")
cycle("red", "blue", name: "colors")
assert_equal "red", current_cycle("colors")
end
def test_current_cycle_safe_call
assert_nothing_raised { current_cycle }
assert_nothing_raised { current_cycle("colors") }
end
def test_current_cycle_with_more_than_two_names
cycle(1, 2, 3)
assert_equal "1", current_cycle
cycle(1, 2, 3)
assert_equal "2", current_cycle
cycle(1, 2, 3)
assert_equal "3", current_cycle
cycle(1, 2, 3)
assert_equal "1", current_cycle
end
def test_default_named_cycle
assert_equal("1", cycle(1, 2, 3))
assert_equal("2", cycle(1, 2, 3, name: "default"))
assert_equal("3", cycle(1, 2, 3))
end
def test_reset_cycle
assert_equal("1", cycle(1, 2, 3))
assert_equal("2", cycle(1, 2, 3))
reset_cycle
assert_equal("1", cycle(1, 2, 3))
end
def test_reset_unknown_cycle
reset_cycle("colors")
end
def test_reset_named_cycle
assert_equal("1", cycle(1, 2, 3, name: "numbers"))
assert_equal("red", cycle("red", "blue", name: "colors"))
reset_cycle("numbers")
assert_equal("1", cycle(1, 2, 3, name: "numbers"))
assert_equal("blue", cycle("red", "blue", name: "colors"))
assert_equal("2", cycle(1, 2, 3, name: "numbers"))
assert_equal("red", cycle("red", "blue", name: "colors"))
end
def test_cycle_no_instance_variable_clashes
@cycles = %w{Specialized Fuji Giant}
assert_equal("red", cycle("red", "blue"))
assert_equal("blue", cycle("red", "blue"))
assert_equal("red", cycle("red", "blue"))
assert_equal(%w{Specialized Fuji Giant}, @cycles)
end
end
| mit |
agencja-acclaim/gulp-bower-webapp | wp-content/plugins/wordpress-seo/admin/notifiers/class-configuration-notifier.php | 4908 | <?php
/**
* WPSEO plugin file.
*
* @package WPSEO\Admin\Notifiers
*/
/**
* Represents the logic for showing the notification.
*/
class WPSEO_Configuration_Notifier implements WPSEO_Listener {

	/**
	 * User meta key under which a dismissal of the first-time notice is stored.
	 *
	 * @var string
	 */
	const META_NAME = 'wpseo-dismiss-configuration-notice';

	/**
	 * Meta value that marks the first-time notice as dismissed.
	 *
	 * @var string
	 */
	const META_VALUE = 'yes';

	/**
	 * Whether the first-time onboarding notice is enabled for this site.
	 *
	 * @var bool
	 */
	protected $show_notification;

	/**
	 * Constructs the object by setting the show notification property based the given options.
	 */
	public function __construct() {
		$this->show_notification = WPSEO_Options::get( 'show_onboarding_notice', false );
	}

	/**
	 * Returns the content of the notification.
	 *
	 * Shows the first-time configuration notice while it is active; once the
	 * option is off or the current user dismissed it, falls back to the
	 * softer "re-run the wizard" variant. Always returns notification HTML.
	 *
	 * @return string A string with the notification HTML.
	 */
	public function notify() {
		if ( ! $this->show_notification() ) {
			return $this->re_run_notification();
		}

		return $this->first_time_notification();
	}

	/**
	 * Listens to an argument in the request URL. When triggered just set the notification to dismissed.
	 *
	 * Bails unless the first-time notice is still active and the dismissal
	 * GET parameter is present in the current request.
	 *
	 * @return void
	 */
	public function listen() {
		if ( ! $this->show_notification() || ! $this->dismissal_is_triggered() ) {
			return;
		}

		$this->set_dismissed();
	}

	/**
	 * Checks if the dismissal should be triggered.
	 *
	 * @return bool True when action has been triggered.
	 */
	protected function dismissal_is_triggered() {
		// filter_input() yields a string (or null), so compare against the string '1'.
		return filter_input( INPUT_GET, 'dismiss_get_started' ) === '1';
	}

	/**
	 * Checks if the current user has dismissed the notification.
	 *
	 * @return bool True when the notification has been dismissed.
	 */
	protected function is_dismissed() {
		return get_user_meta( get_current_user_id(), self::META_NAME, true ) === self::META_VALUE;
	}

	/**
	 * Sets the dismissed state for the current user.
	 *
	 * @return void
	 */
	protected function set_dismissed() {
		update_user_meta( get_current_user_id(), self::META_NAME, self::META_VALUE );
	}

	/**
	 * Checks if the notification should be shown.
	 *
	 * @return bool True when notification should be shown.
	 */
	protected function show_notification() {
		return $this->show_notification && ! $this->is_dismissed();
	}

	/**
	 * Returns the notification to re-run the config wizard.
	 *
	 * @return string The notification.
	 */
	private function re_run_notification() {
		$content = sprintf(
			/* translators: %1$s expands to Yoast SEO, %2$s is a link start tag to the Onboarding Wizard, %3$s is the link closing tag. */
			esc_html__( 'Want to make sure your %1$s settings are still OK? %2$sOpen the configuration wizard again%3$s to validate them.', 'wordpress-seo' ),
			'Yoast SEO',
			'<a href="' . esc_url( admin_url( 'admin.php?page=' . WPSEO_Configuration_Page::PAGE_IDENTIFIER ) ) . '">',
			'</a>'
		);

		return $this->notification( __( 'Check SEO configuration', 'wordpress-seo' ), $content );
	}

	/**
	 * Returns the notification to start the config wizard for the first time.
	 *
	 * @return string The notification.
	 */
	private function first_time_notification() {
		$content = sprintf(
			/* translators: %1$s expands to Yoast SEO, %2$s is a link start tag to the Onboarding Wizard, %3$s is the link closing tag. */
			esc_html__( 'Get started quickly with the %1$s %2$sconfiguration wizard%3$s!', 'wordpress-seo' ),
			'Yoast SEO',
			'<a href="' . esc_url( admin_url( 'admin.php?page=' . WPSEO_Configuration_Page::PAGE_IDENTIFIER ) ) . '">',
			'</a>'
		);

		// Only the first-time variant is dismissible; the re-run variant is permanent.
		return $this->notification( __( 'First-time SEO configuration', 'wordpress-seo' ), $content, true );
	}

	/**
	 * Returns a styled notification.
	 *
	 * @param string $title          Title for the notification.
	 * @param string $content        Content for the notification (already-escaped HTML).
	 * @param bool   $show_dismissal Whether to show the dismiss button or not.
	 *
	 * @return string The styled notification.
	 */
	private function notification( $title, $content, $show_dismissal = false ) {
		$notification  = '<div class="yoast-container yoast-container__configuration-wizard">';
		$notification .= sprintf(
			'<img src="%1$s" height="%2$s" width="%3$d" />',
			esc_url( plugin_dir_url( WPSEO_FILE ) . 'images/new-to-configuration-notice.svg' ),
			60,
			60
		);
		$notification .= '<div class="yoast-container__configuration-wizard--content">';
		$notification .= '<h3>' . esc_html( $title ) . '</h3>';
		$notification .= '<p>';
		$notification .= $content;
		$notification .= '</p>';
		$notification .= '</div>';
		if ( $show_dismissal ) {
			// The dismiss link carries dismiss_get_started=1, which listen() picks up.
			$notification .= sprintf(
				'<a href="%1$s" style="" class="button dismiss yoast-container__configuration-wizard--dismiss"><span class="screen-reader-text">%2$s</span><span class="dashicons dashicons-no-alt"></span></a>',
				esc_url( admin_url( 'admin.php?page=wpseo_dashboard&dismiss_get_started=1' ) ),
				esc_html__( 'Dismiss this item.', 'wordpress-seo' )
			);
		}
		$notification .= '</div>';

		return $notification;
	}
}
| mit |
gero3/three.js | examples/jsm/nodes/core/NodeLib.js | 872 | const NodeLib = {
nodes: {},
keywords: {},
add: function ( node ) {
this.nodes[ node.name ] = node;
},
addKeyword: function ( name, callback, cache ) {
cache = cache !== undefined ? cache : true;
this.keywords[ name ] = { callback: callback, cache: cache };
},
remove: function ( node ) {
delete this.nodes[ node.name ];
},
removeKeyword: function ( name ) {
delete this.keywords[ name ];
},
get: function ( name ) {
return this.nodes[ name ];
},
getKeyword: function ( name, builder ) {
return this.keywords[ name ].callback.call( this, builder );
},
getKeywordData: function ( name ) {
return this.keywords[ name ];
},
contains: function ( name ) {
return this.nodes[ name ] !== undefined;
},
containsKeyword: function ( name ) {
return this.keywords[ name ] !== undefined;
}
};
export { NodeLib };
| mit |
bholmes/XamarinComponents | Util/NugetAuditor/Mono.Security/Mono.Security.X509.Extensions/BasicConstraintsExtension.cs | 4183 | //
// BasicConstraintsExtension.cs: Handles X.509 BasicConstrains extensions.
//
// Author:
// Sebastien Pouliot <[email protected]>
//
// (C) 2003 Motus Technologies Inc. (http://www.motus.com)
// Copyright (C) 2004-2005 Novell, Inc (http://www.novell.com)
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//
using System;
using System.Globalization;
using System.Text;
using Mono.Security;
using Mono.Security.X509;
namespace Mono.Security.X509.Extensions {
// References:
// 1. RFC 3280: Internet X.509 Public Key Infrastructure, Section 4.2.1.10
// http://www.ietf.org/rfc/rfc3280.txt
/* id-ce-basicConstraints OBJECT IDENTIFIER ::= { id-ce 19 }
*
* BasicConstraints ::= SEQUENCE {
* cA BOOLEAN DEFAULT FALSE,
* pathLenConstraint INTEGER (0..MAX) OPTIONAL
* }
*/
#if INSIDE_CORLIB
	internal
#else
	public
#endif
	// Handles the X.509 BasicConstraints extension (OID 2.5.29.19, RFC 3280 §4.2.1.10):
	//   BasicConstraints ::= SEQUENCE {
	//       cA                 BOOLEAN DEFAULT FALSE,
	//       pathLenConstraint  INTEGER (0..MAX) OPTIONAL }
	class BasicConstraintsExtension : X509Extension {

		// Sentinel used by PathLenConstraint when the optional
		// pathLenConstraint field is absent from the encoded extension.
		public const int NoPathLengthConstraint = -1;

		private bool cA;
		private int pathLenConstraint;

		public BasicConstraintsExtension () : base ()
		{
			extnOid = "2.5.29.19";
			pathLenConstraint = NoPathLengthConstraint;
		}

		public BasicConstraintsExtension (ASN1 asn1) : base (asn1) {}

		public BasicConstraintsExtension (X509Extension extension) : base (extension) {}

		// Parses extnValue into the cA flag and optional path length.
		// Both fields are optional in the encoding, hence the null checks.
		protected override void Decode ()
		{
			// default values
			cA = false;
			pathLenConstraint = NoPathLengthConstraint;

			ASN1 sequence = new ASN1 (extnValue.Value);
			if (sequence.Tag != 0x30)
				throw new ArgumentException ("Invalid BasicConstraints extension");
			int n = 0;
			ASN1 a = sequence [n++];
			if ((a != null) && (a.Tag == 0x01)) {
				cA = (a.Value [0] == 0xFF);
				a = sequence [n++];
			}
			if ((a != null) && (a.Tag == 0x02))
				pathLenConstraint = ASN1Convert.ToInt32 (a);
		}

		// Serializes the current cA / pathLenConstraint values back into extnValue.
		protected override void Encode ()
		{
			ASN1 seq = new ASN1 (0x30);
			if (cA)
				seq.Add (new ASN1 (0x01, new byte[] { 0xFF }));
			// CAs MUST NOT include the pathLenConstraint field unless the cA boolean is asserted
			if (cA && (pathLenConstraint >= 0))
				seq.Add (ASN1Convert.FromInt32 (pathLenConstraint));
			extnValue = new ASN1 (0x04);
			extnValue.Add (seq);
		}

		public bool CertificateAuthority {
			get { return cA; }
			set { cA = value; }
		}

		public override string Name {
			get { return "Basic Constraints"; }
		}

		// Path length constraint, or NoPathLengthConstraint (-1) when absent.
		public int PathLenConstraint {
			get { return pathLenConstraint; }
			set {
				if (value < NoPathLengthConstraint) {
					string msg = Locale.GetText ("PathLenConstraint must be positive or -1 for none ({0}).", value);
					// FIX: ArgumentOutOfRangeException(string) treats its single
					// argument as the *parameter name*, not the message, so the
					// localized text was previously reported as a bogus parameter
					// name. Use the (paramName, message) overload instead.
					throw new ArgumentOutOfRangeException ("value", msg);
				}
				pathLenConstraint = value;
			}
		}

		public override string ToString ()
		{
			StringBuilder sb = new StringBuilder ();
			sb.Append ("Subject Type=");
			sb.Append ((cA) ? "CA" : "End Entity");
			sb.Append (Environment.NewLine);
			sb.Append ("Path Length Constraint=");
			if (pathLenConstraint == NoPathLengthConstraint)
				sb.Append ("None");
			else
				sb.Append (pathLenConstraint.ToString (CultureInfo.InvariantCulture));
			sb.Append (Environment.NewLine);
			return sb.ToString ();
		}
	}
}
| mit |
duodraco/ophpen | src/vendor/symfony/src/Symfony/Bundle/DoctrineBundle/Tests/DependencyInjection/Fixtures/Bundles/YamlBundle/YamlBundle.php | 177 | <?php
namespace DoctrineBundle\Tests\DependencyInjection\Fixtures\Bundles\YamlBundle;
use Symfony\Component\HttpKernel\Bundle\Bundle;
class YamlBundle extends Bundle
{
} | mit |
Ecuacion/Pokemon-Showdown-Node-Bot | data/games-words-example.js | 340 | /*
* Words for Hangman and Anagrams
*
* Rename this file to games-words.js
* Add here your topics and words in the same format as the example
*/
// Map of topic name => candidate answer words for that topic.
// Each key becomes a selectable category in the Hangman/Anagrams games.
exports.words = {
	"Colors": ['Red', 'Orange', 'Blue', 'Yellow', 'Pink', 'Black', 'White'],
	"Days of Week": ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
};
| mit |
lxe/babel | packages/babel-plugin-transform-es2015-block-scoping/test/fixtures/general/switch-callbacks/actual.js | 173 | function fn() {
switch (true) {
default:
let foo = 4;
if (true) {
let bar = () => foo;
console.log(bar());
}
}
}
| mit |
jskenney/calendar-core | ace-builds/src-noconflict/snippets/twig.js | 503 | ace.define("ace/snippets/twig",["require","exports","module"], function(require, exports, module) {
"use strict";
exports.snippetText =undefined;
exports.scope = "twig";
}); (function() {
ace.require(["ace/snippets/twig"], function(m) {
if (typeof module == "object" && typeof exports == "object" && module) {
module.exports = m;
}
});
})();
| mit |
pcphashimoto/codezine_tizenapp_sample1 | tizen-web-ui-fw/latest/js/modules/jqm/jquery.mobile.navigation.pushstate.js | 5626 | //>>excludeStart("jqmBuildExclude", pragmas.jqmBuildExclude);
//>>description: history.pushState support, layered on top of hashchange.
//>>label: Pushstate Support
//>>group: Navigation
define( [ "jquery", "./jquery.mobile.navigation", "./jquery.hashchange" ], function( jQuery ) {
//>>excludeEnd("jqmBuildExclude");
// Progressive-enhancement layer: rewrites jQuery Mobile's hash-based
// navigation URLs into clean pushState URLs via history.replaceState, and
// routes popstate (back/forward) events back into the regular hashchange
// navigation machinery.
(function( $, window ) {
	// For now, let's Monkeypatch this onto the end of $.mobile._registerInternalEvents
	// Scope self to pushStateHandler so we can reference it sanely within the
	// methods handed off as event handlers
	var pushStateHandler = {},
		self = pushStateHandler,
		$win = $.mobile.$window,
		url = $.mobile.path.parseLocation(),
		mobileinitDeferred = $.Deferred(),
		domreadyDeferred = $.Deferred();

	$.mobile.$document.ready( $.proxy( domreadyDeferred, "resolve" ) );

	$.mobile.$document.one( "mobileinit", $.proxy( mobileinitDeferred, "resolve" ) );

	$.extend( pushStateHandler, {
		// TODO move to a path helper, this is rather common functionality
		// Path + query string of the initially loaded document; used as the
		// fallback hash for history entries that have none.
		initialFilePath: (function() {
			return url.pathname + url.search;
		})(),

		// Window (ms) during which hashchange handling stays suppressed after
		// a popstate, to swallow the duplicate hashchange some browsers fire.
		hashChangeTimeout: 200,

		hashChangeEnableTimer: undefined,

		initialHref: url.hrefNoHash,

		// Snapshot stored with each history entry via replaceState.
		state: function() {
			return {
				// firefox auto decodes the url when using location.hash but not href
				hash: $.mobile.path.parseLocation().hash || "#" + self.initialFilePath,
				title: document.title,

				// persist across refresh
				initialHref: self.initialHref
			};
		},

		// Re-introduces the "#" marker in front of dialog/sub-page UI keys so
		// a cleaned path URL can round-trip back into its hash-based form.
		resetUIKeys: function( url ) {
			var dialog = $.mobile.dialogHashKey,
				subkey = "&" + $.mobile.subPageUrlKey,
				dialogIndex = url.indexOf( dialog );

			if ( dialogIndex > -1 ) {
				url = url.slice( 0, dialogIndex ) + "#" + url.slice( dialogIndex );
			} else if ( url.indexOf( subkey ) > -1 ) {
				url = url.split( subkey ).join( "#" + subkey );
			}

			return url;
		},

		// TODO sort out a single barrier to hashchange functionality
		// Toggles both this module's and the core navigation's reaction to
		// the next hashchange event.
		nextHashChangePrevented: function( value ) {
			$.mobile.urlHistory.ignoreNextHashChange = value;
			self.onHashChangeDisabled = value;
		},

		// on hash change we want to clean up the url
		// NOTE this takes place *after* the vanilla navigation hash change
		// handling has taken place and set the state of the DOM
		onHashChange: function( e ) {
			// disable this hash change
			if ( self.onHashChangeDisabled ) {
				return;
			}

			var href, state,
				// firefox auto decodes the url when using location.hash but not href
				hash = $.mobile.path.parseLocation().hash,
				isPath = $.mobile.path.isPath( hash ),
				resolutionUrl = isPath ? $.mobile.path.getLocation() : $.mobile.getDocumentUrl();

			hash = isPath ? hash.replace( "#", "" ) : hash;

			// propulate the hash when its not available
			state = self.state();

			// make the hash abolute with the current href
			href = $.mobile.path.makeUrlAbsolute( hash, resolutionUrl );

			if ( isPath ) {
				href = self.resetUIKeys( href );
			}

			// replace the current url with the new href and store the state
			// Note that in some cases we might be replacing an url with the
			// same url. We do this anyways because we need to make sure that
			// all of our history entries have a state object associated with
			// them. This allows us to work around the case where $.mobile.back()
			// is called to transition from an external page to an embedded page.
			// In that particular case, a hashchange event is *NOT* generated by the browser.
			// Ensuring each history entry has a state object means that onPopState()
			// will always trigger our hashchange callback even when a hashchange event
			// is not fired.
			history.replaceState( state, document.title, href );
		},

		// on popstate (ie back or forward) we need to replace the hash that was there previously
		// cleaned up by the additional hash handling
		onPopState: function( e ) {
			var poppedState = e.originalEvent.state,
				fromHash, toHash, hashChanged;

			// if there's no state its not a popstate we care about, eg chrome's initial popstate
			if ( poppedState ) {
				// if we get two pop states in under this.hashChangeTimeout
				// make sure to clear any timer set for the previous change
				clearTimeout( self.hashChangeEnableTimer );

				// make sure to enable hash handling for the the _handleHashChange call
				self.nextHashChangePrevented( false );

				// change the page based on the hash in the popped state
				$.mobile._handleHashChange( poppedState.hash );

				// prevent any hashchange in the next self.hashChangeTimeout
				self.nextHashChangePrevented( true );

				// re-enable hash change handling after swallowing a possible hash
				// change event that comes on all popstates courtesy of browsers like Android
				self.hashChangeEnableTimer = setTimeout( function() {
					self.nextHashChangePrevented( false );
				}, self.hashChangeTimeout );
			}
		},

		init: function() {
			$win.bind( "hashchange", self.onHashChange );

			// Handle popstate events the occur through history changes
			$win.bind( "popstate", self.onPopState );

			// if there's no hash, we need to replacestate for returning to home
			if ( location.hash === "" ) {
				history.replaceState( self.state(), document.title, $.mobile.path.getLocation() );
			}
		}
	});

	// We need to init when "mobileinit", "domready", and "navready" have all happened
	$.when( domreadyDeferred, mobileinitDeferred, $.mobile.navreadyDeferred ).done(function() {
		if ( $.mobile.pushStateEnabled && $.support.pushState ) {
			pushStateHandler.init();
		}
	});
})( jQuery, this );
//>>excludeStart("jqmBuildExclude", pragmas.jqmBuildExclude);
});
//>>excludeEnd("jqmBuildExclude");
| mit |
vascogaspar/ember-cli-pagination | tests/helpers/start-app.js | 571 | import Ember from 'ember';
import Application from '../../app';
import Router from '../../router';
import config from '../../config/environment';
import PaginationAssertions from './assertions';
// Boots a full application instance for acceptance tests.
//
// `attrs` may override any of the default `config.APP` attributes; the two
// objects are shallow-merged with `attrs` taking precedence. The app is
// created inside an Ember run loop, switched into testing mode and has the
// registered test helpers injected; the booted instance is returned so the
// caller can destroy it during teardown.
export default function startApp(attrs) {
  var application;

  var attributes = Ember.merge({}, config.APP);
  attributes = Ember.merge(attributes, attrs); // use defaults, but you can override;

  Ember.run(function() {
    application = Application.create(attributes);
    application.setupForTesting();
    application.injectTestHelpers();
  });

  return application;
}
| mit |
dsebastien/DefinitelyTyped | types/semver/functions/major.d.ts | 241 | import SemVer = require('../classes/semver');
import semver = require('../');
/**
 * Return the major version number.
 *
 * @param version Version string (e.g. `"2.3.1"`) or a `SemVer` instance.
 * @param optionsOrLoose Full parse options, or a boolean shorthand for `loose`.
 * @returns The major component as a number (`2` for `"2.3.1"`).
 */
declare function major(version: string | SemVer, optionsOrLoose?: boolean | semver.Options): number;

export = major;
| mit |
pedromarquesalmeida/A | lib/cc/analyzer/filesystem.rb | 1204 | module CC
module Analyzer
class Filesystem
def initialize(root)
@root = root
end
def exist?(path)
File.exist?(path_for(path))
end
def source_buffer_for(path)
SourceBuffer.new(path, read_path(path))
end
def read_path(path)
File.read(path_for(path))
end
def write_path(path, content)
File.write(path_for(path), content)
File.chown(root_uid, root_gid, path_for(path))
end
def any?(&block)
file_paths.any?(&block)
end
def files_matching(globs)
Dir.chdir(@root) do
globs.map do |glob|
Dir.glob(glob)
end.flatten.sort.uniq
end
end
private
def file_paths
@file_paths ||= Dir.chdir(@root) do
`find . -type f -print0`.strip.split("\0").map do |path|
path.sub(/^\.\//, "")
end
end
end
def path_for(path)
File.join(@root, path)
end
def root_uid
root_stat.uid
end
def root_gid
root_stat.gid
end
def root_stat
@root_stat ||= File.stat(@root)
end
end
end
end
| mit |
jeremybuehler/aqua | test/client/pages/admin/components/accounts/Search.js | 4854 | var React = require('react/addons');
var Lab = require('lab');
var Code = require('code');
var Proxyquire = require('proxyquire');
var StubRouterContext = require('../../../../fixtures/StubRouterContext');
var lab = exports.lab = Lab.script();
var TestUtils = React.addons.TestUtils;
// Stubbed collaborators swapped in for the real Account actions/store.
// Proxyquire overlays these on the real modules' exports, so properties not
// stubbed here (e.g. AccountStore.emitChange) fall through to the real module.
var stub = {
    Actions: {
        // No-op so mounting the component never fires real XHR-backed fetches.
        getResults: function () {}
    },
    AccountStore: {}
};
// Component under test, loaded with the stubs wired into its requires.
var Search = Proxyquire('../../../../../../client/pages/admin/components/accounts/Search', {
    '../../actions/Account': stub.Actions,
    '../../stores/Account': stub.AccountStore
});


lab.experiment('Admin Account Search', function () {

    // Smoke test: component mounts without throwing.
    lab.test('it renders normally', function (done) {

        var ComponentWithContext = StubRouterContext(Search, {});
        var SearchEl = React.createElement(ComponentWithContext, {});
        var search = TestUtils.renderIntoDocument(SearchEl);

        Code.expect(search).to.exist();
        done();
    });

    // componentWillUnmount must run cleanly (store listeners detached, etc.).
    lab.test('it handles unmounting', function (done) {

        var container = global.document.createElement('div');
        var ComponentWithContext = StubRouterContext(Search, {});
        var SearchEl = React.createElement(ComponentWithContext, {});

        React.render(SearchEl, container);
        React.unmountComponentAtNode(container);
        done();
    });

    lab.test('it receives new props', function (done) {

        var ComponentWithContext = StubRouterContext(Search, {});
        var SearchEl = React.createElement(ComponentWithContext, {});
        var search = TestUtils.renderIntoDocument(SearchEl);

        search.setProps({ foo: 'bar' });

        Code.expect(search).to.exist();
        done();
    });

    // Emitting on the (real, non-stubbed) store must reach the component's
    // change handler without error.
    lab.test('it handles a store change', function (done) {

        var ComponentWithContext = StubRouterContext(Search, {});
        var SearchEl = React.createElement(ComponentWithContext, {});
        TestUtils.renderIntoDocument(SearchEl);

        stub.AccountStore.emitChange();
        done();
    });

    lab.test('it handles a filter change (from a submit event)', function (done) {

        var ComponentWithContext = StubRouterContext(Search, {});
        var SearchEl = React.createElement(ComponentWithContext, {});
        var search = TestUtils.renderIntoDocument(SearchEl);
        var target = TestUtils.findRenderedComponentWithType(search, Search);
        var form = TestUtils.findRenderedDOMComponentWithTag(target.refs.filters, 'form');

        // Stub navigation so the handler's route transition is a no-op here.
        target.transitionTo = function () {};

        TestUtils.Simulate.submit(form.getDOMNode());

        Code.expect(search).to.exist();
        done();
    });

    lab.test('it handles a filter change (from an input event)', function (done) {

        var ComponentWithContext = StubRouterContext(Search, {});
        var SearchEl = React.createElement(ComponentWithContext, {});
        var search = TestUtils.renderIntoDocument(SearchEl);
        var target = TestUtils.findRenderedComponentWithType(search, Search);
        var selects = TestUtils.scryRenderedDOMComponentsWithTag(search, 'select');
        // The page-size select is rendered last among the filter selects.
        var limit = selects[selects.length - 1];

        target.transitionTo = function () {};

        TestUtils.Simulate.change(limit, { target: { value: 10 } });

        Code.expect(search).to.exist();
        done();
    });

    lab.test('it handles a page change', function (done) {

        var ComponentWithContext = StubRouterContext(Search, {});
        var SearchEl = React.createElement(ComponentWithContext, {});
        var search = TestUtils.renderIntoDocument(SearchEl);
        var target = TestUtils.findRenderedComponentWithType(search, Search);

        // Seed paging state (page 2 of 3) so the "next" control is enabled.
        target.setState({
            results: {
                data: [],
                pages: {
                    current: 2,
                    prev: 1,
                    hasPrev: true,
                    next: 3,
                    hasNext: true,
                    total: 3
                },
                items: {
                    limit: 10,
                    begin: 11,
                    end: 20,
                    total: 30
                }
            }
        });

        var next = target.refs.paging.refs.next;

        target.transitionTo = function () {};

        TestUtils.Simulate.click(next.getDOMNode());

        Code.expect(search).to.exist();
        done();
    });

    lab.test('it handles a create new click', function (done) {

        var ComponentWithContext = StubRouterContext(Search, {});
        var SearchEl = React.createElement(ComponentWithContext, {});
        var search = TestUtils.renderIntoDocument(SearchEl);
        var target = TestUtils.findRenderedComponentWithType(search, Search);
        var createNew = target.refs.createNew;

        TestUtils.Simulate.click(createNew.getDOMNode());

        Code.expect(search).to.exist();
        done();
    });
});
| mit |
cdnjs/cdnjs | ajax/libs/amcharts4/4.9.34/.internal/core/formatters/NumberFormatter.js | 30542 | /**
* Number formatting-related functionality.
*/
import { __extends } from "tslib";
import { Language } from "../utils/Language";
import { BaseObject } from "../Base";
import { getTextFormatter } from "../formatters/TextFormatter";
import { registry } from "../Registry";
import * as $strings from "../utils/Strings";
import * as $object from "../utils/Object";
import * as $utils from "../utils/Utils";
import * as $type from "../utils/Type";
import * as $math from "../utils/Math";
/**
* NumberFormatter class. Formats numbers according to specified formats.
*
* @todo Apply translations to suffixes/prefixes
*/
var NumberFormatter = /** @class */ (function (_super) {
__extends(NumberFormatter, _super);
/**
* Constructor
*/
function NumberFormatter() {
var _this = _super.call(this) || this;
/**
* A base value for negative numbers. Will treat all numbers below this value
* as negative numbers.
*/
_this._negativeBase = 0;
/**
* Holds number format.
*
* @default #,###.#####
*/
_this._numberFormat = "#,###.#####";
/**
* Output format to produce. If the format calls for applying color to the
* formatted value, this setting will determine what markup to use: SVG or
* HTML.
*
* Available options: svg, html.
*
* @default "svg"
*/
_this._outputFormat = "svg";
/**
* Any number smaller than this will be considered "small" number, which will
* trigger special formatting if "a" format modifier is used.
*/
_this._smallNumberThreshold = 1.00;
_this.className = "NumberFormatter";
_this.applyTheme();
return _this;
}
/**
 * Disposes the formatter, including its own Language instance if one was
 * ever created or cached.
 */
NumberFormatter.prototype.dispose = function () {
    _super.prototype.dispose.call(this);
    // Read the backing field, not the `language` getter: the getter lazily
    // constructs a brand-new Language just so it could be disposed here.
    if (this._language) {
        this._language.dispose();
    }
};
Object.defineProperty(NumberFormatter.prototype, "language", {
/**
* @return Language
*/
get: function () {
if (!this._language) {
if (this.sprite) {
this._language = this.sprite.language;
}
else {
this._language = new Language;
}
}
return this._language;
},
/**
* A reference to [[Language]] instance.
*
* Formatter will use language to translate various items, like number
* suffixes, etc.
*
* @param value Language
*/
set: function (value) {
this._language = value;
},
enumerable: true,
configurable: true
});
/**
* Formats the number according to specific format.
*
* @param value Value to format
* @param format Format to apply
* @return Formatted number
*/
NumberFormatter.prototype.format = function (value, format, precision) {
    // no format passed in or "Number": fall back to the formatter's default
    if (typeof format === "undefined" || ($type.isString(format) && format.toLowerCase() === "number")) {
        format = this._numberFormat;
    }
    // Init return value
    var formatted;
    // Cast to number just in case
    // TODO: maybe use better casting
    var source = Number(value);
    // Is it a built-in format or Intl.NumberFormatOptions
    if (format instanceof Object) {
        // Object formats are treated as Intl.NumberFormatOptions and
        // delegated to the environment's Intl implementation.
        try {
            if (this.intlLocales) {
                return new Intl.NumberFormat(this.intlLocales, format).format(source);
            }
            else {
                return new Intl.NumberFormat(undefined, format).format(source);
            }
        }
        catch (e) {
            // Bad locales/options: return a sentinel instead of throwing.
            return "Invalid";
        }
    }
    else {
        // Clean format
        format = $utils.cleanFormat(format);
        // Get format info (it will also deal with parser caching)
        var info = this.parseFormat(format, this.language);
        // Pick the positive/negative/zero sub-format relative to negativeBase.
        var details = void 0;
        if (source > this._negativeBase) {
            details = info.positive;
        }
        else if (source < this._negativeBase) {
            details = info.negative;
        }
        else {
            details = info.zero;
        }
        // Adjust precision — only when the format carries no modifier
        // ("a", "b", "%", …), since a modifier controls rounding itself.
        // Clone so the cached parse info is not mutated.
        if ($type.hasValue(precision) && !details.mod) {
            details = $object.clone(details);
            details.decimals.active = source == 0 ? 0 : precision;
        }
        // Substitute the formatted number into the template placeholder.
        formatted = details.template.split($strings.PLACEHOLDER).join(this.applyFormat(source, details));
    }
    return formatted;
};
/**
* Parses supplied format into structured object which can be used to format
* the number.
*
* @param format Format string, i.e. "#,###.00"
* @param language Language
*/
NumberFormatter.prototype.parseFormat = function (format, language) {
var _this = this;
// Check cache
var cached = this.getCache(format);
if ($type.hasValue(cached)) {
return cached;
}
// init format parse info holder
var info = {
"positive": {
"thousands": {
"active": -1,
"passive": -1,
"interval": -1,
"separator": language.translateEmpty("_thousandSeparator")
},
"decimals": {
"active": -1,
"passive": -1,
"separator": language.translateEmpty("_decimalSeparator")
},
"template": "",
"source": "",
"parsed": false
},
"negative": {
"thousands": {
"active": -1,
"passive": -1,
"interval": -1,
"separator": language.translateEmpty("_thousandSeparator")
},
"decimals": {
"active": -1,
"passive": -1,
"separator": language.translateEmpty("_decimalSeparator")
},
"template": "",
"source": "",
"parsed": false
},
"zero": {
"thousands": {
"active": -1,
"passive": -1,
"interval": -1,
"separator": language.translateEmpty("_thousandSeparator")
},
"decimals": {
"active": -1,
"passive": -1,
"separator": language.translateEmpty("_decimalSeparator")
},
"template": "",
"source": "",
"parsed": false
}
};
// Escape double vertical bars (that mean display one vertical bar)
format = format.replace("||", $strings.PLACEHOLDER2);
// Split it up and deal with different formats
var parts = format.split("|");
info.positive.source = parts[0];
if (typeof parts[2] === "undefined") {
info.zero = info.positive;
}
else {
info.zero.source = parts[2];
}
if (typeof parts[1] === "undefined") {
info.negative = info.positive;
}
else {
info.negative.source = parts[1];
}
// Parse each
$object.each(info, function (part, item) {
// Already parsed
if (item.parsed) {
return;
}
// Check cached
if (typeof _this.getCache(item.source) !== "undefined") {
info[part] = _this.getCache(item.source);
return;
}
// Begin parsing
var partFormat = item.source;
// Just "Number"?
if (partFormat.toLowerCase() === "number") {
partFormat = $type.isString(_this._numberFormat) ? _this._numberFormat : "#,###.#####";
}
// Let TextFormatter split into chunks
var chunks = getTextFormatter().chunk(partFormat, true);
for (var i = 0; i < chunks.length; i++) {
var chunk = chunks[i];
// replace back double vertical bar
chunk.text = chunk.text.replace($strings.PLACEHOLDER2, "|");
if (chunk.type === "value") {
// Parse format
// Look for codes
var matches = chunk.text.match(/[#0.,]+[ ]?[abesABES%!]?[abesABES‰!]?/);
if (matches) {
if (matches === null || matches[0] === "") {
// no codes here - assume string
// nothing to do here
item.template += chunk.text;
}
else {
// look for the format modifiers at the end
var mods = matches[0].match(/[abesABES%‰!]{2}|[abesABES%‰]{1}$/);
if (mods) {
item.mod = mods[0].toLowerCase();
item.modSpacing = matches[0].match(/[ ]{1}[abesABES%‰!]{1}$/) ? true : false;
}
// break the format up
var a = matches[0].split(".");
// Deal with thousands
if (a[0] === "") {
// No directives for thousands
// Leave default settings (no formatting)
}
else {
// Counts
item.thousands.active = (a[0].match(/0/g) || []).length;
item.thousands.passive = (a[0].match(/\#/g) || []).length + item.thousands.active;
// Separator interval
var b = a[0].split(",");
if (b.length === 1) {
// No thousands separators
// Do nothing
}
else {
// Use length fo the last chunk as thousands length
item.thousands.interval = $type.getValue(b.pop()).length;
if (item.thousands.interval === 0) {
item.thousands.interval = -1;
}
}
}
// Deal with decimals
if (typeof (a[1]) === "undefined") {
// No directives for decimals
// Leave at defaults (no formatting)
}
else {
// Counts
item.decimals.active = (a[1].match(/0/g) || []).length;
item.decimals.passive = (a[1].match(/\#/g) || []).length + item.decimals.active;
}
// Add special code to template
item.template += chunk.text.split(matches[0]).join($strings.PLACEHOLDER);
}
}
}
else {
// Quoted string - take it as it is
item.template += chunk.text;
}
}
// Apply style formatting
//item.template = getTextFormatter().format(item.template, this.outputFormat);
// Save cache
_this.setCache(item.source, item);
// Mark this as parsed
item.parsed = true;
});
// Save cache (the whole thing)
this.setCache(format, info);
return info;
};
/**
* Applies parsed format to a numeric value.
*
* @param value Value
* @param details Parsed format as returned by parseFormat()
* @return Formatted number
*/
NumberFormatter.prototype.applyFormat = function (value, details) {
// Use absolute values
var negative = value < 0;
value = Math.abs(value);
// Recalculate according to modifier
var prefix = "", suffix = "";
var mods = details.mod ? details.mod.split("") : [];
if (mods.indexOf("b") !== -1) {
var a_1 = this.applyPrefix(value, this.bytePrefixes, mods.indexOf("!") !== -1);
value = a_1[0];
prefix = a_1[1];
suffix = a_1[2];
if (details.modSpacing) {
suffix = " " + suffix;
}
}
else if (mods.indexOf("a") !== -1) {
var a_2 = this.applyPrefix(value, value < this.smallNumberThreshold ? this.smallNumberPrefixes : this.bigNumberPrefixes, mods.indexOf("!") !== -1);
value = a_2[0];
prefix = a_2[1];
suffix = a_2[2];
if (details.modSpacing) {
suffix = " " + suffix;
}
}
else if (mods.indexOf("%") !== -1) {
var ol = $math.min(value.toString().length + 2, 21);
value *= 100;
value = parseFloat(value.toPrecision(ol));
suffix = "%";
}
else if (mods.indexOf("‰") !== -1) {
var ol = $math.min(value.toString().length + 3, 21);
value *= 1000;
value = parseFloat(value.toPrecision(ol));
suffix = "‰";
}
// Round to passive
if (mods.indexOf("e") !== -1) {
// convert the value to exponential
var exp = void 0;
if (details.decimals.passive >= 0) {
exp = value.toExponential(details.decimals.passive).split("e");
}
else {
exp = value.toExponential().split("e");
}
value = Number(exp[0]);
suffix = "e" + exp[1];
if (details.modSpacing) {
suffix = " " + suffix;
}
}
else if (details.decimals.passive === 0) {
value = Math.round(value);
}
else if (details.decimals.passive > 0) {
var d = Math.pow(10, details.decimals.passive);
value = Math.round(value * d) / d;
}
// Init return value
var res = "";
// Calc integer and decimal parts
var a = $utils.numberToString(value).split(".");
// Format integers
var ints = a[0];
// Pad integers to active length
if (ints.length < details.thousands.active) {
ints = Array(details.thousands.active - ints.length + 1).join("0") + ints;
}
// Insert thousands separators
if (details.thousands.interval > 0) {
var ip = [];
var intsr = ints.split("").reverse().join("");
for (var i = 0, len = ints.length; i <= len; i += details.thousands.interval) {
var c = intsr.substr(i, details.thousands.interval).split("").reverse().join("");
if (c !== "") {
ip.unshift(c);
}
}
ints = ip.join(details.thousands.separator);
}
// Add integers
res += ints;
// Add decimals
if (a.length === 1) {
a.push("");
}
var decs = a[1];
// Fill zeros?
if (decs.length < details.decimals.active) {
decs += Array(details.decimals.active - decs.length + 1).join("0");
}
if (decs !== "") {
res += details.decimals.separator + decs;
}
// Can't have empty return value
if (res === "") {
res = "0";
}
// Add minus sign back
if (value !== 0 && negative && (mods.indexOf("s") === -1)) {
res = "-" + res;
}
// Add suffixes/prefixes
if (prefix) {
res = prefix + res;
}
if (suffix) {
res += suffix;
}
return res;
};
/**
* Chooses appropriate prefix and suffix based on the passed in rules.
*
* @param value Value
* @param prefixes Prefix array
* @param force Force application of a first prefix (@sice 4.5.4)
* @return Result
*/
NumberFormatter.prototype.applyPrefix = function (value, prefixes, force) {
    if (force === void 0) { force = false; }
    var newvalue = value;
    var prefix = "";
    var suffix = "";
    var applied = false;
    var k = 1;
    // Walk the whole list (assumed sorted ascending by `number`); the last
    // entry whose threshold is <= value wins, i.e. the largest applicable
    // prefix is used.
    for (var i = 0, len = prefixes.length; i < len; i++) {
        if (prefixes[i].number <= value) {
            if (prefixes[i].number === 0) {
                newvalue = 0;
            }
            else {
                newvalue = value / prefixes[i].number;
                k = prefixes[i].number;
            }
            prefix = prefixes[i].prefix;
            suffix = prefixes[i].suffix;
            applied = true;
        }
    }
    if (!applied && force && prefixes.length && value != 0) {
        // Prefix was not applied. Use the first prefix.
        newvalue = value / prefixes[0].number;
        prefix = prefixes[0].prefix;
        suffix = prefixes[0].suffix;
        applied = true;
    }
    if (applied) {
        // Re-round to a precision wide enough for the divided value's digits,
        // capped at 21 — the maximum toPrecision() accepts.
        newvalue = parseFloat(newvalue.toPrecision($math.min(k.toString().length + Math.floor(newvalue).toString().replace(/[^0-9]*/g, "").length, 21)));
    }
    // [scaled value, prefix string, suffix string]
    return [newvalue, prefix, suffix];
};
/**
* Invalidates the parent [[Sprite]] object.
*/
NumberFormatter.prototype.invalidateSprite = function () {
    // Nothing to redraw when the formatter is not attached to a sprite.
    if (!this.sprite) {
        return;
    }
    this.sprite.invalidate();
};
Object.defineProperty(NumberFormatter.prototype, "numberFormat", {
/**
* @return A format to use for number formatting
*/
get: function () {
return this._numberFormat;
},
/**
* Number format.
*
* @default "#,###.#####"
* @see {@link https://www.amcharts.com/docs/v4/concepts/formatters/formatting-numbers/} Tutorial on number formatting
* @param format A format to use for number formatting
*/
set: function (format) {
this._numberFormat = format;
},
enumerable: true,
configurable: true
});
Object.defineProperty(NumberFormatter.prototype, "intlLocales", {
/**
* @return Date format
*/
get: function () {
return this._intlLocales;
},
/**
* Locales if you are using date formats in `Intl.NumberFormatOptions` syntax.
*
* @see (@link https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/NumberFormat) about using Intl for number formatting
* @param value Locales
*/
set: function (value) {
this._intlLocales = value;
this.invalidateSprite();
},
enumerable: true,
configurable: true
});
Object.defineProperty(NumberFormatter.prototype, "negativeBase", {
/**
* @return A format to use for number formatting
*/
get: function () {
return this._negativeBase;
},
/**
* Negative base for negative numbers.
*
* @default 0
* @see {@link https://www.amcharts.com/docs/v4/concepts/formatters/formatting-numbers/} Tutorial on number formatting
* @param format A format to use for number formatting
*/
set: function (value) {
this._negativeBase = value;
},
enumerable: true,
configurable: true
});
Object.defineProperty(NumberFormatter.prototype, "bigNumberPrefixes", {
/**
* @return Prefixes for big numbers
*/
get: function () {
if (!$type.hasValue(this._bigNumberPrefixes)) {
this._bigNumberPrefixes = [
{ "number": 1e+3, "suffix": this.language.translate("_big_number_suffix_3") },
{ "number": 1e+6, "suffix": this.language.translate("_big_number_suffix_6") },
{ "number": 1e+9, "suffix": this.language.translate("_big_number_suffix_9") },
{ "number": 1e+12, "suffix": this.language.translate("_big_number_suffix_12") },
{ "number": 1e+15, "suffix": this.language.translate("_big_number_suffix_15") },
{ "number": 1e+18, "suffix": this.language.translate("_big_number_suffix_18") },
{ "number": 1e+21, "suffix": this.language.translate("_big_number_suffix_21") },
{ "number": 1e+24, "suffix": this.language.translate("_big_number_suffix_24") }
];
}
return this._bigNumberPrefixes;
},
/**
* Prefixes for big numbers.
*
* It's an array of objects of number/prefix pairs.
*
* ```JSON
* [
* { "number": 1e+3, "suffix": "K" },
* { "number": 1e+6, "suffix": "M" },
* { "number": 1e+9, "suffix": "G" },
* { "number": 1e+12, "suffix": "T" },
* { "number": 1e+15, "suffix": "P" },
* { "number": 1e+18, "suffix": "E" },
* { "number": 1e+21, "suffix": "Z" },
* { "number": 1e+24, "suffix": "Y" }
* ]
* ```
*
     * If the number is bigger than the `number` it will be converted to the
* appropriate bigger number with prefix.
*
* E.g. as per above `1500` will be converted to `1.5K`.
*
* Please note that for this transformation to be enabled, you need to
* enable it specific modifier in your format setting.
*
* The modifier for big/small number modification is "a":
*
* ```Text
* {myfield.formatNumber("#,###.00a")}
* ```
*
* @see {@link https://www.amcharts.com/docs/v4/concepts/formatters/formatting-numbers/} Tutorial on number formatting
* @param prefixes Prefixes for big numbers
*/
set: function (prefixes) {
this._bigNumberPrefixes = prefixes;
},
enumerable: true,
configurable: true
});
Object.defineProperty(NumberFormatter.prototype, "smallNumberPrefixes", {
/**
* @return Prefixes for small numbers
*/
get: function () {
if (!$type.hasValue(this._smallNumberPrefixes)) {
this._smallNumberPrefixes = [
{ "number": 1e-24, "suffix": this.language.translate("_small_number_suffix_24") },
{ "number": 1e-21, "suffix": this.language.translate("_small_number_suffix_21") },
{ "number": 1e-18, "suffix": this.language.translate("_small_number_suffix_18") },
{ "number": 1e-15, "suffix": this.language.translate("_small_number_suffix_15") },
{ "number": 1e-12, "suffix": this.language.translate("_small_number_suffix_12") },
{ "number": 1e-9, "suffix": this.language.translate("_small_number_suffix_9") },
{ "number": 1e-6, "suffix": this.language.translate("_small_number_suffix_6") },
{ "number": 1e-3, "suffix": this.language.translate("_small_number_suffix_3") }
];
}
return this._smallNumberPrefixes;
},
/**
* Prefixes for big numbers.
*
* It's an array of objects of number/prefix pairs.
*
* ```JSON
* [
* { "number": 1e-24, "suffix": "y" },
* { "number": 1e-21, "suffix": "z" },
* { "number": 1e-18, "suffix": "a" },
* { "number": 1e-15, "suffix": "f" },
* { "number": 1e-12, "suffix": "p" },
* { "number": 1e-9, "suffix": "n" },
* { "number": 1e-6, "suffix": "μ" },
* { "number": 1e-3, "suffix": "m" }
* ]
* ```
*
     * If the number is smaller than the `number` it will be converted to the
* appropriate smaller number with prefix.
*
* E.g. as per above `0.0015` will be converted to `1.5m`.
*
* Please note that for this transformation to be enabled, you need to
* enable it specific modifier in your format setting.
*
* The modifier for big/small number modification is "a":
*
* ```Text
* {myfield.formatNumber("#,###.00a")}
* ```
*
* IMPORTANT: The order of the suffixes is important. The list must start
* from the smallest number and work towards bigger ones.
*
* @see {@link https://www.amcharts.com/docs/v4/concepts/formatters/formatting-numbers/} Tutorial on number formatting
* @param prefixes Prefixes for small numbers
*/
set: function (prefixes) {
this._smallNumberPrefixes = prefixes;
},
enumerable: true,
configurable: true
});
Object.defineProperty(NumberFormatter.prototype, "smallNumberThreshold", {
/**
* @return Small number threshold
*/
get: function () {
return this._smallNumberThreshold;
},
/**
* Any number smaller than this will be considered "small" number, which will
* trigger special formatting if "a" format modifier is used.
*
* @since 4.6.8
* @param value Small number threshold
*/
set: function (value) {
this._smallNumberThreshold = value;
},
enumerable: true,
configurable: true
});
Object.defineProperty(NumberFormatter.prototype, "bytePrefixes", {
/**
* @return Prefixes for byte-size formatting
*/
get: function () {
if (!$type.hasValue(this._bytePrefixes)) {
this._bytePrefixes = [
{ "number": 1, suffix: this.language.translate("_byte_suffix_B") },
{ "number": 1024, suffix: this.language.translate("_byte_suffix_KB") },
{ "number": 1048576, suffix: this.language.translate("_byte_suffix_MB") },
{ "number": 1073741824, suffix: this.language.translate("_byte_suffix_GB") },
{ "number": 1099511627776, suffix: this.language.translate("_byte_suffix_TB") },
{ "number": 1125899906842624, suffix: this.language.translate("_byte_suffix_PB") }
];
}
return this._bytePrefixes;
},
/**
* Basically the same as `bigNumberPrefixes`, except base for calculation
* is not thousand but byte (1024).
*
* The modifier is "b".
*
* ```Text
* {myfield.formatNumber("#,###.00b")}
* ```
*
* The above `2048` will change to `2K`.
*
* @see {@link https://www.amcharts.com/docs/v4/concepts/formatters/formatting-numbers/} Tutorial on number formatting
* @param prefixes Prefixes for byte-size formatting
*/
set: function (prefixes) {
this._bytePrefixes = prefixes;
},
enumerable: true,
configurable: true
});
Object.defineProperty(NumberFormatter.prototype, "outputFormat", {
/**
* @ignore Exclude from docs
* @return Output format
*/
get: function () {
return this._outputFormat;
},
/**
     * Output format: "svg" or "html".
*
* @ignore Exclude from docs
* @param value Output format
*/
set: function (outputFormat) {
this._outputFormat = outputFormat.toLowerCase();
this.invalidateSprite();
},
enumerable: true,
configurable: true
});
/**
* Replaces brackets with temporary placeholders.
*
* @ignore Exclude from docs
* @param text Input text
* @return Escaped text
*/
NumberFormatter.prototype.escape = function (text) {
    // String#replace with a string pattern only substitutes the FIRST match;
    // split/join replaces every "||" occurrence, as the method's contract
    // ("replaces brackets with temporary placeholders") requires.
    return text.split("||").join($strings.PLACEHOLDER2);
};
/**
* Replaces placeholders back to brackets.
*
* @ignore Exclude from docs
* @param text Escaped text
* @return Unescaped text
*/
NumberFormatter.prototype.unescape = function (text) {
    // Mirror of escape(): restore every placeholder back to "|", not just
    // the first one (String#replace with a string pattern is first-match-only).
    return text.split($strings.PLACEHOLDER2).join("|");
};
return NumberFormatter;
}(BaseObject));
export { NumberFormatter };
/**
* Register class in system, so that it can be instantiated using its name from
* anywhere.
*
* @ignore
*/
registry.registeredClasses["NumberFormatter"] = NumberFormatter;
//# sourceMappingURL=NumberFormatter.js.map | mit |
kaloyan-raev/che | plugins/plugin-machine/che-plugin-machine-ext-client/src/main/java/org/eclipse/che/ide/extension/machine/client/processes/ProcessDataAdapter.java | 3448 | /*******************************************************************************
* Copyright (c) 2012-2016 Codenvy, S.A.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* Codenvy, S.A. - initial API and implementation
*******************************************************************************/
package org.eclipse.che.ide.extension.machine.client.processes;
import org.eclipse.che.ide.ui.tree.NodeDataAdapter;
import org.eclipse.che.ide.ui.tree.TreeNodeElement;
import javax.validation.constraints.NotNull;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
/**
* @author Anna Shumilova
*/
/**
 * Adapts {@link ProcessTreeNode} instances to the generic {@link NodeDataAdapter}
 * contract used to render the machine process tree.
 *
 * <p>Rename, drag-and-drop and node-path operations are intentionally not
 * supported in this mode and throw {@link UnsupportedOperationException}.
 *
 * @author Anna Shumilova
 */
public class ProcessDataAdapter implements NodeDataAdapter<ProcessTreeNode> {

    /** Orders nodes lexicographically by their unique identifier. */
    @Override
    public int compare(ProcessTreeNode current, ProcessTreeNode other) {
        return current.getId().compareTo(other.getId());
    }

    /** Returns {@code true} only when the node has a non-null, non-empty child collection. */
    @Override
    public boolean hasChildren(ProcessTreeNode data) {
        Collection<ProcessTreeNode> children = data.getChildren();
        return children != null && !children.isEmpty();
    }

    /**
     * Returns the node's children as a fresh mutable list. A node without a
     * child collection yields an empty list, never {@code null}.
     */
    @Override
    @NotNull
    public List<ProcessTreeNode> getChildren(ProcessTreeNode data) {
        Collection<ProcessTreeNode> nodes = data.getChildren();
        // Copy defensively so callers may mutate the returned list without
        // affecting the node's own collection.
        return nodes == null ? new ArrayList<ProcessTreeNode>() : new ArrayList<>(nodes);
    }

    /** Returns the node's unique identifier. */
    @Override
    @NotNull
    public String getNodeId(ProcessTreeNode data) {
        return data.getId();
    }

    /** Returns the node's display name. */
    @Override
    @NotNull
    public String getNodeName(ProcessTreeNode data) {
        return data.getName();
    }

    /** Returns the node's parent node. */
    @Override
    @NotNull
    public ProcessTreeNode getParent(ProcessTreeNode data) {
        return data.getParent();
    }

    /** Returns the rendered tree element previously attached to the node. */
    @Override
    @NotNull
    public TreeNodeElement<ProcessTreeNode> getRenderedTreeNode(ProcessTreeNode data) {
        return data.getTreeNodeElement();
    }

    /** Renaming nodes is not supported in this mode. */
    @Override
    public void setNodeName(ProcessTreeNode data, String name) {
        throw new UnsupportedOperationException("The method isn't supported in this mode...");
    }

    /** Stores the rendered tree element on the node for later retrieval. */
    @Override
    public void setRenderedTreeNode(ProcessTreeNode data, TreeNodeElement<ProcessTreeNode> renderedNode) {
        data.setTreeNodeElement(renderedNode);
    }

    /** Drag-and-drop is not supported in this mode. */
    @Override
    public ProcessTreeNode getDragDropTarget(ProcessTreeNode data) {
        throw new UnsupportedOperationException("The method isn't supported in this mode...");
    }

    /** Node paths are not supported in this mode. */
    @Override
    public List<String> getNodePath(ProcessTreeNode data) {
        throw new UnsupportedOperationException("The method isn't supported in this mode...");
    }

    /** Node paths are not supported in this mode. */
    @Override
    public ProcessTreeNode getNodeByPath(ProcessTreeNode root, List<String> relativeNodePath) {
        throw new UnsupportedOperationException("The method isn't supported in this mode...");
    }
}
| epl-1.0 |
hrishiko/gavkiwp | wp-content/plugins/yith-woocommerce-compare/includes/class.yith-woocompare.php | 2677 | <?php
/**
* Main class
*
* @author Your Inspiration Themes
* @package YITH Woocommerce Compare
* @version 1.1.4
*/
if ( !defined( 'YITH_WOOCOMPARE' ) ) { exit; } // Exit if accessed directly
if( !class_exists( 'YITH_Woocompare' ) ) {
/**
 * YITH Woocommerce Compare
 *
 * Bootstraps the plugin: registers the compare widget, schedules loading of
 * the YITH plugin framework, and instantiates the frontend or admin
 * controller depending on the current request context.
 *
 * @since 1.0.0
 */
class YITH_Woocompare {

    /**
     * Plugin object — the controller built for this request.
     *
     * NOTE(review): despite the original "@var string" this holds an object
     * (YITH_Woocompare_Frontend or YITH_Woocompare_Admin) or null.
     *
     * @var YITH_Woocompare_Frontend|YITH_Woocompare_Admin|null
     * @since 1.0.0
     */
    public $obj = null;

    /**
     * AJAX Helper.
     *
     * NOTE(review): never assigned anywhere in this class — presumably set
     * by collaborating code; confirm before relying on it.
     *
     * @var object|null
     * @since 1.0.0
     */
    public $ajax = null;

    /**
     * Constructor.
     *
     * Hooks widget registration and framework loading, then builds the
     * context-specific controller object.
     *
     * NOTE(review): PHP constructors cannot return a value to callers, so
     * the trailing `return` is effectively dead code.
     *
     * @return mixed|YITH_Woocompare_Admin|YITH_Woocompare_Frontend
     * @since 1.0.0
     */
    public function __construct() {
        add_action( 'widgets_init', array( $this, 'registerWidgets' ) );

        // Load Plugin Framework late (priority 15), after other plugins have
        // had a chance to register their framework copies in $plugin_fw_data.
        add_action( 'plugins_loaded', array( $this, 'plugin_fw_loader' ), 15 );

        if( $this->is_frontend() ) {
            $this->obj = new YITH_Woocompare_Frontend();
        } elseif( $this->is_admin() ) {
            $this->obj = new YITH_Woocompare_Admin();
        }

        return $this->obj;
    }

    /**
     * Detect if is frontend: not in wp-admin, or an AJAX request explicitly
     * tagged with context=frontend. Note that '&&' binds tighter than '||'
     * in the expression below.
     *
     * @return bool
     */
    public function is_frontend() {
        $is_ajax = ( defined( 'DOING_AJAX' ) && DOING_AJAX );
        return (bool) ( ! is_admin() || $is_ajax && isset( $_REQUEST['context'] ) && $_REQUEST['context'] == 'frontend' );
    }

    /**
     * Detect if is admin: in wp-admin, or an AJAX request explicitly tagged
     * with context=admin.
     *
     * @return bool
     */
    public function is_admin() {
        $is_ajax = ( defined( 'DOING_AJAX' ) && DOING_AJAX );
        return (bool) ( is_admin() || $is_ajax && isset( $_REQUEST['context'] ) && $_REQUEST['context'] == 'admin' );
    }

    /**
     * Load Plugin Framework.
     *
     * Requires the first registered framework copy, but only when no other
     * plugin has already loaded one (YIT_CORE_PLUGIN still undefined).
     *
     * @since 1.0
     * @access public
     * @return void
     * @author Andrea Grillo <[email protected]>
     */
    public function plugin_fw_loader() {
        if ( ! defined( 'YIT_CORE_PLUGIN' ) ) {
            global $plugin_fw_data;
            if ( ! empty( $plugin_fw_data ) ) {
                $plugin_fw_file = array_shift( $plugin_fw_data );
                require_once( $plugin_fw_file );
            }
        }
    }

    /**
     * Load and register widgets.
     *
     * @access public
     * @since 1.0.0
     */
    public function registerWidgets() {
        register_widget( 'YITH_Woocompare_Widget' );
    }
}
} | gpl-2.0 |
solidhex/kendahls-kitchen | wp-content/plugins/subscribe2/include/s2_colorpicker.js | 1643 | // version 1.0 - original version
jQuery(document).ready(function () {
    // Choose the event-binding strategy from the running jQuery's minor digit.
    // NOTE(review): only the minor version is inspected, so jQuery 2.x
    // ("2.0" -> minor 0 < 7) would wrongly take the legacy .live() branch —
    // confirm which jQuery versions must be supported.
    var version = jQuery.fn.jquery.split('.');
    if (parseFloat(version[1]) < 7) {
        // use .live as we are on jQuery prior to 1.7
        jQuery('.colorpickerField').live('click', function () {
            // Skip widget template fields ("__i__" is WordPress's placeholder index).
            if (jQuery(this).attr('id').search("__i__") === -1) {
                var picker,
                    field = jQuery(this).attr('id').substr(0, 20);

                jQuery('.s2_colorpicker').hide();
                // Find the picker whose id contains this field's 20-char id prefix.
                jQuery('.s2_colorpicker').each(function () {
                    if (jQuery(this).attr('id').search(field) !== -1) {
                        picker = jQuery(this).attr('id');
                    }
                });
                jQuery.farbtastic('#' + picker).linkTo(this);
                jQuery('#' + picker).slideDown();
            }
        });
    } else {
        // use .on as we are using jQuery 1.7 and up where .live is deprecated
        jQuery(document).on('focus', '.colorpickerField', function () {
            if (jQuery(this).is('.s2_initialised') || this.id.search('__i__') !== -1) {
                return; // exit early, already initialized or not activated
            }
            jQuery(this).addClass('s2_initialised');

            var picker,
                field = jQuery(this).attr('id').substr(0, 20);

            // Find the picker whose id contains this field's 20-char id prefix.
            jQuery('.s2_colorpicker').each(function () {
                if (jQuery(this).attr('id').search(field) !== -1) {
                    picker = jQuery(this).attr('id');
                    return false; // stop looping
                }
            });
            // Show the matched picker while the field has focus; hide on blur.
            jQuery(this).on('focusin', function (event) {
                jQuery('.s2_colorpicker').hide();
                jQuery.farbtastic('#' + picker).linkTo(this);
                jQuery('#' + picker).slideDown();
            });
            jQuery(this).on('focusout', function (event) {
                jQuery('#' + picker).slideUp();
            });
            jQuery(this).trigger('focus'); // retrigger focus event for plugin to work
        });
    }
});
| gpl-2.0 |
urokuta/cspoker | external/plcafe/generated-src/jp/ac/kobe_u/cs/prolog/builtin/PRED_atom_1.java | 1196 | package jp.ac.kobe_u.cs.prolog.builtin;
import jp.ac.kobe_u.cs.prolog.lang.Predicate;
import jp.ac.kobe_u.cs.prolog.lang.Prolog;
import jp.ac.kobe_u.cs.prolog.lang.Term;
/*
This file is generated by Prolog Cafe.
PLEASE DO NOT EDIT!
*/
/**
<code>atom/1</code> defined in builtins.pl<br>
@author Mutsunori Banbara ([email protected])
@author Naoyuki Tamura ([email protected])
@version 1.0
*/
/*
 * NOTE(review): this class is generated by Prolog Cafe from builtins.pl
 * ("PLEASE DO NOT EDIT"); comments below document the generated code only.
 */
public class PRED_atom_1 extends Predicate {
    // The single argument of the atom/1 goal.
    public Term arg1;

    /** Creates the predicate with its argument and continuation goal bound. */
    public PRED_atom_1(Term a1, Predicate cont) {
        arg1 = a1;
        this.cont = cont;
    }

    /** No-arg constructor; arguments are supplied later via setArgument. */
    public PRED_atom_1(){}

    /** Binds the goal arguments and continuation (invoked by the engine). */
    public void setArgument(Term[] args, Predicate cont) {
        arg1 = args[0];
        this.cont = cont;
    }

    /** atom/1 takes exactly one argument. */
    public int arity() { return 1; }

    public String toString() {
        return "atom(" + arg1 + ")";
    }

    /**
     * Executes atom(A): succeeds iff A dereferences to a symbol (atom),
     * otherwise fails and lets the engine backtrack.
     */
    public Predicate exec(Prolog engine) {
    // atom(A):-atom(A)
        engine.setB0();
        Term a1;
        a1 = arg1;
    // atom(A):-[atom(A)]
        //START inline expansion of atom(a(1))
        a1 = a1.dereference(); // follow variable bindings to the bound value
        if (! a1.isSymbol()) {
            return engine.fail(); // not an atom -> backtrack
        }
        //END inline expansion
        return cont; // success: continue with the next goal
    }
}
| gpl-2.0 |
urokuta/cspoker | external/plcafe/generated-src/jp/ac/kobe_u/cs/prolog/builtin/PRED_$log_2.java | 1545 | package jp.ac.kobe_u.cs.prolog.builtin;
import jp.ac.kobe_u.cs.prolog.lang.Arithmetic;
import jp.ac.kobe_u.cs.prolog.lang.BuiltinException;
import jp.ac.kobe_u.cs.prolog.lang.Predicate;
import jp.ac.kobe_u.cs.prolog.lang.Prolog;
import jp.ac.kobe_u.cs.prolog.lang.Term;
/*
This file is generated by Prolog Cafe.
PLEASE DO NOT EDIT!
*/
/**
<code>'$log'/2</code> defined in builtins.pl<br>
@author Mutsunori Banbara ([email protected])
@author Naoyuki Tamura ([email protected])
@version 1.0
*/
/*
 * NOTE(review): this class is generated by Prolog Cafe from builtins.pl
 * ("PLEASE DO NOT EDIT"); comments below document the generated code only.
 */
public class PRED_$log_2 extends Predicate {
    // arg1: the expression to take the logarithm of; arg2: the result term.
    public Term arg1, arg2;

    /** Creates the predicate with both arguments and the continuation bound. */
    public PRED_$log_2(Term a1, Term a2, Predicate cont) {
        arg1 = a1;
        arg2 = a2;
        this.cont = cont;
    }

    /** No-arg constructor; arguments are supplied later via setArgument. */
    public PRED_$log_2(){}

    /** Binds the goal arguments and continuation (invoked by the engine). */
    public void setArgument(Term[] args, Predicate cont) {
        arg1 = args[0];
        arg2 = args[1];
        this.cont = cont;
    }

    /** '$log'/2 takes exactly two arguments. */
    public int arity() { return 2; }

    public String toString() {
        return "$log(" + arg1 + "," + arg2 + ")";
    }

    /**
     * Executes '$log'(A,B): arithmetically evaluates A, takes its log, and
     * unifies the result with B; fails if unification fails.  Arithmetic
     * errors surface as BuiltinException tagged with this goal.
     */
    public Predicate exec(Prolog engine) {
    // '$log'(A,B):-'$log'(A,B)
        engine.setB0();
        Term a1, a2;
        a1 = arg1;
        a2 = arg2;
    // '$log'(A,B):-['$log'(A,B)]
        //START inline expansion of $log(a(1),a(2))
        try {
            if (! a2.unify(Arithmetic.evaluate(a1).log(), engine.trail)) {
                return engine.fail();
            }
        } catch (BuiltinException e) {
            e.goal = this; // attach the failing goal for error reporting
            throw e;
        }
        //END inline expansion
        return cont;
    }
}
| gpl-2.0 |
mmzgit/zen_boutique | app/code/core/Mage/ProductAlert/Block/Email/Price.php | 1991 | <?php
/**
* Magento
*
* NOTICE OF LICENSE
*
* This source file is subject to the Open Software License (OSL 3.0)
* that is bundled with this package in the file LICENSE.txt.
* It is also available through the world-wide-web at this URL:
* http://opensource.org/licenses/osl-3.0.php
* If you did not receive a copy of the license and are unable to
* obtain it through the world-wide-web, please send an email
* to [email protected] so we can send you a copy immediately.
*
* DISCLAIMER
*
* Do not edit or add to this file if you wish to upgrade Magento to newer
* versions in the future. If you wish to customize Magento for your
* needs please refer to http://www.magentocommerce.com for more information.
*
* @category Mage
* @package Mage_ProductAlert
* @copyright Copyright (c) 2012 Magento Inc. (http://www.magentocommerce.com)
* @license http://opensource.org/licenses/osl-3.0.php Open Software License (OSL 3.0)
*/
/**
* ProductAlert email price changed grid
*
* @category Mage
* @package Mage_ProductAlert
* @author Magento Core Team <[email protected]>
*/
class Mage_ProductAlert_Block_Email_Price extends Mage_ProductAlert_Block_Email_Abstract
{
    /**
     * Set up the block and point it at the price-alert email template.
     */
    public function __construct()
    {
        parent::__construct();
        $this->setTemplate('email/productalert/price.phtml');
    }

    /**
     * Build the unsubscribe URL for a single product's price alert.
     *
     * @param int $productId
     * @return string
     */
    public function getProductUnsubscribeUrl($productId)
    {
        $urlParams            = $this->_getUrlParams();
        $urlParams['product'] = $productId;

        return $this->getUrl('productalert/unsubscribe/price', $urlParams);
    }

    /**
     * Build the unsubscribe URL covering all of the customer's price alerts.
     *
     * @return string
     */
    public function getUnsubscribeUrl()
    {
        $urlParams = $this->_getUrlParams();

        return $this->getUrl('productalert/unsubscribe/priceAll', $urlParams);
    }
}
| gpl-2.0 |
FFMG/myoddweb.piger | monitor/api/python/Python-3.7.2/Lib/xml/etree/ElementTree.py | 57266 | """Lightweight XML support for Python.
XML is an inherently hierarchical data format, and the most natural way to
represent it is with a tree. This module has two classes for this purpose:
1. ElementTree represents the whole XML document as a tree and
2. Element represents a single node in this tree.
Interactions with the whole document (reading and writing to/from files) are
usually done on the ElementTree level. Interactions with a single XML element
and its sub-elements are done on the Element level.
Element is a flexible container object designed to store hierarchical data
structures in memory. It can be described as a cross between a list and a
dictionary. Each Element has a number of properties associated with it:
'tag' - a string containing the element's name.
'attributes' - a Python dictionary storing the element's attributes.
'text' - a string containing the element's text content.
'tail' - an optional string containing text after the element's end tag.
And a number of child elements stored in a Python sequence.
To create an element instance, use the Element constructor,
or the SubElement factory function.
You can also use the ElementTree class to wrap an element structure
and convert it to and from XML.
"""
#---------------------------------------------------------------------
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/psf/license for licensing details.
#
# ElementTree
# Copyright (c) 1999-2008 by Fredrik Lundh. All rights reserved.
#
# [email protected]
# http://www.pythonware.com
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2008 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
__all__ = [
# public symbols
"Comment",
"dump",
"Element", "ElementTree",
"fromstring", "fromstringlist",
"iselement", "iterparse",
"parse", "ParseError",
"PI", "ProcessingInstruction",
"QName",
"SubElement",
"tostring", "tostringlist",
"TreeBuilder",
"VERSION",
"XML", "XMLID",
"XMLParser", "XMLPullParser",
"register_namespace",
]
VERSION = "1.3.0"
import sys
import re
import warnings
import io
import collections
import collections.abc
import contextlib
from . import ElementPath
class ParseError(SyntaxError):
    """An error when parsing an XML document.

    In addition to its exception value, a ParseError contains
    two extra attributes:
        'code'     - the specific exception code
        'position' - the line and column of the error

    """
    pass
# --------------------------------------------------------------------
def iselement(element):
    """Return True if *element* appears to be an Element."""
    # Duck-typed check: anything exposing a ``tag`` attribute qualifies.
    try:
        element.tag
    except AttributeError:
        return False
    return True
class Element:
    """An XML element.

    This class is the reference implementation of the Element interface.

    An element's length is its number of subelements.  That means if you
    want to check if an element is truly empty, you should check BOTH
    its length AND its text attribute.

    The element tag, attribute names, and attribute values can be either
    bytes or strings.

    *tag* is the element name.  *attrib* is an optional dictionary containing
    element attributes. *extra* are additional element attributes given as
    keyword arguments.

    Example form:
        <tag attrib>text<child/>...</tag>tail

    """

    tag = None
    """The element's name."""

    attrib = None
    """Dictionary of the element's attributes."""

    text = None
    """
    Text before first subelement. This is either a string or the value None.
    Note that if there is no text, this attribute may be either
    None or the empty string, depending on the parser.

    """

    tail = None
    """
    Text after this element's end tag, but before the next sibling element's
    start tag.  This is either a string or the value None.  Note that if there
    was no text, this attribute may be either None or an empty string,
    depending on the parser.

    """

    def __init__(self, tag, attrib={}, **extra):
        # The mutable default for *attrib* is safe: it is never mutated,
        # only copied below.
        if not isinstance(attrib, dict):
            raise TypeError("attrib must be dict, not %s" % (
                attrib.__class__.__name__,))
        attrib = attrib.copy()
        attrib.update(extra)
        self.tag = tag
        self.attrib = attrib
        self._children = []

    def __repr__(self):
        return "<%s %r at %#x>" % (self.__class__.__name__, self.tag, id(self))

    def makeelement(self, tag, attrib):
        """Create a new element with the same type.

        *tag* is a string containing the element name.
        *attrib* is a dictionary containing the element attributes.

        Do not call this method, use the SubElement factory function instead.

        """
        return self.__class__(tag, attrib)

    def copy(self):
        """Return copy of current element.

        This creates a shallow copy. Subelements will be shared with the
        original tree.

        """
        elem = self.makeelement(self.tag, self.attrib)
        elem.text = self.text
        elem.tail = self.tail
        # Slice-assign shares the child Element objects (shallow copy).
        elem[:] = self
        return elem

    def __len__(self):
        return len(self._children)

    def __bool__(self):
        # An element with no children is falsy for now; this surprising
        # behaviour is slated to change, hence the FutureWarning.
        warnings.warn(
            "The behavior of this method will change in future versions. "
            "Use specific 'len(elem)' or 'elem is not None' test instead.",
            FutureWarning, stacklevel=2
            )
        return len(self._children) != 0 # emulate old behaviour, for now

    def __getitem__(self, index):
        # *index* may be an int or a slice; list semantics apply.
        return self._children[index]

    def __setitem__(self, index, element):
        # if isinstance(index, slice):
        #     for elt in element:
        #         assert iselement(elt)
        # else:
        #     assert iselement(element)
        self._children[index] = element

    def __delitem__(self, index):
        del self._children[index]

    def append(self, subelement):
        """Add *subelement* to the end of this element.

        The new element will appear in document order after the last existing
        subelement (or directly after the text, if it's the first subelement),
        but before the end tag for this element.

        """
        self._assert_is_element(subelement)
        self._children.append(subelement)

    def extend(self, elements):
        """Append subelements from a sequence.

        *elements* is a sequence with zero or more elements.

        """
        # Validate everything first so a bad element leaves no partial state
        # ... from earlier items of the same call in _children.
        for element in elements:
            self._assert_is_element(element)
        self._children.extend(elements)

    def insert(self, index, subelement):
        """Insert *subelement* at position *index*."""
        self._assert_is_element(subelement)
        self._children.insert(index, subelement)

    def _assert_is_element(self, e):
        # Need to refer to the actual Python implementation, not the
        # shadowing C implementation.
        if not isinstance(e, _Element_Py):
            raise TypeError('expected an Element, not %s' % type(e).__name__)

    def remove(self, subelement):
        """Remove matching subelement.

        Unlike the find methods, this method compares elements based on
        identity, NOT ON tag value or contents.  To remove subelements by
        other means, the easiest way is to use a list comprehension to
        select what elements to keep, and then use slice assignment to update
        the parent element.

        ValueError is raised if a matching element could not be found.

        """
        # assert iselement(element)
        self._children.remove(subelement)

    def getchildren(self):
        """(Deprecated) Return all subelements.

        Elements are returned in document order.

        """
        warnings.warn(
            "This method will be removed in future versions.  "
            "Use 'list(elem)' or iteration over elem instead.",
            DeprecationWarning, stacklevel=2
            )
        return self._children

    def find(self, path, namespaces=None):
        """Find first matching element by tag name or path.

        *path* is a string having either an element tag or an XPath,
        *namespaces* is an optional mapping from namespace prefix to full name.

        Return the first matching element, or None if no element was found.

        """
        return ElementPath.find(self, path, namespaces)

    def findtext(self, path, default=None, namespaces=None):
        """Find text for first matching element by tag name or path.

        *path* is a string having either an element tag or an XPath,
        *default* is the value to return if the element was not found,
        *namespaces* is an optional mapping from namespace prefix to full name.

        Return text content of first matching element, or default value if
        none was found.  Note that if an element is found having no text
        content, the empty string is returned.

        """
        return ElementPath.findtext(self, path, default, namespaces)

    def findall(self, path, namespaces=None):
        """Find all matching subelements by tag name or path.

        *path* is a string having either an element tag or an XPath,
        *namespaces* is an optional mapping from namespace prefix to full name.

        Returns list containing all matching elements in document order.

        """
        return ElementPath.findall(self, path, namespaces)

    def iterfind(self, path, namespaces=None):
        """Find all matching subelements by tag name or path.

        *path* is a string having either an element tag or an XPath,
        *namespaces* is an optional mapping from namespace prefix to full name.

        Return an iterable yielding all matching elements in document order.

        """
        return ElementPath.iterfind(self, path, namespaces)

    def clear(self):
        """Reset element.

        This function removes all subelements, clears all attributes, and sets
        the text and tail attributes to None.

        """
        self.attrib.clear()
        self._children = []
        self.text = self.tail = None

    def get(self, key, default=None):
        """Get element attribute.

        Equivalent to attrib.get, but some implementations may handle this a
        bit more efficiently.  *key* is what attribute to look for, and
        *default* is what to return if the attribute was not found.

        Returns a string containing the attribute value, or the default if
        attribute was not found.

        """
        return self.attrib.get(key, default)

    def set(self, key, value):
        """Set element attribute.

        Equivalent to attrib[key] = value, but some implementations may handle
        this a bit more efficiently.  *key* is what attribute to set, and
        *value* is the attribute value to set it to.

        """
        self.attrib[key] = value

    def keys(self):
        """Get list of attribute names.

        Names are returned in an arbitrary order, just like an ordinary
        Python dict.  Equivalent to attrib.keys()

        """
        return self.attrib.keys()

    def items(self):
        """Get element attributes as a sequence.

        The attributes are returned in arbitrary order.  Equivalent to
        attrib.items().

        Return a list of (name, value) tuples.

        """
        return self.attrib.items()

    def iter(self, tag=None):
        """Create tree iterator.

        The iterator loops over the element and all subelements in document
        order, returning all elements with a matching tag.

        If the tree structure is modified during iteration, new or removed
        elements may or may not be included.  To get a stable set, use the
        list() function on the iterator, and loop over the resulting list.

        *tag* is what tags to look for (default is to return all elements)

        Return an iterator containing all the matching elements.

        """
        # "*" is equivalent to "match everything".
        if tag == "*":
            tag = None
        if tag is None or self.tag == tag:
            yield self
        for e in self._children:
            yield from e.iter(tag)

    # compatibility
    def getiterator(self, tag=None):
        # Change for a DeprecationWarning in 1.4
        warnings.warn(
            "This method will be removed in future versions.  "
            "Use 'elem.iter()' or 'list(elem.iter())' instead.",
            PendingDeprecationWarning, stacklevel=2
        )
        return list(self.iter(tag))

    def itertext(self):
        """Create text iterator.

        The iterator loops over the element and all subelements in document
        order, returning all inner text.

        """
        tag = self.tag
        # Skip nodes whose tag is neither a string nor None (e.g. the
        # callable sentinel tags used for comments/PIs).
        if not isinstance(tag, str) and tag is not None:
            return
        t = self.text
        if t:
            yield t
        for e in self:
            yield from e.itertext()
            t = e.tail
            if t:
                yield t
yield t
def SubElement(parent, tag, attrib={}, **extra):
    """Subelement factory which creates an element instance, and appends it
    to an existing parent.

    The element tag, attribute names, and attribute values can be either
    bytes or Unicode strings.

    *parent* is the parent element, *tag* is the subelements name, *attrib* is
    an optional directory containing element attributes, *extra* are
    additional attributes given as keyword arguments.

    """
    # Copy first so neither the caller's dict nor the default is mutated.
    attributes = attrib.copy()
    attributes.update(extra)
    child = parent.makeelement(tag, attributes)
    parent.append(child)
    return child
def Comment(text=None):
    """Comment element factory.

    This function creates a special element which the standard serializer
    serializes as an XML comment.

    *text* is a string containing the comment string.

    """
    # The Comment function itself is used as the sentinel tag value.
    comment = Element(Comment)
    comment.text = text
    return comment
def ProcessingInstruction(target, text=None):
    """Processing Instruction element factory.

    This function creates a special element which the standard serializer
    serializes as an XML comment.

    *target* is a string containing the processing instruction, *text* is a
    string containing the processing instruction contents, if any.

    """
    # The ProcessingInstruction function itself is the sentinel tag value.
    pi = Element(ProcessingInstruction)
    if text:
        pi.text = target + " " + text
    else:
        pi.text = target
    return pi

PI = ProcessingInstruction
class QName:
    """Qualified name wrapper.

    This class can be used to wrap a QName attribute value in order to get
    proper namespace handing on output.

    *text_or_uri* is a string containing the QName value either in the form
    {uri}local, or if the tag argument is given, the URI part of a QName.

    *tag* is an optional argument which if given, will make the first
    argument (text_or_uri) be interpreted as a URI, and this argument (tag)
    be interpreted as a local name.

    """
    def __init__(self, text_or_uri, tag=None):
        # With *tag*, the first argument is a URI: combine into {uri}local.
        self.text = "{%s}%s" % (text_or_uri, tag) if tag else text_or_uri

    def __str__(self):
        return self.text

    def __repr__(self):
        return '<%s %r>' % (self.__class__.__name__, self.text)

    def __hash__(self):
        # Hash like the wrapped string so QName("x") and "x" collide.
        return hash(self.text)

    # Comparisons compare the wrapped text, unwrapping QName operands first.

    def __le__(self, other):
        rhs = other.text if isinstance(other, QName) else other
        return self.text <= rhs

    def __lt__(self, other):
        rhs = other.text if isinstance(other, QName) else other
        return self.text < rhs

    def __ge__(self, other):
        rhs = other.text if isinstance(other, QName) else other
        return self.text >= rhs

    def __gt__(self, other):
        rhs = other.text if isinstance(other, QName) else other
        return self.text > rhs

    def __eq__(self, other):
        rhs = other.text if isinstance(other, QName) else other
        return self.text == rhs
# --------------------------------------------------------------------
class ElementTree:
    """An XML element hierarchy.

    This class also provides support for serialization to and from
    standard XML.

    *element* is an optional root element node,
    *file* is an optional file handle or file name of an XML file whose
    contents will be used to initialize the tree with.

    """
    def __init__(self, element=None, file=None):
        # assert element is None or iselement(element)
        self._root = element # first node
        if file:
            self.parse(file)

    def getroot(self):
        """Return root element of this tree."""
        return self._root

    def _setroot(self, element):
        """Replace root element of this tree.

        This will discard the current contents of the tree and replace it
        with the given element.  Use with care!

        """
        # assert iselement(element)
        self._root = element

    def parse(self, source, parser=None):
        """Load external XML document into element tree.

        *source* is a file name or file object, *parser* is an optional parser
        instance that defaults to XMLParser.

        ParseError is raised if the parser fails to parse the document.

        Returns the root element of the given source document.

        """
        close_source = False
        if not hasattr(source, "read"):
            # *source* is a file name: open it here and close it ourselves.
            source = open(source, "rb")
            close_source = True
        try:
            if parser is None:
                # If no parser was specified, create a default XMLParser
                parser = XMLParser()
                if hasattr(parser, '_parse_whole'):
                    # The default XMLParser, when it comes from an accelerator,
                    # can define an internal _parse_whole API for efficiency.
                    # It can be used to parse the whole source without feeding
                    # it with chunks.
                    self._root = parser._parse_whole(source)
                    return self._root
            # Feed the parser in 64 KiB chunks to bound memory use.
            while True:
                data = source.read(65536)
                if not data:
                    break
                parser.feed(data)
            self._root = parser.close()
            return self._root
        finally:
            # Only close what we opened; caller-provided objects stay open.
            if close_source:
                source.close()

    def iter(self, tag=None):
        """Create and return tree iterator for the root element.

        The iterator loops over all elements in this tree, in document order.

        *tag* is a string with the tag name to iterate over
        (default is to return all elements).

        """
        # assert self._root is not None
        return self._root.iter(tag)

    # compatibility
    def getiterator(self, tag=None):
        # Change for a DeprecationWarning in 1.4
        warnings.warn(
            "This method will be removed in future versions.  "
            "Use 'tree.iter()' or 'list(tree.iter())' instead.",
            PendingDeprecationWarning, stacklevel=2
        )
        return list(self.iter(tag))

    def find(self, path, namespaces=None):
        """Find first matching element by tag name or path.

        Same as getroot().find(path), which is Element.find()

        *path* is a string having either an element tag or an XPath,
        *namespaces* is an optional mapping from namespace prefix to full name.

        Return the first matching element, or None if no element was found.

        """
        # assert self._root is not None
        if path[:1] == "/":
            # Absolute paths are rewritten relative to the root for now; the
            # historical behaviour is wrong and scheduled to change.
            path = "." + path
            warnings.warn(
                "This search is broken in 1.3 and earlier, and will be "
                "fixed in a future version.  If you rely on the current "
                "behaviour, change it to %r" % path,
                FutureWarning, stacklevel=2
                )
        return self._root.find(path, namespaces)

    def findtext(self, path, default=None, namespaces=None):
        """Find first matching element by tag name or path.

        Same as getroot().findtext(path),  which is Element.findtext()

        *path* is a string having either an element tag or an XPath,
        *namespaces* is an optional mapping from namespace prefix to full name.

        Return the first matching element, or None if no element was found.

        """
        # assert self._root is not None
        if path[:1] == "/":
            path = "." + path
            warnings.warn(
                "This search is broken in 1.3 and earlier, and will be "
                "fixed in a future version.  If you rely on the current "
                "behaviour, change it to %r" % path,
                FutureWarning, stacklevel=2
                )
        return self._root.findtext(path, default, namespaces)

    def findall(self, path, namespaces=None):
        """Find all matching subelements by tag name or path.

        Same as getroot().findall(path), which is Element.findall().

        *path* is a string having either an element tag or an XPath,
        *namespaces* is an optional mapping from namespace prefix to full name.

        Return list containing all matching elements in document order.

        """
        # assert self._root is not None
        if path[:1] == "/":
            path = "." + path
            warnings.warn(
                "This search is broken in 1.3 and earlier, and will be "
                "fixed in a future version.  If you rely on the current "
                "behaviour, change it to %r" % path,
                FutureWarning, stacklevel=2
                )
        return self._root.findall(path, namespaces)

    def iterfind(self, path, namespaces=None):
        """Find all matching subelements by tag name or path.

        Same as getroot().iterfind(path), which is element.iterfind()

        *path* is a string having either an element tag or an XPath,
        *namespaces* is an optional mapping from namespace prefix to full name.

        Return an iterable yielding all matching elements in document order.

        """
        # assert self._root is not None
        if path[:1] == "/":
            path = "." + path
            warnings.warn(
                "This search is broken in 1.3 and earlier, and will be "
                "fixed in a future version.  If you rely on the current "
                "behaviour, change it to %r" % path,
                FutureWarning, stacklevel=2
                )
        return self._root.iterfind(path, namespaces)

    def write(self, file_or_filename,
              encoding=None,
              xml_declaration=None,
              default_namespace=None,
              method=None, *,
              short_empty_elements=True):
        """Write element tree to a file as XML.

        Arguments:
          *file_or_filename* -- file name or a file object opened for writing

          *encoding* -- the output encoding (default: US-ASCII)

          *xml_declaration* -- bool indicating if an XML declaration should be
                               added to the output. If None, an XML declaration
                               is added if encoding IS NOT either of:
                               US-ASCII, UTF-8, or Unicode

          *default_namespace* -- sets the default XML namespace (for "xmlns")

          *method* -- either "xml" (default), "html", "text", or "c14n"

          *short_empty_elements* -- controls the formatting of elements
                                    that contain no content. If True (default)
                                    they are emitted as a single self-closed
                                    tag, otherwise they are emitted as a pair
                                    of start/end tags

        """
        if not method:
            method = "xml"
        elif method not in _serialize:
            raise ValueError("unknown method %r" % method)
        if not encoding:
            # c14n mandates UTF-8; otherwise default to US-ASCII.
            if method == "c14n":
                encoding = "utf-8"
            else:
                encoding = "us-ascii"
        enc_lower = encoding.lower()
        with _get_writer(file_or_filename, enc_lower) as write:
            if method == "xml" and (xml_declaration or
                    (xml_declaration is None and
                     enc_lower not in ("utf-8", "us-ascii", "unicode"))):
                declared_encoding = encoding
                if enc_lower == "unicode":
                    # Retrieve the default encoding for the xml declaration
                    import locale
                    declared_encoding = locale.getpreferredencoding()
                write("<?xml version='1.0' encoding='%s'?>\n" % (
                    declared_encoding,))
            if method == "text":
                _serialize_text(write, self._root)
            else:
                # Compute the qname/namespace tables once for the whole tree,
                # then dispatch to the serializer registered for *method*.
                qnames, namespaces = _namespaces(self._root, default_namespace)
                serialize = _serialize[method]
                serialize(write, self._root, qnames, namespaces,
                          short_empty_elements=short_empty_elements)

    def write_c14n(self, file):
        # lxml.etree compatibility.  use output method instead
        return self.write(file, method="c14n")
# --------------------------------------------------------------------
# serialization support
@contextlib.contextmanager
def _get_writer(file_or_filename, encoding):
    # Yield a text write callable for *file_or_filename* and release every
    # resource we created when the with-block exits.  The special encoding
    # "unicode" means "write str, no encoding step".
    try:
        write = file_or_filename.write
    except AttributeError:
        # file_or_filename is a file name
        if encoding == "unicode":
            file = open(file_or_filename, "w")
        else:
            file = open(file_or_filename, "w", encoding=encoding,
                        errors="xmlcharrefreplace")
        with file:
            yield file.write
    else:
        # file_or_filename is a file-like object
        # encoding determines if it is a text or binary writer
        if encoding == "unicode":
            # use a text writer as is
            yield write
        else:
            # wrap a binary writer with TextIOWrapper
            with contextlib.ExitStack() as stack:
                if isinstance(file_or_filename, io.BufferedIOBase):
                    file = file_or_filename
                elif isinstance(file_or_filename, io.RawIOBase):
                    file = io.BufferedWriter(file_or_filename)
                    # Keep the original file open when the BufferedWriter is
                    # destroyed
                    stack.callback(file.detach)
                else:
                    # This is to handle passed objects that aren't in the
                    # IOBase hierarchy, but just have a write method
                    file = io.BufferedIOBase()
                    file.writable = lambda: True
                    file.write = write
                    try:
                        # TextIOWrapper uses this methods to determine
                        # if BOM (for UTF-16, etc) should be added
                        file.seekable = file_or_filename.seekable
                        file.tell = file_or_filename.tell
                    except AttributeError:
                        pass
                file = io.TextIOWrapper(file,
                                        encoding=encoding,
                                        errors="xmlcharrefreplace",
                                        newline="\n")
                # Keep the original file open when the TextIOWrapper is
                # destroyed
                stack.callback(file.detach)
                yield file.write
def _namespaces(elem, default_namespace=None):
    # identify namespaces used in this tree

    # maps qnames to *encoded* prefix:local names
    qnames = {None: None}

    # maps uri:s to prefixes
    namespaces = {}
    if default_namespace:
        # The empty prefix marks the default (unprefixed) namespace.
        namespaces[default_namespace] = ""

    def add_qname(qname):
        # calculate serialized qname representation
        try:
            if qname[:1] == "{":
                uri, tag = qname[1:].rsplit("}", 1)
                prefix = namespaces.get(uri)
                if prefix is None:
                    # Try the user-registered prefix first, then invent an
                    # "ns<N>" prefix numbered by discovery order.
                    prefix = _namespace_map.get(uri)
                    if prefix is None:
                        prefix = "ns%d" % len(namespaces)
                    if prefix != "xml":
                        namespaces[uri] = prefix
                if prefix:
                    qnames[qname] = "%s:%s" % (prefix, tag)
                else:
                    qnames[qname] = tag # default element
            else:
                if default_namespace:
                    # FIXME: can this be handled in XML 1.0?
                    raise ValueError(
                        "cannot use non-qualified names with "
                        "default_namespace option"
                        )
                qnames[qname] = qname
        except TypeError:
            _raise_serialization_error(qname)

    # populate qname and namespaces table
    for elem in elem.iter():
        tag = elem.tag
        if isinstance(tag, QName):
            if tag.text not in qnames:
                add_qname(tag.text)
        elif isinstance(tag, str):
            if tag not in qnames:
                add_qname(tag)
        elif tag is not None and tag is not Comment and tag is not PI:
            _raise_serialization_error(tag)
        for key, value in elem.items():
            # Both attribute names and QName attribute values need entries.
            if isinstance(key, QName):
                key = key.text
            if key not in qnames:
                add_qname(key)
            if isinstance(value, QName) and value.text not in qnames:
                add_qname(value.text)
        text = elem.text
        if isinstance(text, QName) and text.text not in qnames:
            add_qname(text.text)
    return qnames, namespaces
def _serialize_xml(write, elem, qnames, namespaces,
                   short_empty_elements, **kwargs):
    # Recursively emit *elem* as XML through *write*.  *qnames* maps raw tag
    # strings to their encoded prefix:local form; *namespaces* is only
    # non-None for the outermost call (recursive calls pass None), so xmlns
    # declarations are written on the root element only.
    tag = elem.tag
    text = elem.text
    if tag is Comment:
        write("<!--%s-->" % text)
    elif tag is ProcessingInstruction:
        write("<?%s?>" % text)
    else:
        tag = qnames[tag]
        if tag is None:
            # Suppressed tag (maps to None): emit only text and children.
            if text:
                write(_escape_cdata(text))
            for e in elem:
                _serialize_xml(write, e, qnames, None,
                               short_empty_elements=short_empty_elements)
        else:
            write("<" + tag)
            items = list(elem.items())
            if items or namespaces:
                if namespaces:
                    for v, k in sorted(namespaces.items(),
                                       key=lambda x: x[1]):  # sort on prefix
                        if k:
                            k = ":" + k
                        write(" xmlns%s=\"%s\"" % (
                            k,
                            _escape_attrib(v)
                            ))
                for k, v in sorted(items):  # lexical order
                    if isinstance(k, QName):
                        k = k.text
                    if isinstance(v, QName):
                        v = qnames[v.text]
                    else:
                        v = _escape_attrib(v)
                    write(" %s=\"%s\"" % (qnames[k], v))
            if text or len(elem) or not short_empty_elements:
                write(">")
                if text:
                    write(_escape_cdata(text))
                for e in elem:
                    _serialize_xml(write, e, qnames, None,
                                   short_empty_elements=short_empty_elements)
                write("</" + tag + ">")
            else:
                # No content: emit a self-closing tag.
                write(" />")
    if elem.tail:
        write(_escape_cdata(elem.tail))
HTML_EMPTY = ("area", "base", "basefont", "br", "col", "frame", "hr",
"img", "input", "isindex", "link", "meta", "param")
try:
HTML_EMPTY = set(HTML_EMPTY)
except NameError:
pass
def _serialize_html(write, elem, qnames, namespaces, **kwargs):
    # Write *elem* and its subtree as HTML.  Differs from _serialize_xml in
    # that void elements (see HTML_EMPTY) get no closing tag, and the text
    # content of <script>/<style> elements is emitted unescaped.
    tag = elem.tag
    text = elem.text
    if tag is Comment:
        write("<!--%s-->" % _escape_cdata(text))
    elif tag is ProcessingInstruction:
        write("<?%s?>" % _escape_cdata(text))
    else:
        tag = qnames[tag]
        if tag is None:
            # Tag mapped to None: serialize only the text and children.
            if text:
                write(_escape_cdata(text))
            for e in elem:
                _serialize_html(write, e, qnames, None)
        else:
            write("<" + tag)
            items = list(elem.items())
            if items or namespaces:
                if namespaces:
                    for v, k in sorted(namespaces.items(),
                                       key=lambda x: x[1]):  # sort on prefix
                        if k:
                            k = ":" + k
                        write(" xmlns%s=\"%s\"" % (
                            k,
                            _escape_attrib(v)
                            ))
                for k, v in sorted(items):  # lexical order
                    if isinstance(k, QName):
                        k = k.text
                    if isinstance(v, QName):
                        v = qnames[v.text]
                    else:
                        v = _escape_attrib_html(v)
                    # FIXME: handle boolean attributes
                    write(" %s=\"%s\"" % (qnames[k], v))
            write(">")
            ltag = tag.lower()
            if text:
                if ltag == "script" or ltag == "style":
                    # Script and style bodies must not be entity-escaped.
                    write(text)
                else:
                    write(_escape_cdata(text))
            for e in elem:
                _serialize_html(write, e, qnames, None)
            if ltag not in HTML_EMPTY:
                write("</" + tag + ">")
    if elem.tail:
        write(_escape_cdata(elem.tail))
def _serialize_text(write, elem):
for part in elem.itertext():
write(part)
if elem.tail:
write(elem.tail)
# Dispatch table: serialization method name -> writer function.
_serialize = {
    "xml": _serialize_xml,
    "html": _serialize_html,
    "text": _serialize_text,
# this optional method is imported at the end of the module
#     "c14n": _serialize_c14n,
}
def register_namespace(prefix, uri):
    """Register a namespace prefix.
    The registry is global, and any existing mapping for either the
    given prefix or the namespace URI will be removed.
    *prefix* is the namespace prefix, *uri* is a namespace uri. Tags and
    attributes in this namespace will be serialized with prefix if possible.
    ValueError is raised if prefix is reserved or is invalid.
    """
    if re.match(r"ns\d+$", prefix):
        raise ValueError("Prefix format reserved for internal use")
    # Drop every entry that clashes with either the new prefix or the new
    # URI, then install the requested mapping.
    stale = [registered_uri for registered_uri, registered_prefix
             in _namespace_map.items()
             if registered_uri == uri or registered_prefix == prefix]
    for registered_uri in stale:
        del _namespace_map[registered_uri]
    _namespace_map[uri] = prefix
# Global registry of namespace prefixes; extended at runtime through
# register_namespace().
_namespace_map = {
    # "well-known" namespace prefixes
    "http://www.w3.org/XML/1998/namespace": "xml",
    "http://www.w3.org/1999/xhtml": "html",
    "http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
    "http://schemas.xmlsoap.org/wsdl/": "wsdl",
    # xml schema
    "http://www.w3.org/2001/XMLSchema": "xs",
    "http://www.w3.org/2001/XMLSchema-instance": "xsi",
    # dublin core
    "http://purl.org/dc/elements/1.1/": "dc",
}
# For tests and troubleshooting
register_namespace._namespace_map = _namespace_map
def _raise_serialization_error(text):
raise TypeError(
"cannot serialize %r (type %s)" % (text, type(text).__name__)
)
def _escape_cdata(text):
# escape character data
try:
# it's worth avoiding do-nothing calls for strings that are
# shorter than 500 characters, or so. assume that's, by far,
# the most common case in most applications.
if "&" in text:
text = text.replace("&", "&")
if "<" in text:
text = text.replace("<", "<")
if ">" in text:
text = text.replace(">", ">")
return text
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_attrib(text):
# escape attribute value
try:
if "&" in text:
text = text.replace("&", "&")
if "<" in text:
text = text.replace("<", "<")
if ">" in text:
text = text.replace(">", ">")
if "\"" in text:
text = text.replace("\"", """)
# The following business with carriage returns is to satisfy
# Section 2.11 of the XML specification, stating that
# CR or CR LN should be replaced with just LN
# http://www.w3.org/TR/REC-xml/#sec-line-ends
if "\r\n" in text:
text = text.replace("\r\n", "\n")
if "\r" in text:
text = text.replace("\r", "\n")
#The following four lines are issue 17582
if "\n" in text:
text = text.replace("\n", " ")
if "\t" in text:
text = text.replace("\t", "	")
return text
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_attrib_html(text):
# escape attribute value
try:
if "&" in text:
text = text.replace("&", "&")
if ">" in text:
text = text.replace(">", ">")
if "\"" in text:
text = text.replace("\"", """)
return text
except (TypeError, AttributeError):
_raise_serialization_error(text)
# --------------------------------------------------------------------
def tostring(element, encoding=None, method=None, *,
             short_empty_elements=True):
    """Generate string representation of XML element.
    All subelements are included.  If encoding is "unicode", a string
    is returned; otherwise a bytestring is returned.
    *element* is an Element instance, *encoding* is an optional output
    encoding defaulting to US-ASCII, *method* is an optional output which can
    be one of "xml" (default), "html", "text" or "c14n".
    Returns an (optionally) encoded string containing the XML data.
    """
    # Text output for the "unicode" pseudo-encoding, bytes otherwise.
    if encoding == 'unicode':
        buffer = io.StringIO()
    else:
        buffer = io.BytesIO()
    ElementTree(element).write(buffer, encoding, method=method,
                               short_empty_elements=short_empty_elements)
    return buffer.getvalue()
class _ListDataStream(io.BufferedIOBase):
"""An auxiliary stream accumulating into a list reference."""
def __init__(self, lst):
self.lst = lst
def writable(self):
return True
def seekable(self):
return True
def write(self, b):
self.lst.append(b)
def tell(self):
return len(self.lst)
def tostringlist(element, encoding=None, method=None, *,
                 short_empty_elements=True):
    """Generate string representation of XML element as a list of chunks.
    Same serialization as tostring(), but the writer's raw output chunks
    are returned uncombined; joining them yields the tostring() result.
    """
    lst = []
    stream = _ListDataStream(lst)
    ElementTree(element).write(stream, encoding, method=method,
                               short_empty_elements=short_empty_elements)
    return lst
def dump(elem):
    """Write element tree or element structure to sys.stdout.
    This function should be used for debugging only.
    *elem* is either an ElementTree, or a single Element. The exact output
    format is implementation dependent. In this version, it's written as an
    ordinary XML file.
    """
    # debugging
    if not isinstance(elem, ElementTree):
        elem = ElementTree(elem)
    elem.write(sys.stdout, encoding="unicode")
    tail = elem.getroot().tail
    if not tail or tail[-1] != "\n":
        # Make sure the debug output ends with a newline.
        sys.stdout.write("\n")
# --------------------------------------------------------------------
# parsing
def parse(source, parser=None):
    """Parse XML document into element tree.
    *source* is a filename or file object containing XML data,
    *parser* is an optional parser instance defaulting to XMLParser.
    Return an ElementTree instance.
    """
    # Parsing is delegated to the ElementTree instance, which becomes the
    # returned tree.
    tree = ElementTree()
    tree.parse(source, parser)
    return tree
def iterparse(source, events=None, parser=None):
    """Incrementally parse XML document into ElementTree.
    This class also reports what's going on to the user based on the
    *events* it is initialized with. The supported events are the strings
    "start", "end", "start-ns" and "end-ns" (the "ns" events are used to get
    detailed namespace information). If *events* is omitted, only
    "end" events are reported.
    *source* is a filename or file object containing XML data, *events* is
    a list of events to report back, *parser* is an optional parser instance.
    Returns an iterator providing (event, elem) pairs.
    """
    # Use the internal, undocumented _parser argument for now; When the
    # parser argument of iterparse is removed, this can be killed.
    pullparser = XMLPullParser(events=events, _parser=parser)
    def iterator():
        # Generator driving the pull parser.  Note that *source* and
        # *close_source* are free variables resolved lazily, so the
        # rebinding of *source* further below is seen here.
        try:
            while True:
                yield from pullparser.read_events()
                # load event buffer
                data = source.read(16 * 1024)
                if not data:
                    break
                pullparser.feed(data)
            root = pullparser._close_and_return_root()
            yield from pullparser.read_events()
            it.root = root
        finally:
            if close_source:
                source.close()
    class IterParseIterator(collections.abc.Iterator):
        # Wrap the generator in a named iterator type so the returned
        # object can also carry the .root attribute.
        __next__ = iterator().__next__
    it = IterParseIterator()
    it.root = None
    del iterator, IterParseIterator
    close_source = False
    if not hasattr(source, "read"):
        # Treat *source* as a filename; we open it and must close it later.
        source = open(source, "rb")
        close_source = True
    return it
class XMLPullParser:
    # Non-blocking ("pull") parser: the caller feed()s data chunks and
    # retrieves the resulting parsing events through read_events().
    def __init__(self, events=None, *, _parser=None):
        # The _parser argument is for internal use only and must not be relied
        # upon in user code. It will be removed in a future release.
        # See http://bugs.python.org/issue17741 for more details.
        self._events_queue = collections.deque()
        self._parser = _parser or XMLParser(target=TreeBuilder())
        # wire up the parser for event reporting
        if events is None:
            events = ("end",)
        self._parser._setevents(self._events_queue, events)
    def feed(self, data):
        """Feed encoded data to parser."""
        if self._parser is None:
            raise ValueError("feed() called after end of stream")
        if data:
            try:
                self._parser.feed(data)
            except SyntaxError as exc:
                # Defer the error: it is re-raised from read_events() so
                # events gathered before the failure are not lost.
                self._events_queue.append(exc)
    def _close_and_return_root(self):
        # iterparse needs this to set its root attribute properly :(
        root = self._parser.close()
        # Mark the stream as finished; feed() rejects data from now on.
        self._parser = None
        return root
    def close(self):
        """Finish feeding data to parser.
        Unlike XMLParser, does not return the root element. Use
        read_events() to consume elements from XMLPullParser.
        """
        self._close_and_return_root()
    def read_events(self):
        """Return an iterator over currently available (event, elem) pairs.
        Events are consumed from the internal event queue as they are
        retrieved from the iterator.
        """
        events = self._events_queue
        while events:
            event = events.popleft()
            if isinstance(event, Exception):
                # Parse errors queued by feed() are raised here.
                raise event
            else:
                yield event
def XML(text, parser=None):
    """Parse XML document from string constant.
    This function can be used to embed "XML Literals" in Python code.
    *text* is a string containing XML data, *parser* is an
    optional parser instance, defaulting to the standard XMLParser.
    Returns an Element instance.
    """
    if not parser:
        # No parser supplied: build a default one feeding a TreeBuilder.
        parser = XMLParser(target=TreeBuilder())
    parser.feed(text)
    return parser.close()
def XMLID(text, parser=None):
    """Parse XML document from string constant for its IDs.
    *text* is a string containing XML data, *parser* is an
    optional parser instance, defaulting to the standard XMLParser.
    Returns an (Element, dict) tuple, in which the
    dict maps element id:s to elements.
    """
    if not parser:
        parser = XMLParser(target=TreeBuilder())
    parser.feed(text)
    tree = parser.close()
    ids = {}
    # Walk the whole tree and index every element with an "id" attribute.
    for elem in tree.iter():
        id = elem.get("id")
        if id:
            ids[id] = elem
    return tree, ids
# Parse XML document from string constant.  Public alias for XML().
fromstring = XML
def fromstringlist(sequence, parser=None):
    """Parse XML document from sequence of string fragments.
    *sequence* is a list of other sequence, *parser* is an optional parser
    instance, defaulting to the standard XMLParser.
    Returns an Element instance.
    """
    if not parser:
        parser = XMLParser(target=TreeBuilder())
    # Feed each fragment in order; the parser handles chunk boundaries.
    for text in sequence:
        parser.feed(text)
    return parser.close()
# --------------------------------------------------------------------
class TreeBuilder:
    """Generic element structure builder.
    This builder converts a sequence of start, data, and end method
    calls to a well-formed element structure.
    You can use this class to build an element structure using a custom XML
    parser, or a parser for some other XML-like format.
    *element_factory* is an optional element factory which is called
    to create new Element instances, as necessary.
    """
    def __init__(self, element_factory=None):
        self._data = [] # data collector
        self._elem = [] # element stack
        self._last = None # last element
        self._tail = None # true if we're after an end tag
        if element_factory is None:
            element_factory = Element
        self._factory = element_factory
    def close(self):
        """Flush builder buffers and return toplevel document Element."""
        assert len(self._elem) == 0, "missing end tags"
        assert self._last is not None, "missing toplevel element"
        return self._last
    def _flush(self):
        # Attach accumulated character data to the last element seen:
        # as .tail if we are after an end tag, otherwise as .text.
        if self._data:
            if self._last is not None:
                text = "".join(self._data)
                if self._tail:
                    assert self._last.tail is None, "internal error (tail)"
                    self._last.tail = text
                else:
                    assert self._last.text is None, "internal error (text)"
                    self._last.text = text
            self._data = []
    def data(self, data):
        """Add text to current element."""
        self._data.append(data)
    def start(self, tag, attrs):
        """Open new element and return it.
        *tag* is the element name, *attrs* is a dict containing element
        attributes.
        """
        self._flush()
        self._last = elem = self._factory(tag, attrs)
        if self._elem:
            # Attach the new element to the currently open element.
            self._elem[-1].append(elem)
        self._elem.append(elem)
        self._tail = 0
        return elem
    def end(self, tag):
        """Close and return current Element.
        *tag* is the element name.
        """
        self._flush()
        self._last = self._elem.pop()
        assert self._last.tag == tag,\
               "end tag mismatch (expected %s, got %s)" % (
                   self._last.tag, tag)
        self._tail = 1
        return self._last
# Unique marker object used to detect whether the deprecated *html*
# argument of XMLParser() was explicitly supplied by the caller.
_sentinel = ['sentinel']
# also see ElementTree and TreeBuilder
class XMLParser:
    """Element structure builder for XML source data based on the expat parser.
    *html* are predefined HTML entities (deprecated and not supported),
    *target* is an optional target object which defaults to an instance of the
    standard TreeBuilder class, *encoding* is an optional encoding string
    which if given, overrides the encoding specified in the XML file:
    http://www.iana.org/assignments/character-sets
    """
    def __init__(self, html=_sentinel, target=None, encoding=None):
        if html is not _sentinel:
            # *html* is accepted (and ignored) only to warn legacy callers.
            warnings.warn(
                "The html argument of XMLParser() is deprecated",
                DeprecationWarning, stacklevel=2)
        try:
            from xml.parsers import expat
        except ImportError:
            try:
                import pyexpat as expat
            except ImportError:
                raise ImportError(
                    "No module named expat; use SimpleXMLTreeBuilder instead"
                )
        # "}" is expat's namespace separator: qualified names arrive as
        # "uri}local" and are normalized to "{uri}local" by _fixname().
        parser = expat.ParserCreate(encoding, "}")
        if target is None:
            target = TreeBuilder()
        # underscored names are provided for compatibility only
        self.parser = self._parser = parser
        self.target = self._target = target
        self._error = expat.error
        self._names = {} # name memo cache
        # main callbacks
        parser.DefaultHandlerExpand = self._default
        if hasattr(target, 'start'):
            parser.StartElementHandler = self._start
        if hasattr(target, 'end'):
            parser.EndElementHandler = self._end
        if hasattr(target, 'data'):
            parser.CharacterDataHandler = target.data
        # miscellaneous callbacks
        if hasattr(target, 'comment'):
            parser.CommentHandler = target.comment
        if hasattr(target, 'pi'):
            parser.ProcessingInstructionHandler = target.pi
        # Configure pyexpat: buffering, new-style attribute handling.
        parser.buffer_text = 1
        parser.ordered_attributes = 1
        parser.specified_attributes = 1
        self._doctype = None
        self.entity = {}
        try:
            self.version = "Expat %d.%d.%d" % expat.version_info
        except AttributeError:
            pass # unknown
    def _setevents(self, events_queue, events_to_report):
        # Internal API for XMLPullParser
        # events_to_report: a list of events to report during parsing (same as
        # the *events* of XMLPullParser's constructor.
        # events_queue: a list of actual parsing events that will be populated
        # by the underlying parser.
        #
        # Each handler closes over its event name and appends
        # (event, payload) pairs onto the shared queue.
        parser = self._parser
        append = events_queue.append
        for event_name in events_to_report:
            if event_name == "start":
                parser.ordered_attributes = 1
                parser.specified_attributes = 1
                def handler(tag, attrib_in, event=event_name, append=append,
                            start=self._start):
                    append((event, start(tag, attrib_in)))
                parser.StartElementHandler = handler
            elif event_name == "end":
                def handler(tag, event=event_name, append=append,
                            end=self._end):
                    append((event, end(tag)))
                parser.EndElementHandler = handler
            elif event_name == "start-ns":
                def handler(prefix, uri, event=event_name, append=append):
                    append((event, (prefix or "", uri or "")))
                parser.StartNamespaceDeclHandler = handler
            elif event_name == "end-ns":
                def handler(prefix, event=event_name, append=append):
                    append((event, None))
                parser.EndNamespaceDeclHandler = handler
            else:
                raise ValueError("unknown event %r" % event_name)
    def _raiseerror(self, value):
        # Convert an expat error into a ParseError carrying code/position.
        err = ParseError(value)
        err.code = value.code
        err.position = value.lineno, value.offset
        raise err
    def _fixname(self, key):
        # expand qname, and convert name string to ascii, if possible
        try:
            name = self._names[key]
        except KeyError:
            name = key
            if "}" in name:
                # expat reported "uri}local"; normalize to "{uri}local".
                name = "{" + name
            self._names[key] = name
        return name
    def _start(self, tag, attr_list):
        # Handler for expat's StartElementHandler. Since ordered_attributes
        # is set, the attributes are reported as a list of alternating
        # attribute name,value.
        fixname = self._fixname
        tag = fixname(tag)
        attrib = {}
        if attr_list:
            for i in range(0, len(attr_list), 2):
                attrib[fixname(attr_list[i])] = attr_list[i+1]
        return self.target.start(tag, attrib)
    def _end(self, tag):
        # Handler for expat's EndElementHandler.
        return self.target.end(self._fixname(tag))
    def _default(self, text):
        # Fallback handler: resolves undefined entity references from
        # self.entity and scans doctype declarations.
        prefix = text[:1]
        if prefix == "&":
            # deal with undefined entities
            try:
                data_handler = self.target.data
            except AttributeError:
                return
            try:
                data_handler(self.entity[text[1:-1]])
            except KeyError:
                from xml.parsers import expat
                err = expat.error(
                    "undefined entity %s: line %d, column %d" %
                    (text, self.parser.ErrorLineNumber,
                     self.parser.ErrorColumnNumber)
                    )
                err.code = 11 # XML_ERROR_UNDEFINED_ENTITY
                err.lineno = self.parser.ErrorLineNumber
                err.offset = self.parser.ErrorColumnNumber
                raise err
        elif prefix == "<" and text[:9] == "<!DOCTYPE":
            self._doctype = [] # inside a doctype declaration
        elif self._doctype is not None:
            # parse doctype contents
            if prefix == ">":
                self._doctype = None
                return
            text = text.strip()
            if not text:
                return
            self._doctype.append(text)
            n = len(self._doctype)
            if n > 2:
                type = self._doctype[1]
                if type == "PUBLIC" and n == 4:
                    name, type, pubid, system = self._doctype
                    if pubid:
                        # strip the surrounding quotes
                        pubid = pubid[1:-1]
                elif type == "SYSTEM" and n == 3:
                    name, type, system = self._doctype
                    pubid = None
                else:
                    return
                if hasattr(self.target, "doctype"):
                    self.target.doctype(name, pubid, system[1:-1])
                elif self.doctype != self._XMLParser__doctype:
                    # warn about deprecated call
                    self._XMLParser__doctype(name, pubid, system[1:-1])
                    self.doctype(name, pubid, system[1:-1])
                self._doctype = None
    def doctype(self, name, pubid, system):
        """(Deprecated) Handle doctype declaration
        *name* is the Doctype name, *pubid* is the public identifier,
        and *system* is the system identifier.
        """
        warnings.warn(
            "This method of XMLParser is deprecated. Define doctype() "
            "method on the TreeBuilder target.",
            DeprecationWarning,
            )
    # sentinel, if doctype is redefined in a subclass
    __doctype = doctype
    def feed(self, data):
        """Feed encoded data to parser."""
        try:
            self.parser.Parse(data, 0)
        except self._error as v:
            self._raiseerror(v)
    def close(self):
        """Finish feeding data to parser and return element structure."""
        try:
            self.parser.Parse("", 1) # end of data
        except self._error as v:
            self._raiseerror(v)
        try:
            close_handler = self.target.close
        except AttributeError:
            pass
        else:
            return close_handler()
        finally:
            # get rid of circular references
            del self.parser, self._parser
            del self.target, self._target
# Import the C accelerators
try:
# Element is going to be shadowed by the C implementation. We need to keep
# the Python version of it accessible for some "creative" by external code
# (see tests)
_Element_Py = Element
# Element, SubElement, ParseError, TreeBuilder, XMLParser
from _elementtree import *
except ImportError:
pass
| gpl-2.0 |
Gurgel100/gcc | gcc/testsuite/g++.dg/debug/dwarf2/fesd-baseonly.C | 7807 | // { dg-do compile }
// { dg-options "-gdwarf-2 -dA -femit-struct-debug-baseonly -fno-eliminate-unused-debug-symbols" }
// { dg-final { scan-assembler "timespec.*DW_AT_name" } }
// { dg-final { scan-assembler-not "tv_sec.*DW_AT_name" } }
// { dg-final { scan-assembler-not "tv_nsec.*DW_AT_name" } }
// { dg-final { scan-assembler "itimerspec.*DW_AT_name" } }
// { dg-final { scan-assembler-not "it_interval.*DW_AT_name" } }
// { dg-final { scan-assembler-not "it_value.*DW_AT_name" } }
// { dg-final { scan-assembler-not "gstruct_head_ordy_defn_ref_head.*DW_AT_name" } }
// { dg-final { scan-assembler-not "field_head_ordy_defn_ref_head.*DW_AT_name" } }
// { dg-final { scan-assembler-not "gstruct_head_ordy_defn_ptr_head.*DW_AT_name" } }
// { dg-final { scan-assembler-not "field_head_ordy_defn_ptr_head.*DW_AT_name" } }
// { dg-final { scan-assembler-not "gstruct_head_ordy_defn_fld_head.*DW_AT_name" } }
// { dg-final { scan-assembler-not "field_head_ordy_defn_fld_head.*DW_AT_name" } }
// { dg-final { scan-assembler "gstruct_head_ordy_defn_var_head.*DW_AT_name" } }
// { dg-final { scan-assembler-not "field_head_ordy_defn_var_head_inc.*DW_AT_name" } }
// { dg-final { scan-assembler-not "field_head_ordy_defn_var_head_ref.*DW_AT_name" } }
// { dg-final { scan-assembler-not "field_head_ordy_defn_var_head_ptr.*DW_AT_name" } }
// { dg-final { scan-assembler-not "field_head_ordy_defn_var_head_fld.*DW_AT_name" } }
// { dg-final { scan-assembler-not "gstruct_head_ordy_decl_ref_head.*DW_AT_name" } }
// { dg-final { scan-assembler "gstruct_head_ordy_defn_ref_base.*DW_AT_name" } }
// { dg-final { scan-assembler-not "field_head_ordy_defn_ref_base.*DW_AT_name" } }
// { dg-final { scan-assembler "gstruct_head_ordy_defn_ptr_base.*DW_AT_name" } }
// { dg-final { scan-assembler-not "field_head_ordy_defn_ptr_base.*DW_AT_name" } }
// { dg-final { scan-assembler "gstruct_head_ordy_defn_fld_base.*DW_AT_name" } }
// { dg-final { scan-assembler-not "field_head_ordy_defn_fld_base.*DW_AT_name" } }
// { dg-final { scan-assembler "gstruct_head_ordy_defn_var_base.*DW_AT_name" } }
// { dg-final { scan-assembler-not "field_head_ordy_defn_var_base.*DW_AT_name" } }
// { dg-final { scan-assembler-not "gstruct_head_tmpl_defn_fld_head<int>.*DW_AT_name" } }
// { dg-final { scan-assembler-not "field_head_tmpl_defn_fld_head.*DW_AT_name" } }
// { dg-final { scan-assembler "gstruct_head_tmpl_defn_var_head<int>.*DW_AT_name" } }
// { dg-final { scan-assembler-not "field_head_tmpl_defn_var_head_inc.*DW_AT_name" } }
// { dg-final { scan-assembler-not "field_head_tmpl_defn_var_head_ref.*DW_AT_name" } }
// { dg-final { scan-assembler-not "field_head_tmpl_defn_var_head_ptr.*DW_AT_name" } }
// { dg-final { scan-assembler-not "field_head_tmpl_defn_var_head_fld.*DW_AT_name" } }
// { dg-final { scan-assembler-not "gstruct_head_tmpl_decl_ref_head<int>.*DW_AT_name" } }
// { dg-final { scan-assembler-not "gstruct_head_tmpl_defn_ref_head<int>.*DW_AT_name" } }
// { dg-final { scan-assembler-not "gstruct_head_tmpl_defn_ptr_head<int>.*DW_AT_name" } }
// { dg-final { scan-assembler-not "field_head_tmpl_defn_ptr_head.*DW_AT_name" } }
// { dg-final { scan-assembler "gstruct_base_ordy_defn_ref_base.*DW_AT_name" } }
// { dg-final { scan-assembler "field_base_ordy_defn_ref_base.*DW_AT_name" } }
// { dg-final { scan-assembler "gstruct_base_ordy_defn_ptr_base.*DW_AT_name" } }
// { dg-final { scan-assembler "field_base_ordy_defn_ptr_base.*DW_AT_name" } }
// { dg-final { scan-assembler "gstruct_base_ordy_defn_fld_base.*DW_AT_name" } }
// { dg-final { scan-assembler "field_base_ordy_defn_fld_base.*DW_AT_name" } }
// { dg-final { scan-assembler "gstruct_base_ordy_defn_var_base.*DW_AT_name" } }
// { dg-final { scan-assembler "field1_base_ordy_defn_var_base_inc.*DW_AT_name" } }
// { dg-final { scan-assembler "field1_base_ordy_defn_var_base_ref.*DW_AT_name" } }
// { dg-final { scan-assembler "field1_base_ordy_defn_var_base_ptr.*DW_AT_name" } }
// { dg-final { scan-assembler "field1_base_ordy_defn_var_base_fld.*DW_AT_name" } }
// { dg-final { scan-assembler "field2_base_ordy_defn_var_base_inc.*DW_AT_name" } }
// { dg-final { scan-assembler "field2_base_ordy_defn_var_base_ref.*DW_AT_name" } }
// { dg-final { scan-assembler "field2_base_ordy_defn_var_base_ptr.*DW_AT_name" } }
// { dg-final { scan-assembler "field2_base_ordy_defn_var_base_fld.*DW_AT_name" } }
// { dg-final { scan-assembler "gstruct_head_ordy_decl_ref_base.*DW_AT_name" } }
// { dg-final { scan-assembler "gstruct_base_ordy_decl_ref_base.*DW_AT_name" } }
// { dg-final { scan-assembler "gstruct_head_tmpl_defn_var_base<int>.*DW_AT_name" } }
// { dg-final { scan-assembler-not "field_head_tmpl_defn_var_base.*DW_AT_name" } }
// { dg-final { scan-assembler "gstruct_head_tmpl_defn_fld_base<int>.*DW_AT_name" } }
// { dg-final { scan-assembler-not "field_head_tmpl_defn_fld_base.*DW_AT_name" } }
// { dg-final { scan-assembler "gstruct_base_tmpl_defn_fld_base<int>.*DW_AT_name" } }
// { dg-final { scan-assembler "field_base_tmpl_defn_fld_base.*DW_AT_name" } }
// { dg-final { scan-assembler "gstruct_base_tmpl_defn_var_base<int>.*DW_AT_name" } }
// { dg-final { scan-assembler "field1_base_tmpl_defn_var_base_inc.*DW_AT_name" } }
// { dg-final { scan-assembler "field1_base_tmpl_defn_var_base_ref.*DW_AT_name" } }
// { dg-final { scan-assembler "field1_base_tmpl_defn_var_base_ptr.*DW_AT_name" } }
// { dg-final { scan-assembler "field1_base_tmpl_defn_var_base_fld.*DW_AT_name" } }
// { dg-final { scan-assembler "field2_base_tmpl_defn_var_base_inc.*DW_AT_name" } }
// { dg-final { scan-assembler "field2_base_tmpl_defn_var_base_ref.*DW_AT_name" } }
// { dg-final { scan-assembler "field2_base_tmpl_defn_var_base_ptr.*DW_AT_name" } }
// { dg-final { scan-assembler "field2_base_tmpl_defn_var_base_fld.*DW_AT_name" } }
// { dg-final { scan-assembler "gstruct_head_tmpl_decl_ref_base<int>.*DW_AT_name" } }
// { dg-final { scan-assembler "gstruct_head_tmpl_defn_ref_base<int>.*DW_AT_name" } }
// { dg-final { scan-assembler "gstruct_head_tmpl_defn_ptr_base<int>.*DW_AT_name" } }
// { dg-final { scan-assembler-not "field_head_tmpl_defn_ptr_base.*DW_AT_name" } }
// { dg-final { scan-assembler "gstruct_base_tmpl_decl_ref_base<int>.*DW_AT_name" } }
// { dg-final { scan-assembler "gstruct_base_tmpl_defn_ref_base<int>.*DW_AT_name" } }
// { dg-final { scan-assembler "gstruct_base_tmpl_defn_ptr_base<int>.*DW_AT_name" } }
// { dg-final { scan-assembler "field_base_tmpl_defn_ptr_base.*DW_AT_name" } }
# 1 "fesd-baseonly.C"
# 1 "<built-in>"
# 1 "<command-line>"
# 1 "fesd-baseonly.C"
//#include "time.h"
# 1 "time.h" 1 3 4
// Local stand-ins for the C library structures, provided by the stubbed
// "time.h" above: seconds-plus-nanoseconds timestamp ...
struct timespec
{
 long int tv_sec;
 long int tv_nsec;
};
// ... and an interval-timer specification built from two timespec values.
struct itimerspec
{
 struct timespec it_interval;
 struct timespec it_value;
};
# 6 "fesd-baseonly.C" 2
// Global variables whose types drive which struct debug info the
// -femit-struct-debug-baseonly option must emit or suppress (checked by
// the scan-assembler directives above).
struct timespec base_var8;
struct itimerspec *base_var9;
#include "fesd-baseonly.h"
struct gstruct_head_ordy_defn_var_base base_var1;
struct gstruct_base_ordy_defn_var_base base_var2;
struct gstruct_head_tmpl_defn_var_base< int > base_var5;
struct gstruct_base_tmpl_defn_var_base< int > base_var6;
// Reads one field from each test variable so the referenced struct types
// are actually used; only the generated debug info matters here, not the
// return value.
int base_function() {
  return 0
    + base_var1.field_head_ordy_defn_var_base
    + base_var2.field1_base_ordy_defn_var_base_ptr->field_head_ordy_defn_ptr_base
    + base_var2.field1_base_ordy_defn_var_base_fld.field_head_ordy_defn_fld_base
    + base_var2.field2_base_ordy_defn_var_base_ptr->field_base_ordy_defn_ptr_base
    + base_var2.field2_base_ordy_defn_var_base_fld.field_base_ordy_defn_fld_base
    + base_var5.field_head_tmpl_defn_var_base
    + base_var6.field1_base_tmpl_defn_var_base_ptr->field_head_tmpl_defn_ptr_base
    + base_var6.field1_base_tmpl_defn_var_base_fld.field_head_tmpl_defn_fld_base
    + base_var6.field2_base_tmpl_defn_var_base_ptr->field_base_tmpl_defn_ptr_base
    + base_var6.field2_base_tmpl_defn_var_base_fld.field_base_tmpl_defn_fld_base
    ;
}
| gpl-2.0 |
ogajduse/spacewalk | java/code/src/com/redhat/rhn/frontend/action/configuration/ssm/SubscribeSubmit.java | 4445 | /**
* Copyright (c) 2009--2014 Red Hat, Inc.
*
* This software is licensed to you under the GNU General Public License,
* version 2 (GPLv2). There is NO WARRANTY for this software, express or
* implied, including the implied warranties of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
* along with this software; if not, see
* http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
*
* Red Hat trademarks are not licensed under GPLv2. No permission is
* granted to use or replicate Red Hat trademarks that are incorporated
* in this software or its documentation.
*/
package com.redhat.rhn.frontend.action.configuration.ssm;
import com.redhat.rhn.common.db.datasource.DataResult;
import com.redhat.rhn.domain.rhnset.RhnSet;
import com.redhat.rhn.domain.user.User;
import com.redhat.rhn.frontend.action.common.BaseSetOperateOnSelectedItemsAction;
import com.redhat.rhn.frontend.dto.ConfigChannelDto;
import com.redhat.rhn.frontend.struts.RequestContext;
import com.redhat.rhn.frontend.struts.RhnHelper;
import com.redhat.rhn.manager.configuration.ConfigurationManager;
import com.redhat.rhn.manager.rhnset.RhnSetDecl;
import com.redhat.rhn.manager.rhnset.RhnSetManager;
import org.apache.struts.action.ActionForm;
import org.apache.struts.action.ActionForward;
import org.apache.struts.action.ActionMapping;
import org.apache.struts.action.ActionMessage;
import java.util.Iterator;
import java.util.Map;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
/**
 * For ssm config channel subscriptions.
 *
 * Collects the configuration channels selected by the user into an RhnSet
 * and forwards to the rank/confirm page.
 * @version $Rev$
 */
public class SubscribeSubmit extends BaseSetOperateOnSelectedItemsAction {
    /**
     * {@inheritDoc}
     */
    protected DataResult getDataResult(User user, ActionForm form,
            HttpServletRequest request) {
        return ConfigurationManager.getInstance().ssmChannelListForSubscribe(user, null);
    }
    /**
     * {@inheritDoc}
     */
    protected RhnSetDecl getSetDecl() {
        return RhnSetDecl.CONFIG_CHANNELS_RANKING;
    }
    /**
     * {@inheritDoc}
     */
    protected void processMethodKeys(Map<String, String> map) {
        // Maps the "continue" submit button to the proceed() dispatch method.
        map.put("ssm.config.subscribe.jsp.continue", "proceed");
    }
    /**
     * {@inheritDoc}
     */
    protected void processParamMap(ActionForm form,
            HttpServletRequest request, Map<String, Object> params) {
        //no-op
    }
    /**
     * Continue to the confirm page.
     * @param mapping ActionMapping
     * @param formIn ActionForm
     * @param request ServletRequest
     * @param response ServletResponse
     * @return The ActionForward to go to next.
     */
    public ActionForward proceed(ActionMapping mapping,
                                 ActionForm formIn,
                                 HttpServletRequest request,
                                 HttpServletResponse response) {
        RequestContext context = new RequestContext(request);
        User user = context.getCurrentUser();
        // completely replace any existing item in the set with values
        // coming from the current request
        getSetDecl().clear(user);
        RhnSet set = updateSet(request);
        if (!context.isJavaScriptEnabled()) {
            return handleNoScript(mapping, formIn, request, response);
        }
        if (set.isEmpty()) {
            return handleEmptySelection(mapping, formIn, request);
        }
        /* BZ 221637 - Any channels that is already subscribed to by all servers will be
           omitted from the user selectable list. This RhnSet is used in the ranking
           process on the next page, so need to add in those omitted channels explicitly
           so they take place in the ranking.
         */
        ConfigurationManager manager = ConfigurationManager.getInstance();
        DataResult channels = manager.ssmChannelListForSubscribeAlreadySubbed(user);
        for (Iterator it = channels.iterator(); it.hasNext();) {
            ConfigChannelDto channel = (ConfigChannelDto) it.next();
            set.addElement(channel.getId());
        }
        RhnSetManager.store(set);
        return mapping.findForward(RhnHelper.CONFIRM_FORWARD);
    }
    /**
     *
     * {@inheritDoc}
     */
    protected ActionMessage getNoScriptMessage() {
        return new ActionMessage(
                "common.config.subscription.jsp.error.nojavascript");
    }
}
| gpl-2.0 |
b-jesch/service.fritzbox.callmonitor | resources/lib/PhoneBooks/pyicloud/vendorlibs/certifi/__init__.py | 63 | from .core import where, old_where
# Date-based version string of this vendored certifi release.
__version__ = "2016.02.28"
| gpl-2.0 |
fsxchen/WhatWeb | plugins/RackStar-Server-Appliance-OS.rb | 1006 | ##
# This file is part of WhatWeb and may be subject to
# redistribution and commercial restrictions. Please see the WhatWeb
# web site for more information on licensing and terms of use.
# http://www.morningstarsecurity.com/research/whatweb
##
# Fingerprints the RackStar Server Appliance OS from the distinctive
# branding it places in the HTTP Server header.
Plugin.define "RackStar-Server-Appliance-OS" do
author "Brendan Coles <[email protected]>" # 2010-11-01
version "0.1"
description "RackStar can be installed on almost any PC platform. That's what makes the RackStar Server Appliance Software so great! Now anyone can host sites! Still got a Cobalt RAQ? No problem! RackStar runs on that too! - homepage: http://www.rackstar.net/"
# 322 ShodanHQ results for www.rackstar.net
# Matches #
matches [
# HTTP Server Header - quoted anchor form with TITLE attribute
{ :search=>"headers[server]", :regexp=>/<A HREF='http:\/\/www.rackstar.net\/' TITLE='This server is powered by the RackStar Server Appliance OS'>RACKSTAR<\/A>/ },
# HTTP Server Header - parenthesised, unquoted anchor form
{ :search=>"headers[server]", :regexp=>/\(<A HREF=http:\/\/www.rackstar.net\/>RACKSTAR<\/A>\)/ },
]
end
JKT-OSSCLUB/ProActio | Proactio Front/php/pear/HTML/Template/Flexy/Token/Foreach.php | 2140 | <?php
/* vim: set expandtab tabstop=4 shiftwidth=4: */
// +----------------------------------------------------------------------+
// | PHP Version 4 |
// +----------------------------------------------------------------------+
// | Copyright (c) 1997-2002 The PHP Group |
// +----------------------------------------------------------------------+
// | This source file is subject to version 2.02 of the PHP license, |
// | that is bundled with this package in the file LICENSE, and is |
// | available at through the world-wide-web at |
// | http://www.php.net/license/2_02.txt. |
// | If you did not receive a copy of the PHP license and are unable to |
// | obtain it through the world-wide-web, please send a note to |
// | [email protected] so we can mail you a copy immediately. |
// +----------------------------------------------------------------------+
// | Authors: Alan Knowles <alan@akbkhome> |
// +----------------------------------------------------------------------+
//
// $Id: Foreach.php 296025 2010-03-10 11:31:37Z alan_k $
//
/**
 * Token representing a Flexy {foreach:...} statement.
 *
 * Holds the variable being iterated over plus the key (and optional
 * value) loop-variable names parsed from the tag arguments.
 */
class HTML_Template_Flexy_Token_Foreach extends HTML_Template_Flexy_Token {

    /**
     * variable to loop on.
     *
     * @var string
     * @access public
     */
    var $loopOn = '';

    /**
     * key value
     *
     * @var string
     * @access public
     */
    var $key = '';

    /**
     * optional value (in key=>value pair)
     *
     * @var string
     * @access public
     */
    var $value = '';

    /**
     * Set the loop target and loop variables.
     *
     * Expects an array of [loopOn, key] or [loopOn, key, value]
     * (last entry optional).
     *
     * @param array $value parsed tag arguments
     * @return mixed false on malformed input, nothing on success
     * @see parent::setValue()
     */
    function setValue($value) {
        // Validate before mutating: the original assigned $this->loopOn
        // first, leaving the token half-initialized on a malformed tag.
        // Also guard $value[0] itself to avoid an undefined-index notice.
        if (!isset($value[0]) || !isset($value[1]) || !strlen(trim($value[1]))) {
            // error condition: need at least "loopOn,key".
            return false;
        }
        $this->loopOn = $value[0];
        $this->key    = $value[1];
        $this->value  = isset($value[2]) ? $value[2] : '';
    }
}
| gpl-2.0 |
avpreserve/nfai | app/plugins/Cataloging/controllers/lookup/EntityController.php | 1711 | <?php
/* ----------------------------------------------------------------------
* app/controllers/lookup/EntityController.php :
* ----------------------------------------------------------------------
* CollectiveAccess
* Open-source collections management software
* ----------------------------------------------------------------------
*
* Software by Whirl-i-Gig (http://www.whirl-i-gig.com)
* Copyright 2008 Whirl-i-Gig
*
* For more information visit http://www.CollectiveAccess.org
*
* This program is free software; you may redistribute it and/or modify it under
* the terms of the provided license as published by Whirl-i-Gig
*
* CollectiveAccess is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTIES whatsoever, including any implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
* This source code is free and modifiable under the terms of
* GNU General Public License. (http://www.gnu.org/copyleft/gpl.html). See
* the "license.txt" file for details, or visit the CollectiveAccess web site at
* http://www.CollectiveAccess.org
*
* ----------------------------------------------------------------------
*/
require_once(__CA_APP_DIR__."/plugins/Contribute/controllers/BaseLookupController.php");
/**
 * Lookup (autocomplete) controller for entity records (ca_entities).
 * All behaviour is inherited from BaseLookupController; this subclass
 * only supplies the table-specific configuration below.
 */
class EntityController extends BaseLookupController {
	# -------------------------------------------------------
	protected $opb_uses_hierarchy_browser = false;	// hierarchy browser UI disabled for entity lookups
	protected $ops_table_name = 'ca_entities';		// name of "subject" table (what we're editing)
	protected $ops_name_singular = 'entity';		// singular display name of the subject
	protected $ops_search_class = 'EntitySearch';	// search class used to service lookup queries
	# -------------------------------------------------------
}
?> | gpl-2.0 |
ogajduse/spacewalk | java/code/src/com/redhat/rhn/frontend/xmlrpc/serializer/PackageSourceOverviewSerializer.java | 1758 | /**
* Copyright (c) 2016 Red Hat, Inc.
*
* This software is licensed to you under the GNU General Public License,
* version 2 (GPLv2). There is NO WARRANTY for this software, express or
* implied, including the implied warranties of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
* along with this software; if not, see
* http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
*
* Red Hat trademarks are not licensed under GPLv2. No permission is
* granted to use or replicate Red Hat trademarks that are incorporated
* in this software or its documentation.
*/
package com.redhat.rhn.frontend.xmlrpc.serializer;
import java.io.IOException;
import java.io.Writer;
import com.redhat.rhn.frontend.dto.PackageSourceOverview;
import redstone.xmlrpc.XmlRpcException;
import redstone.xmlrpc.XmlRpcSerializer;
import com.redhat.rhn.frontend.xmlrpc.serializer.util.SerializerHelper;
/**
 * PackageSourceOverviewSerializer
 * @version $Rev$
 *
 * @xmlrpc.doc
 * #struct("package source overview")
 *   #prop("int", "id")
 *   #prop("string", "name")
 * #struct_end()
 */
public class PackageSourceOverviewSerializer extends RhnXmlRpcCustomSerializer {

    /**
     * {@inheritDoc}
     */
    public Class getSupportedClass() {
        return PackageSourceOverview.class;
    }

    /** {@inheritDoc} */
    protected void doSerialize(Object value, Writer output, XmlRpcSerializer serializer)
        throws XmlRpcException, IOException {
        // Expose only the id and the NVREA string (under the "name" key).
        PackageSourceOverview overview = (PackageSourceOverview) value;
        SerializerHelper struct = new SerializerHelper(serializer);
        struct.add("id", overview.getId());
        struct.add("name", overview.getNvrea());
        struct.writeTo(output);
    }
}
| gpl-2.0 |
Badcreature/cspoker | external/plcafe/generated-src/jp/ac/kobe_u/cs/prolog/builtin/PRED_$003D$002E$002E_2.java | 1163 | package jp.ac.kobe_u.cs.prolog.builtin;
import jp.ac.kobe_u.cs.prolog.lang.Predicate;
import jp.ac.kobe_u.cs.prolog.lang.Prolog;
import jp.ac.kobe_u.cs.prolog.lang.Term;
/*
This file is generated by Prolog Cafe.
PLEASE DO NOT EDIT!
*/
/**
 <code>=.. /2</code> defined in builtins.pl<br>
 @author Mutsunori Banbara ([email protected])
 @author Naoyuki Tamura ([email protected])
 @version 1.0
*/
public class PRED_$003D$002E$002E_2 extends Predicate {
    // The two arguments of the goal A =.. B (the "univ" operator).
    public Term arg1, arg2;
    public PRED_$003D$002E$002E_2(Term a1, Term a2, Predicate cont) {
        arg1 = a1;
        arg2 = a2;
        this.cont = cont;
    }
    // No-arg constructor; arguments are supplied later via setArgument().
    public PRED_$003D$002E$002E_2(){}
    public void setArgument(Term[] args, Predicate cont) {
        arg1 = args[0];
        arg2 = args[1];
        this.cont = cont;
    }
    public int arity() { return 2; }
    public String toString() {
        return "=..(" + arg1 + "," + arg2 + ")";
    }
    public Predicate exec(Prolog engine) {
        // A=..B:-A=..B
        engine.setB0();
        Term a1, a2;
        a1 = arg1;
        a2 = arg2;
        // A=..B:-['$univ'(A,B)]
        // Delegates to the builtin '$univ'/2 predicate, continuing with cont.
        return new PRED_$univ_2(a1, a2, cont);
    }
}
| gpl-2.0 |
khalidhoffman/wp-spa | vendor/paquettg/php-html-parser/tests/Node/ParentTest.php | 5735 | <?php
use PHPHtmlParser\Dom\MockNode as Node;
/**
 * Unit tests for parent/child bookkeeping on PHPHtmlParser mock nodes:
 * adding, removing, traversing and replacing children, plus the
 * circular-reference guards and ancestor lookups.
 */
class NodeParentTest extends PHPUnit_Framework_TestCase {

    public function testHasChild()
    {
        $top = new Node;
        $kidA = new Node;
        $top->addChild($kidA);
        $this->assertTrue($top->hasChildren());
    }

    public function testHasChildNoChildren()
    {
        // A freshly created node starts out childless.
        $node = new Node;
        $this->assertFalse($node->hasChildren());
    }

    public function testAddChild()
    {
        $top = new Node;
        $kidA = new Node;
        $this->assertTrue($top->addChild($kidA));
    }

    public function testAddChildTwoParent()
    {
        // Re-parenting a node must detach it from its previous parent.
        $top = new Node;
        $otherTop = new Node;
        $kidA = new Node;
        $top->addChild($kidA);
        $otherTop->addChild($kidA);
        $this->assertFalse($top->hasChildren());
    }

    public function testGetChild()
    {
        $top = new Node;
        $kidA = new Node;
        $kidB = new Node;
        $top->addChild($kidA);
        $top->addChild($kidB);
        $this->assertTrue($top->getChild($kidB->id()) instanceof Node);
    }

    public function testRemoveChild()
    {
        $top = new Node;
        $kidA = new Node;
        $top->addChild($kidA);
        $top->removeChild($kidA->id());
        $this->assertFalse($top->hasChildren());
    }

    public function testRemoveChildNotExists()
    {
        // Removing an unknown id is a harmless no-op.
        $top = new Node;
        $top->removeChild(1);
        $this->assertFalse($top->hasChildren());
    }

    public function testNextChild()
    {
        $top = new Node;
        $kidA = new Node;
        $kidB = new Node;
        $top->addChild($kidA);
        $top->addChild($kidB);
        $this->assertEquals($kidB->id(), $top->nextChild($kidA->id())->id());
    }

    public function testNextChildWithRemove()
    {
        // After removing the middle child, traversal skips to the third.
        $top = new Node;
        $kidA = new Node;
        $kidB = new Node;
        $kidC = new Node;
        $top->addChild($kidA);
        $top->addChild($kidB);
        $top->addChild($kidC);
        $top->removeChild($kidB->id());
        $this->assertEquals($kidC->id(), $top->nextChild($kidA->id())->id());
    }

    public function testPreviousChild()
    {
        $top = new Node;
        $kidA = new Node;
        $kidB = new Node;
        $top->addChild($kidA);
        $top->addChild($kidB);
        $this->assertEquals($kidA->id(), $top->previousChild($kidB->id())->id());
    }

    public function testPreviousChildWithRemove()
    {
        // After removing the middle child, traversal skips back to the first.
        $top = new Node;
        $kidA = new Node;
        $kidB = new Node;
        $kidC = new Node;
        $top->addChild($kidA);
        $top->addChild($kidB);
        $top->addChild($kidC);
        $top->removeChild($kidB->id());
        $this->assertEquals($kidA->id(), $top->previousChild($kidC->id())->id());
    }

    public function testFirstChild()
    {
        $top = new Node;
        $kidA = new Node;
        $kidB = new Node;
        $kidC = new Node;
        $top->addChild($kidA);
        $top->addChild($kidB);
        $top->addChild($kidC);
        $this->assertEquals($kidA->id(), $top->firstChild()->id());
    }

    public function testLastChild()
    {
        $top = new Node;
        $kidA = new Node;
        $kidB = new Node;
        $kidC = new Node;
        $top->addChild($kidA);
        $top->addChild($kidB);
        $top->addChild($kidC);
        $this->assertEquals($kidC->id(), $top->lastChild()->id());
    }

    public function testReplaceChild()
    {
        // Replacing a child removes the original from the parent entirely.
        $top = new Node;
        $kidA = new Node;
        $kidB = new Node;
        $kidC = new Node;
        $top->addChild($kidA);
        $top->addChild($kidB);
        $top->replaceChild($kidA->id(), $kidC);
        $this->assertFalse($top->isChild($kidA->id()));
    }

    /**
     * @expectedException PHPHtmlParser\Exceptions\CircularException
     */
    public function testSetParentDescendantException()
    {
        // A node may not adopt one of its own descendants as its parent.
        $top = new Node;
        $kidA = new Node;
        $top->addChild($kidA);
        $top->setParent($kidA);
    }

    /**
     * @expectedException PHPHtmlParser\Exceptions\CircularException
     */
    public function testAddChildAncestorException()
    {
        // Nor may a node adopt one of its ancestors as a child.
        $top = new Node;
        $kidA = new Node;
        $top->addChild($kidA);
        $kidA->addChild($top);
    }

    /**
     * @expectedException PHPHtmlParser\Exceptions\CircularException
     */
    public function testAddItselfAsChild()
    {
        $top = new Node;
        $top->addChild($top);
    }

    public function testIsAncestorParent()
    {
        $top = new Node;
        $kidA = new Node;
        $top->addChild($kidA);
        $this->assertTrue($kidA->isAncestor($top->id()));
    }

    public function testGetAncestor()
    {
        $top = new Node;
        $kidA = new Node;
        $top->addChild($kidA);
        $ancestor = $kidA->getAncestor($top->id());
        $this->assertEquals($top->id(), $ancestor->id());
    }

    public function testGetGreatAncestor()
    {
        // Ancestor lookup walks more than one level up the tree.
        $top = new Node;
        $kidA = new Node;
        $kidB = new Node;
        $top->addChild($kidA);
        $kidA->addChild($kidB);
        $ancestor = $kidB->getAncestor($top->id());
        $this->assertEquals($top->id(), $ancestor->id());
    }

    public function testGetAncestorNotFound()
    {
        $top = new Node;
        $ancestor = $top->getAncestor(1);
        $this->assertNull($ancestor);
    }
}
| gpl-2.0 |
percy-g2/Novathor_xperia_u8500 | 6.1.1.B.0.253/external/webkit/Source/WebCore/loader/DocumentThreadableLoader.cpp | 17006 | /*
* Copyright (C) 2011 Google Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "config.h"
#include "DocumentThreadableLoader.h"
#include "AuthenticationChallenge.h"
#include "CrossOriginAccessControl.h"
#include "CrossOriginPreflightResultCache.h"
#include "Document.h"
#include "DocumentThreadableLoaderClient.h"
#include "Frame.h"
#include "FrameLoader.h"
#include "ResourceHandle.h"
#include "ResourceLoadScheduler.h"
#include "ResourceRequest.h"
#include "SecurityOrigin.h"
#include "SubresourceLoader.h"
#include "ThreadableLoaderClient.h"
#include <wtf/UnusedParam.h>
namespace WebCore {
// Issues |request| synchronously; the loader exists only for the duration of
// this call (the constructor performs the whole load for LoadSynchronously).
void DocumentThreadableLoader::loadResourceSynchronously(Document* document, const ResourceRequest& request, ThreadableLoaderClient& client, const ThreadableLoaderOptions& options)
{
    // The loader will be deleted as soon as this function exits.
    RefPtr<DocumentThreadableLoader> loader = adoptRef(new DocumentThreadableLoader(document, &client, LoadSynchronously, request, options, String()));
    ASSERT(loader->hasOneRef());
}

// Creates an asynchronous loader. Returns 0 when the underlying resource load
// could not be started (the constructor left m_loader unset).
PassRefPtr<DocumentThreadableLoader> DocumentThreadableLoader::create(Document* document, ThreadableLoaderClient* client, const ResourceRequest& request, const ThreadableLoaderOptions& options, const String& optionalOutgoingReferrer)
{
    RefPtr<DocumentThreadableLoader> loader = adoptRef(new DocumentThreadableLoader(document, client, LoadAsynchronously, request, options, optionalOutgoingReferrer));
    if (!loader->m_loader)
        loader = 0;
    return loader.release();
}
// Chooses the load strategy at construction time: direct load for same-origin
// (or AllowCrossOriginRequests) requests, immediate failure when cross-origin
// is denied, otherwise the access-control path -- a simple request when
// possible, else a preflight (skipped on a preflight-result-cache hit).
DocumentThreadableLoader::DocumentThreadableLoader(Document* document, ThreadableLoaderClient* client, BlockingBehavior blockingBehavior, const ResourceRequest& request, const ThreadableLoaderOptions& options, const String& optionalOutgoingReferrer)
    : m_client(client)
    , m_document(document)
    , m_options(options)
    , m_optionalOutgoingReferrer(optionalOutgoingReferrer)
    , m_sameOriginRequest(document->securityOrigin()->canRequest(request.url()))
    , m_async(blockingBehavior == LoadAsynchronously)
{
    ASSERT(document);
    ASSERT(client);
    // Setting an outgoing referer is only supported in the async code path.
    ASSERT(m_async || m_optionalOutgoingReferrer.isEmpty());
    if (m_sameOriginRequest || m_options.crossOriginRequestPolicy == AllowCrossOriginRequests) {
        loadRequest(request, DoSecurityCheck);
        return;
    }
    if (m_options.crossOriginRequestPolicy == DenyCrossOriginRequests) {
        m_client->didFail(ResourceError(errorDomainWebKitInternal, 0, request.url().string(), "Cross origin requests are not supported."));
        return;
    }
    ASSERT(m_options.crossOriginRequestPolicy == UseAccessControl);
    // Strip credentials from the cross-origin copy; cookies only if allowed.
    OwnPtr<ResourceRequest> crossOriginRequest = adoptPtr(new ResourceRequest(request));
    crossOriginRequest->removeCredentials();
    crossOriginRequest->setAllowCookies(m_options.allowCredentials);
    if (!m_options.forcePreflight && isSimpleCrossOriginAccessRequest(crossOriginRequest->httpMethod(), crossOriginRequest->httpHeaderFields()))
        makeSimpleCrossOriginAccessRequest(*crossOriginRequest);
    else {
        // m_actualRequest doubles as the "a preflight is in flight" flag
        // checked throughout the callbacks below.
        m_actualRequest = crossOriginRequest.release();
        if (CrossOriginPreflightResultCache::shared().canSkipPreflight(document->securityOrigin()->toString(), m_actualRequest->url(), m_options.allowCredentials, m_actualRequest->httpMethod(), m_actualRequest->httpHeaderFields()))
            preflightSuccess();
        else
            makeCrossOriginAccessRequestWithPreflight(*m_actualRequest);
    }
}
// Sends a "simple" cross-origin request: no preflight, just an Origin header
// added to a copy of the request.
void DocumentThreadableLoader::makeSimpleCrossOriginAccessRequest(const ResourceRequest& request)
{
    ASSERT(isSimpleCrossOriginAccessRequest(request.httpMethod(), request.httpHeaderFields()));
    // Cross-origin requests are only defined for HTTP. We would catch this when checking response headers later, but there is no reason to send a request that's guaranteed to be denied.
    if (!request.url().protocolInHTTPFamily()) {
        m_client->didFail(ResourceError(errorDomainWebKitInternal, 0, request.url().string(), "Cross origin requests are only supported for HTTP."));
        return;
    }
    // Make a copy of the passed request so that we can modify some details.
    ResourceRequest crossOriginRequest(request);
    crossOriginRequest.setHTTPOrigin(m_document->securityOrigin()->toString());
    loadRequest(crossOriginRequest, DoSecurityCheck);
}

// Builds and sends the preflight: a credential-less OPTIONS request to the
// same URL advertising the real method and header names through the
// Access-Control-Request-Method / Access-Control-Request-Headers headers.
void DocumentThreadableLoader::makeCrossOriginAccessRequestWithPreflight(const ResourceRequest& request)
{
    ResourceRequest preflightRequest(request.url());
    preflightRequest.removeCredentials();
    preflightRequest.setHTTPOrigin(m_document->securityOrigin()->toString());
    preflightRequest.setAllowCookies(m_options.allowCredentials);
    preflightRequest.setHTTPMethod("OPTIONS");
    preflightRequest.setHTTPHeaderField("Access-Control-Request-Method", request.httpMethod());
    const HTTPHeaderMap& requestHeaderFields = request.httpHeaderFields();
    if (requestHeaderFields.size() > 0) {
        // Join the request's header names into a comma-separated list.
        Vector<UChar> headerBuffer;
        HTTPHeaderMap::const_iterator it = requestHeaderFields.begin();
        append(headerBuffer, it->first);
        ++it;
        HTTPHeaderMap::const_iterator end = requestHeaderFields.end();
        for (; it != end; ++it) {
            headerBuffer.append(',');
            headerBuffer.append(' ');
            append(headerBuffer, it->first);
        }
        preflightRequest.setHTTPHeaderField("Access-Control-Request-Headers", String::adopt(headerBuffer));
    }
    preflightRequest.setPriority(request.priority());
    loadRequest(preflightRequest, DoSecurityCheck);
}
DocumentThreadableLoader::~DocumentThreadableLoader()
{
    // Detach from any in-flight load so it stops calling back into us.
    if (m_loader)
        m_loader->clearClient();
}

// Aborts the load and severs the association in both directions; no further
// callbacks are delivered after this.
void DocumentThreadableLoader::cancel()
{
    if (!m_loader)
        return;
    m_loader->cancel();
    m_loader->clearClient();
    m_loader = 0;
    m_client = 0;
}

void DocumentThreadableLoader::setDefersLoading(bool value)
{
    // Pause/resume the underlying load, if one is in flight.
    if (m_loader)
        m_loader->setDefersLoading(value);
}
// Redirect interception: a disallowed redirect is cancelled by clearing the
// outgoing request; allowed redirects are forwarded to clients that opt in.
void DocumentThreadableLoader::willSendRequest(SubresourceLoader* loader, ResourceRequest& request, const ResourceResponse& redirectResponse)
{
    ASSERT(m_client);
    ASSERT_UNUSED(loader, loader == m_loader);
    if (!isAllowedRedirect(request.url())) {
        // Protect |this|: the client callback may drop its reference to us.
        RefPtr<DocumentThreadableLoader> protect(this);
        m_client->didFailRedirectCheck();
        request = ResourceRequest();
    } else {
        if (m_client->isDocumentThreadableLoaderClient())
            static_cast<DocumentThreadableLoaderClient*>(m_client)->willSendRequest(request, redirectResponse);
    }
}

// Upload-progress notification, forwarded to the client untouched.
void DocumentThreadableLoader::didSendData(SubresourceLoader* loader, unsigned long long bytesSent, unsigned long long totalBytesToBeSent)
{
    ASSERT(m_client);
    ASSERT_UNUSED(loader, loader == m_loader);
    m_client->didSendData(bytesSent, totalBytesToBeSent);
}
// For a preflight response (m_actualRequest set): run the access-control
// check, parse the Access-Control-Allow-* result, and cache it on success;
// any failure aborts via preflightFailure(). For a real response: cross-origin
// loads under UseAccessControl must pass the access check before the client
// is notified.
void DocumentThreadableLoader::didReceiveResponse(SubresourceLoader* loader, const ResourceResponse& response)
{
    ASSERT(m_client);
    ASSERT_UNUSED(loader, loader == m_loader);
    String accessControlErrorDescription;
    if (m_actualRequest) {
        if (!passesAccessControlCheck(response, m_options.allowCredentials, m_document->securityOrigin(), accessControlErrorDescription)) {
            preflightFailure(response.url(), accessControlErrorDescription);
            return;
        }
        OwnPtr<CrossOriginPreflightResultCacheItem> preflightResult = adoptPtr(new CrossOriginPreflightResultCacheItem(m_options.allowCredentials));
        if (!preflightResult->parse(response, accessControlErrorDescription)
            || !preflightResult->allowsCrossOriginMethod(m_actualRequest->httpMethod(), accessControlErrorDescription)
            || !preflightResult->allowsCrossOriginHeaders(m_actualRequest->httpHeaderFields(), accessControlErrorDescription)) {
            preflightFailure(response.url(), accessControlErrorDescription);
            return;
        }
        // Remember the successful preflight so equivalent future requests can skip it.
        CrossOriginPreflightResultCache::shared().appendEntry(m_document->securityOrigin()->toString(), m_actualRequest->url(), preflightResult.release());
    } else {
        if (!m_sameOriginRequest && m_options.crossOriginRequestPolicy == UseAccessControl) {
            if (!passesAccessControlCheck(response, m_options.allowCredentials, m_document->securityOrigin(), accessControlErrorDescription)) {
                m_client->didFail(ResourceError(errorDomainWebKitInternal, 0, response.url().string(), accessControlErrorDescription));
                return;
            }
        }
        m_client->didReceiveResponse(response);
    }
}

void DocumentThreadableLoader::didReceiveData(SubresourceLoader* loader, const char* data, int dataLength)
{
    ASSERT(m_client);
    ASSERT_UNUSED(loader, loader == m_loader);
    // Preflight data should be invisible to clients.
    if (m_actualRequest)
        return;
    m_client->didReceiveData(data, dataLength);
}

void DocumentThreadableLoader::didReceiveCachedMetadata(SubresourceLoader* loader, const char* data, int dataLength)
{
    ASSERT(m_client);
    ASSERT_UNUSED(loader, loader == m_loader);
    // Preflight data should be invisible to clients.
    if (m_actualRequest)
        return;
    m_client->didReceiveCachedMetadata(data, dataLength);
}
void DocumentThreadableLoader::didFinishLoading(SubresourceLoader* loader, double finishTime)
{
    ASSERT(loader == m_loader);
    ASSERT(m_client);
    didFinishLoading(loader->identifier(), finishTime);
}

// Completion of either the preflight (kick off the real request) or the real
// load (notify the client).
void DocumentThreadableLoader::didFinishLoading(unsigned long identifier, double finishTime)
{
    if (m_actualRequest) {
        ASSERT(!m_sameOriginRequest);
        ASSERT(m_options.crossOriginRequestPolicy == UseAccessControl);
        preflightSuccess();
    } else
        m_client->didFinishLoading(identifier, finishTime);
}

void DocumentThreadableLoader::didFail(SubresourceLoader* loader, const ResourceError& error)
{
    ASSERT(m_client);
    // m_loader may be null if we arrive here via SubresourceLoader::create in the ctor
    ASSERT_UNUSED(loader, loader == m_loader || !m_loader);
    m_client->didFail(error);
}

// Forbid stored credentials outright when the options disallow them;
// otherwise defer the decision (return false).
bool DocumentThreadableLoader::getShouldUseCredentialStorage(SubresourceLoader* loader, bool& shouldUseCredentialStorage)
{
    ASSERT_UNUSED(loader, loader == m_loader || !m_loader);
    if (!m_options.allowCredentials) {
        shouldUseCredentialStorage = false;
        return true;
    }
    return false; // Only FrameLoaderClient can ultimately permit credential use.
}

void DocumentThreadableLoader::didReceiveAuthenticationChallenge(SubresourceLoader* loader, const AuthenticationChallenge& challenge)
{
    ASSERT(loader == m_loader);
    // Users are not prompted for credentials for cross-origin requests.
    if (!m_sameOriginRequest) {
#if PLATFORM(MAC) || USE(CFNETWORK) || USE(CURL)
        loader->handle()->receivedRequestToContinueWithoutCredential(challenge);
#else
        // These platforms don't provide a way to continue without credentials, cancel the load altogether.
        UNUSED_PARAM(challenge);
        // Protect |this|: didFail() may release the client's reference, and
        // cancel() clears m_client.
        RefPtr<DocumentThreadableLoader> protect(this);
        m_client->didFail(loader->blockedError());
        cancel();
#endif
    }
}

void DocumentThreadableLoader::receivedCancellation(SubresourceLoader* loader, const AuthenticationChallenge& challenge)
{
    ASSERT(m_client);
    ASSERT_UNUSED(loader, loader == m_loader);
    m_client->didReceiveAuthenticationCancellation(challenge.failureResponse());
}
// The preflight passed: replay the saved request (with its Origin header set),
// clearing m_actualRequest so the callbacks treat the next load as the real one.
void DocumentThreadableLoader::preflightSuccess()
{
    OwnPtr<ResourceRequest> actualRequest;
    actualRequest.swap(m_actualRequest);
    actualRequest->setHTTPOrigin(m_document->securityOrigin()->toString());
    // It should be ok to skip the security check since we already asked about the preflight request.
    loadRequest(*actualRequest, SkipSecurityCheck);
}

void DocumentThreadableLoader::preflightFailure(const String& url, const String& errorDescription)
{
    m_actualRequest = 0; // Prevent didFinishLoading() from bypassing access check.
    m_client->didFail(ResourceError(errorDomainWebKitInternal, 0, url, errorDescription));
}
// Starts the request. Async path: hand it to the resource-load scheduler
// (load callbacks and content sniffing are suppressed for preflights).
// Sync path: load inline via FrameLoader, then re-run the redirect and
// access-control checks on the result before replaying the usual callbacks.
void DocumentThreadableLoader::loadRequest(const ResourceRequest& request, SecurityCheckPolicy securityCheck)
{
    // Any credential should have been removed from the cross-site requests.
    const KURL& requestURL = request.url();
    ASSERT(m_sameOriginRequest || requestURL.user().isEmpty());
    ASSERT(m_sameOriginRequest || requestURL.pass().isEmpty());
    if (m_async) {
        // Don't sniff content or send load callbacks for the preflight request.
        bool sendLoadCallbacks = m_options.sendLoadCallbacks && !m_actualRequest;
        bool sniffContent = m_options.sniffContent && !m_actualRequest;
        // Keep buffering the data for the preflight request.
        bool shouldBufferData = m_options.shouldBufferData || m_actualRequest;
        // Clear the loader so that any callbacks from SubresourceLoader::create will not have the old loader.
        m_loader = 0;
        m_loader = resourceLoadScheduler()->scheduleSubresourceLoad(m_document->frame(), this, request, ResourceLoadPriorityMedium, securityCheck, sendLoadCallbacks,
                                                                    sniffContent, m_optionalOutgoingReferrer, shouldBufferData);
        return;
    }
    // FIXME: ThreadableLoaderOptions.sniffContent is not supported for synchronous requests.
    StoredCredentials storedCredentials = m_options.allowCredentials ? AllowStoredCredentials : DoNotAllowStoredCredentials;
    Vector<char> data;
    ResourceError error;
    ResourceResponse response;
    unsigned long identifier = std::numeric_limits<unsigned long>::max();
    if (m_document->frame())
        identifier = m_document->frame()->loader()->loadResourceSynchronously(request, storedCredentials, error, response, data);
    // No exception for file:/// resources, see <rdar://problem/4962298>.
    // Also, if we have an HTTP response, then it wasn't a network error in fact.
    if (!error.isNull() && !requestURL.isLocalFile() && response.httpStatusCode() <= 0) {
        m_client->didFail(error);
        return;
    }
    // FIXME: FrameLoader::loadSynchronously() does not tell us whether a redirect happened or not, so we guess by comparing the
    // request and response URLs. This isn't a perfect test though, since a server can serve a redirect to the same URL that was
    // requested. Also comparing the request and response URLs as strings will fail if the requestURL still has its credentials.
    if (requestURL != response.url() && !isAllowedRedirect(response.url())) {
        m_client->didFailRedirectCheck();
        return;
    }
    // Replay the async callback sequence for the synchronously-obtained result.
    didReceiveResponse(0, response);
    const char* bytes = static_cast<const char*>(data.data());
    int len = static_cast<int>(data.size());
    didReceiveData(0, bytes, len);
    didFinishLoading(identifier, 0.0);
}
// A redirect is permitted when the policy allows arbitrary cross-origin
// loads, or when this is a same-origin load redirecting to a URL the
// document's origin may request.
// FIXME: We need to implement access control for each redirect. This will require some refactoring though, because the code
// that processes redirects doesn't know about access control and expects a synchronous answer from its client about whether
// a redirect should proceed.
bool DocumentThreadableLoader::isAllowedRedirect(const KURL& url)
{
    return m_options.crossOriginRequestPolicy == AllowCrossOriginRequests
        || (m_sameOriginRequest && m_document->securityOrigin()->canRequest(url));
}
} // namespace WebCore
| gpl-2.0 |
CISC181/cspoker | common/common/src/main/java/org/cspoker/common/api/lobby/action/GetHoldemTableInformationAction.java | 1703 | /**
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
package org.cspoker.common.api.lobby.action;
import javax.xml.bind.annotation.XmlAttribute;
import javax.xml.bind.annotation.XmlRootElement;
import net.jcip.annotations.Immutable;
import org.cspoker.common.api.lobby.context.StaticLobbyContext;
import org.cspoker.common.api.shared.event.EventId;
import org.cspoker.common.elements.table.DetailedHoldemTable;
import org.cspoker.common.elements.table.TableId;
/**
 * Lobby action that asks the server for the detailed state of a single
 * hold'em table, identified by its table id.
 */
@XmlRootElement
@Immutable
public class GetHoldemTableInformationAction extends LobbyAction<DetailedHoldemTable> {
    private static final long serialVersionUID = 7897218843022885169L;
    // Identifier of the table whose details are requested; marshalled as an XML attribute.
    @XmlAttribute
    private final TableId tableId;
    public GetHoldemTableInformationAction(EventId id, TableId tableid) {
        super(id);
        this.tableId = tableid;
    }
    // No-arg constructor -- presumably required for JAXB unmarshalling
    // (note @XmlRootElement); leaves tableId null until populated.
    protected GetHoldemTableInformationAction() {
        tableId = null;
    }
    @Override
    public DetailedHoldemTable perform(StaticLobbyContext lobbyContext) {
        // Delegate the actual lookup to the lobby context.
        return lobbyContext.getHoldemTableInformation(tableId);
    }
}
| gpl-2.0 |
RedRightHand/llvm-analyses | cse231-proj1/llvm/src/utils/TableGen/AsmWriterEmitter.cpp | 35672 | //===- AsmWriterEmitter.cpp - Generate an assembly writer -----------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This tablegen backend is emits an assembly printer for the current target.
// Note that this is currently fairly skeletal, but will grow over time.
//
//===----------------------------------------------------------------------===//
#include "AsmWriterInst.h"
#include "CodeGenTarget.h"
#include "SequenceToOffsetTable.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
#include "llvm/TableGen/TableGenBackend.h"
#include <algorithm>
#include <cassert>
#include <map>
#include <vector>
using namespace llvm;
namespace {
// TableGen backend that emits the target's assembly printer: the
// instruction-printing routine, register-name lookup, and alias printing.
class AsmWriterEmitter {
  RecordKeeper &Records;
  CodeGenTarget Target;
  // Maps each CodeGenInstruction to the AsmWriterInst built for it.
  std::map<const CodeGenInstruction*, AsmWriterInst*> CGIAWIMap;
  // All instructions in enum order; the index is the instruction ID.
  std::vector<const CodeGenInstruction*> NumberedInstructions;
  std::vector<AsmWriterInst> Instructions;
public:
  AsmWriterEmitter(RecordKeeper &R);

  void run(raw_ostream &o);

private:
  void EmitPrintInstruction(raw_ostream &o);
  void EmitGetRegisterName(raw_ostream &o);
  void EmitPrintAliasInstruction(raw_ostream &O);

  // Returns the AsmWriterInst for the instruction with the given enum ID;
  // asserts that the ID is in range and present in the map.
  AsmWriterInst *getAsmWriterInstByID(unsigned ID) const {
    assert(ID < NumberedInstructions.size());
    std::map<const CodeGenInstruction*, AsmWriterInst*>::const_iterator I =
      CGIAWIMap.find(NumberedInstructions[ID]);
    assert(I != CGIAWIMap.end() && "Didn't find inst!");
    return I->second;
  }

  void FindUniqueOperandCommands(std::vector<std::string> &UOC,
                                 std::vector<unsigned> &InstIdxs,
                                 std::vector<unsigned> &InstOpsUsed) const;
};
} // end anonymous namespace
// Emits one case of the per-operand switch: pops the last entry of
// OpsToPrint, folds in every other entry whose operand command is identical
// (they share the same case body), then prints the command. Note OpsToPrint
// is consumed destructively; callers loop until it is empty.
static void PrintCases(std::vector<std::pair<std::string,
                       AsmWriterOperand> > &OpsToPrint, raw_ostream &O) {
  O << " case " << OpsToPrint.back().first << ": ";
  AsmWriterOperand TheOp = OpsToPrint.back().second;
  OpsToPrint.pop_back();

  // Check to see if any other operands are identical in this list, and if so,
  // emit a case label for them.
  for (unsigned i = OpsToPrint.size(); i != 0; --i)
    if (OpsToPrint[i-1].second == TheOp) {
      O << "\n case " << OpsToPrint[i-1].first << ": ";
      OpsToPrint.erase(OpsToPrint.begin()+i-1);
    }

  // Finally, emit the code.
  O << TheOp.getCode();
  O << "break;\n";
}
/// EmitInstructions - Emit the last instruction in the vector and any other
/// instructions that are suitably similar to it.
static void EmitInstructions(std::vector<AsmWriterInst> &Insts,
                             raw_ostream &O) {
  AsmWriterInst FirstInst = Insts.back();
  Insts.pop_back();

  // Gather every remaining instruction that differs from FirstInst in at most
  // one operand -- and in the *same* operand position for the whole group.
  std::vector<AsmWriterInst> SimilarInsts;
  unsigned DifferingOperand = ~0;
  for (unsigned i = Insts.size(); i != 0; --i) {
    unsigned DiffOp = Insts[i-1].MatchesAllButOneOp(FirstInst);
    if (DiffOp != ~1U) {
      if (DifferingOperand == ~0U)  // First match!
        DifferingOperand = DiffOp;

      // If this differs in the same operand as the rest of the instructions in
      // this class, move it to the SimilarInsts list.
      if (DifferingOperand == DiffOp || DiffOp == ~0U) {
        SimilarInsts.push_back(Insts[i-1]);
        Insts.erase(Insts.begin()+i-1);
      }
    }
  }

  // Emit a shared case label for FirstInst and all its look-alikes.
  O << " case " << FirstInst.CGI->Namespace << "::"
    << FirstInst.CGI->TheDef->getName() << ":\n";
  for (unsigned i = 0, e = SimilarInsts.size(); i != e; ++i)
    O << " case " << SimilarInsts[i].CGI->Namespace << "::"
      << SimilarInsts[i].CGI->TheDef->getName() << ":\n";
  for (unsigned i = 0, e = FirstInst.Operands.size(); i != e; ++i) {
    if (i != DifferingOperand) {
      // If the operand is the same for all instructions, just print it.
      O << " " << FirstInst.Operands[i].getCode();
    } else {
      // If this is the operand that varies between all of the instructions,
      // emit a switch for just this operand now.
      O << " switch (MI->getOpcode()) {\n";
      std::vector<std::pair<std::string, AsmWriterOperand> > OpsToPrint;
      OpsToPrint.push_back(std::make_pair(FirstInst.CGI->Namespace + "::" +
                                          FirstInst.CGI->TheDef->getName(),
                                          FirstInst.Operands[i]));

      for (unsigned si = 0, e = SimilarInsts.size(); si != e; ++si) {
        AsmWriterInst &AWI = SimilarInsts[si];
        OpsToPrint.push_back(std::make_pair(AWI.CGI->Namespace+"::"+
                                            AWI.CGI->TheDef->getName(),
                                            AWI.Operands[i]));
      }
      // PrintCases consumes OpsToPrint from the back; reverse so output
      // follows the original insertion order.
      std::reverse(OpsToPrint.begin(), OpsToPrint.end());
      while (!OpsToPrint.empty())
        PrintCases(OpsToPrint, O);
      O << " }";
    }
    O << "\n";
  }
  O << " break;\n";
}
/// FindUniqueOperandCommands - Compute the set of distinct "print the next
/// operand" code snippets across all numbered instructions.
///
/// On return, UniqueOperandCommands holds each unique snippet (extended,
/// below, with any follow-on operands that every instruction sharing the
/// snippet also shares), InstIdxs maps every numbered instruction to the
/// snippet it uses (~0U when it has none left), and InstOpsUsed records how
/// many leading operands each snippet consumes.
///
/// Change vs. original: removed the write-only local 'MaxSize' and the
/// statement that updated it -- the value was computed but never read.
void AsmWriterEmitter::
FindUniqueOperandCommands(std::vector<std::string> &UniqueOperandCommands,
                          std::vector<unsigned> &InstIdxs,
                          std::vector<unsigned> &InstOpsUsed) const {
  InstIdxs.assign(NumberedInstructions.size(), ~0U);
  // This vector parallels UniqueOperandCommands, keeping track of which
  // instructions each case are used for.  It is a comma separated string of
  // enums.
  std::vector<std::string> InstrsForCase;
  InstrsForCase.resize(UniqueOperandCommands.size());
  InstOpsUsed.assign(UniqueOperandCommands.size(), 0);
  for (unsigned i = 0, e = NumberedInstructions.size(); i != e; ++i) {
    const AsmWriterInst *Inst = getAsmWriterInstByID(i);
    if (Inst == 0) continue; // PHI, INLINEASM, PROLOG_LABEL, etc.
    std::string Command;
    if (Inst->Operands.empty())
      continue;   // Instruction already done.
    Command = " " + Inst->Operands[0].getCode() + "\n";
    // Check to see if we already have 'Command' in UniqueOperandCommands.
    // If not, add it.
    bool FoundIt = false;
    for (unsigned idx = 0, e = UniqueOperandCommands.size(); idx != e; ++idx)
      if (UniqueOperandCommands[idx] == Command) {
        InstIdxs[i] = idx;
        InstrsForCase[idx] += ", ";
        InstrsForCase[idx] += Inst->CGI->TheDef->getName();
        FoundIt = true;
        break;
      }
    if (!FoundIt) {
      InstIdxs[i] = UniqueOperandCommands.size();
      UniqueOperandCommands.push_back(Command);
      InstrsForCase.push_back(Inst->CGI->TheDef->getName());
      // This command matches one operand so far.
      InstOpsUsed.push_back(1);
    }
  }
  // For each entry of UniqueOperandCommands, there is a set of instructions
  // that uses it.  If the next command of all instructions in the set are
  // identical, fold it into the command.
  for (unsigned CommandIdx = 0, e = UniqueOperandCommands.size();
       CommandIdx != e; ++CommandIdx) {
    for (unsigned Op = 1; ; ++Op) {
      // Scan for the first instruction in the set.
      std::vector<unsigned>::iterator NIT =
        std::find(InstIdxs.begin(), InstIdxs.end(), CommandIdx);
      if (NIT == InstIdxs.end()) break;  // No commonality.
      // If this instruction has no more operands, there isn't anything to
      // merge into this command.
      const AsmWriterInst *FirstInst =
        getAsmWriterInstByID(NIT-InstIdxs.begin());
      if (!FirstInst || FirstInst->Operands.size() == Op)
        break;
      // Otherwise, scan to see if all of the other instructions in this command
      // set share the operand.
      bool AllSame = true;
      for (NIT = std::find(NIT+1, InstIdxs.end(), CommandIdx);
           NIT != InstIdxs.end();
           NIT = std::find(NIT+1, InstIdxs.end(), CommandIdx)) {
        // Okay, found another instruction in this command set.  If the operand
        // matches, we're ok, otherwise bail out.
        const AsmWriterInst *OtherInst =
          getAsmWriterInstByID(NIT-InstIdxs.begin());
        if (!OtherInst || OtherInst->Operands.size() == Op ||
            OtherInst->Operands[Op] != FirstInst->Operands[Op]) {
          AllSame = false;
          break;
        }
      }
      if (!AllSame) break;
      // Okay, everything in this command set has the same next operand.  Add it
      // to UniqueOperandCommands and remember that it was consumed.
      std::string Command = " " + FirstInst->Operands[Op].getCode() + "\n";
      UniqueOperandCommands[CommandIdx] += Command;
      InstOpsUsed[CommandIdx]++;
    }
  }
  // Prepend some of the instructions each case is used for onto the case val.
  for (unsigned i = 0, e = InstrsForCase.size(); i != e; ++i) {
    std::string Instrs = InstrsForCase[i];
    if (Instrs.size() > 70) {
      Instrs.erase(Instrs.begin()+70, Instrs.end());
      Instrs += "...";
    }
    if (!Instrs.empty())
      UniqueOperandCommands[i] = " // " + Instrs + "\n" +
        UniqueOperandCommands[i];
  }
}
// UnescapeString - Resolve C-style backslash escapes in Str in place.
// Each recognized two-character sequence (\a \b \e \f \n \r \t \v \" \' \\)
// collapses to its single-character value; unrecognized escapes and a
// trailing lone backslash are left untouched.
static void UnescapeString(std::string &Str) {
  for (std::string::size_type i = 0; i + 1 < Str.size(); ++i) {
    if (Str[i] != '\\')
      continue;
    char Replacement;
    switch (Str[i+1]) {
    case 'a':  Replacement = '\a'; break;
    case 'b':  Replacement = '\b'; break;
    case 'e':  Replacement = 27;   break;  // ASCII ESC has no portable escape.
    case 'f':  Replacement = '\f'; break;
    case 'n':  Replacement = '\n'; break;
    case 'r':  Replacement = '\r'; break;
    case 't':  Replacement = '\t'; break;
    case 'v':  Replacement = '\v'; break;
    case '"':  Replacement = '\"'; break;
    case '\'': Replacement = '\''; break;
    case '\\': Replacement = '\\'; break;
    default:   continue;  // Unknown escape: leave both characters alone.
    }
    // Overwrite the backslash with the decoded character and drop the
    // escape letter; the subsequent ++i then skips past the result, so a
    // decoded '\\' is never re-examined as the start of another escape.
    Str[i] = Replacement;
    Str.erase(Str.begin() + i + 1);
  }
}
/// EmitPrintInstruction - Generate the code for the "printInstruction" method
/// implementation. Destroys all instances of AsmWriterInst information, by
/// clearing the Instructions vector.
///
/// Strategy: the leading literal text of every instruction goes into a
/// deduplicated string table; the remaining operand-printing commands are
/// compressed into bit fields of a per-opcode 64-bit word (emitted as one or
/// two 32-bit tables).  Whatever does not fit is emitted as an ordinary
/// switch at the end.
void AsmWriterEmitter::EmitPrintInstruction(raw_ostream &O) {
  Record *AsmWriter = Target.getAsmWriter();
  std::string ClassName = AsmWriter->getValueAsString("AsmWriterClassName");
  bool isMC = AsmWriter->getValueAsBit("isMCAsmWriter");
  const char *MachineInstrClassName = isMC ? "MCInst" : "MachineInstr";
  O <<
  "/// printInstruction - This method is automatically generated by tablegen\n"
  "/// from the instruction set description.\n"
  "void " << Target.getName() << ClassName
  << "::printInstruction(const " << MachineInstrClassName
  << " *MI, raw_ostream &O) {\n";
  // Build an aggregate string, and build a table of offsets into it.
  SequenceToOffsetTable<std::string> StringTable;
  /// OpcodeInfo - This encodes the index of the string to use for the first
  /// chunk of the output as well as indices used for operand printing.
  /// Stored as 64 bits per opcode; emitted below as a 32-bit table plus an
  /// optional second table when the upper bits are used.
  std::vector<uint64_t> OpcodeInfo;
  // Add all strings to the string table upfront so it can generate an optimized
  // representation.
  for (unsigned i = 0, e = NumberedInstructions.size(); i != e; ++i) {
    AsmWriterInst *AWI = CGIAWIMap[NumberedInstructions[i]];
    // NOTE(review): Operands[0] is accessed unconditionally here -- this
    // assumes every mapped AsmWriterInst has at least one operand; confirm
    // against AsmWriterInst construction.
    if (AWI != 0 &&
        AWI->Operands[0].OperandType ==
        AsmWriterOperand::isLiteralTextOperand &&
        !AWI->Operands[0].Str.empty()) {
      std::string Str = AWI->Operands[0].Str;
      UnescapeString(Str);
      StringTable.add(Str);
    }
  }
  StringTable.layout();
  unsigned MaxStringIdx = 0;
  // Assign each opcode its string-table index (biased by one, 0 = sentinel).
  for (unsigned i = 0, e = NumberedInstructions.size(); i != e; ++i) {
    AsmWriterInst *AWI = CGIAWIMap[NumberedInstructions[i]];
    unsigned Idx;
    if (AWI == 0) {
      // Something not handled by the asmwriter printer.
      Idx = ~0U;
    } else if (AWI->Operands[0].OperandType !=
               AsmWriterOperand::isLiteralTextOperand ||
               AWI->Operands[0].Str.empty()) {
      // Something handled by the asmwriter printer, but with no leading string.
      Idx = StringTable.get("");
    } else {
      std::string Str = AWI->Operands[0].Str;
      UnescapeString(Str);
      Idx = StringTable.get(Str);
      MaxStringIdx = std::max(MaxStringIdx, Idx);
      // Nuke the string from the operand list.  It is now handled!
      AWI->Operands.erase(AWI->Operands.begin());
    }
    // Bias offset by one since we want 0 as a sentinel.
    OpcodeInfo.push_back(Idx+1);
  }
  // Figure out how many bits we used for the string index.
  unsigned AsmStrBits = Log2_32_Ceil(MaxStringIdx+2);
  // To reduce code size, we compactify common instructions into a few bits
  // in the opcode-indexed table.
  unsigned BitsLeft = 64-AsmStrBits;
  std::vector<std::vector<std::string> > TableDrivenOperandPrinters;
  // Greedily pack successive operand-command sets into the remaining bits.
  while (1) {
    std::vector<std::string> UniqueOperandCommands;
    std::vector<unsigned> InstIdxs;
    std::vector<unsigned> NumInstOpsHandled;
    FindUniqueOperandCommands(UniqueOperandCommands, InstIdxs,
                              NumInstOpsHandled);
    // If we ran out of operands to print, we're done.
    if (UniqueOperandCommands.empty()) break;
    // Compute the number of bits we need to represent these cases, this is
    // ceil(log2(numentries)).
    unsigned NumBits = Log2_32_Ceil(UniqueOperandCommands.size());
    // If we don't have enough bits for this operand, don't include it.
    if (NumBits > BitsLeft) {
      DEBUG(errs() << "Not enough bits to densely encode " << NumBits
                   << " more bits\n");
      break;
    }
    // Otherwise, we can include this in the initial lookup table.  Add it in.
    for (unsigned i = 0, e = InstIdxs.size(); i != e; ++i)
      if (InstIdxs[i] != ~0U) {
        OpcodeInfo[i] |= (uint64_t)InstIdxs[i] << (64-BitsLeft);
      }
    BitsLeft -= NumBits;
    // Remove the info about this operand.
    for (unsigned i = 0, e = NumberedInstructions.size(); i != e; ++i) {
      if (AsmWriterInst *Inst = getAsmWriterInstByID(i))
        if (!Inst->Operands.empty()) {
          unsigned NumOps = NumInstOpsHandled[InstIdxs[i]];
          assert(NumOps <= Inst->Operands.size() &&
                 "Can't remove this many ops!");
          Inst->Operands.erase(Inst->Operands.begin(),
                               Inst->Operands.begin()+NumOps);
        }
    }
    // Remember the handlers for this set of operands.
    TableDrivenOperandPrinters.push_back(UniqueOperandCommands);
  }
  // We always emit at least one 32-bit table. A second table is emitted if
  // more bits are needed.
  O<<" static const uint32_t OpInfo[] = {\n";
  for (unsigned i = 0, e = NumberedInstructions.size(); i != e; ++i) {
    O << " " << (OpcodeInfo[i] & 0xffffffff) << "U,\t// "
      << NumberedInstructions[i]->TheDef->getName() << "\n";
  }
  // Add a dummy entry so the array init doesn't end with a comma.
  O << " 0U\n";
  O << " };\n\n";
  if (BitsLeft < 32) {
    // Add a second OpInfo table only when it is necessary.
    // Adjust the type of the second table based on the number of bits needed.
    O << " static const uint"
      << ((BitsLeft < 16) ? "32" : (BitsLeft < 24) ? "16" : "8")
      << "_t OpInfo2[] = {\n";
    for (unsigned i = 0, e = NumberedInstructions.size(); i != e; ++i) {
      O << " " << (OpcodeInfo[i] >> 32) << "U,\t// "
        << NumberedInstructions[i]->TheDef->getName() << "\n";
    }
    // Add a dummy entry so the array init doesn't end with a comma.
    O << " 0U\n";
    O << " };\n\n";
  }
  // Emit the string itself.
  O << " const char AsmStrs[] = {\n";
  StringTable.emit(O, printChar);
  O << " };\n\n";
  O << " O << \"\\t\";\n\n";
  O << " // Emit the opcode for the instruction.\n";
  if (BitsLeft < 32) {
    // If we have two tables then we need to perform two lookups and combine
    // the results into a single 64-bit value.
    O << " uint64_t Bits1 = OpInfo[MI->getOpcode()];\n"
      << " uint64_t Bits2 = OpInfo2[MI->getOpcode()];\n"
      << " uint64_t Bits = (Bits2 << 32) | Bits1;\n";
  } else {
    // If only one table is used we just need to perform a single lookup.
    O << " uint32_t Bits = OpInfo[MI->getOpcode()];\n";
  }
  O << " assert(Bits != 0 && \"Cannot print this instruction.\");\n"
    << " O << AsmStrs+(Bits & " << (1 << AsmStrBits)-1 << ")-1;\n\n";
  // Output the table driven operand information.
  BitsLeft = 64-AsmStrBits;
  for (unsigned i = 0, e = TableDrivenOperandPrinters.size(); i != e; ++i) {
    std::vector<std::string> &Commands = TableDrivenOperandPrinters[i];
    // Compute the number of bits we need to represent these cases, this is
    // ceil(log2(numentries)).
    unsigned NumBits = Log2_32_Ceil(Commands.size());
    assert(NumBits <= BitsLeft && "consistency error");
    // Emit code to extract this field from Bits.
    O << "\n // Fragment " << i << " encoded into " << NumBits
      << " bits for " << Commands.size() << " unique commands.\n";
    if (Commands.size() == 2) {
      // Emit two possibilities with if/else.
      O << " if ((Bits >> "
        << (64-BitsLeft) << ") & "
        << ((1 << NumBits)-1) << ") {\n"
        << Commands[1]
        << " } else {\n"
        << Commands[0]
        << " }\n\n";
    } else if (Commands.size() == 1) {
      // Emit a single possibility.
      O << Commands[0] << "\n\n";
    } else {
      O << " switch ((Bits >> "
        << (64-BitsLeft) << ") & "
        << ((1 << NumBits)-1) << ") {\n"
        << " default: // unreachable.\n";
      // Print out all the cases.
      for (unsigned i = 0, e = Commands.size(); i != e; ++i) {
        O << " case " << i << ":\n";
        O << Commands[i];
        O << " break;\n";
      }
      O << " }\n\n";
    }
    BitsLeft -= NumBits;
  }
  // Okay, delete instructions with no operand info left.
  for (unsigned i = 0, e = Instructions.size(); i != e; ++i) {
    // Entire instruction has been emitted?
    AsmWriterInst &Inst = Instructions[i];
    if (Inst.Operands.empty()) {
      Instructions.erase(Instructions.begin()+i);
      --i; --e;
    }
  }
  // Because this is a vector, we want to emit from the end.  Reverse all of the
  // elements in the vector.
  std::reverse(Instructions.begin(), Instructions.end());
  // Now that we've emitted all of the operand info that fit into 32 bits, emit
  // information for those instructions that are left.  This is a less dense
  // encoding, but we expect the main 32-bit table to handle the majority of
  // instructions.
  if (!Instructions.empty()) {
    // Find the opcode # of inline asm.
    O << " switch (MI->getOpcode()) {\n";
    while (!Instructions.empty())
      EmitInstructions(Instructions, O);
    O << " }\n";
    O << " return;\n";
  }
  O << "}\n";
}
/// emitRegisterNameString - Emit, for one alt-name index (empty AltName for
/// the default names), a deduplicated character blob AsmStrs<AltName> and a
/// parallel offset table RegAsmOffset<AltName> with one entry per register.
/// A register lacking an alternate name for this index gets an empty string
/// (offset of ""), which callers treat as an error flag.
static void
emitRegisterNameString(raw_ostream &O, StringRef AltName,
                       const std::vector<CodeGenRegister*> &Registers) {
  SequenceToOffsetTable<std::string> StringTable;
  SmallVector<std::string, 4> AsmNames(Registers.size());
  for (unsigned i = 0, e = Registers.size(); i != e; ++i) {
    const CodeGenRegister &Reg = *Registers[i];
    std::string &AsmName = AsmNames[i];
    // "NoRegAltName" is special. We don't need to do a lookup for that,
    // as it's just a reference to the default register name.
    if (AltName == "" || AltName == "NoRegAltName") {
      AsmName = Reg.TheDef->getValueAsString("AsmName");
      if (AsmName.empty())
        AsmName = Reg.getName();
    } else {
      // Make sure the register has an alternate name for this index.
      std::vector<Record*> AltNameList =
        Reg.TheDef->getValueAsListOfDefs("RegAltNameIndices");
      // Linear scan for AltName's position within this register's index list.
      unsigned Idx = 0, e;
      for (e = AltNameList.size();
           Idx < e && (AltNameList[Idx]->getName() != AltName);
           ++Idx)
        ;
      // If the register has an alternate name for this index, use it.
      // Otherwise, leave it empty as an error flag.
      if (Idx < e) {
        std::vector<std::string> AltNames =
          Reg.TheDef->getValueAsListOfStrings("AltNames");
        if (AltNames.size() <= Idx)
          PrintFatalError(Reg.TheDef->getLoc(),
                          (Twine("Register definition missing alt name for '") +
                           AltName + "'.").str());
        AsmName = AltNames[Idx];
      }
    }
    StringTable.add(AsmName);
  }
  StringTable.layout();
  O << " static const char AsmStrs" << AltName << "[] = {\n";
  StringTable.emit(O, printChar);
  O << " };\n\n";
  O << " static const uint32_t RegAsmOffset" << AltName << "[] = {";
  // 14 offsets per emitted line keeps the generated table readable.
  for (unsigned i = 0, e = Registers.size(); i != e; ++i) {
    if ((i % 14) == 0)
      O << "\n ";
    O << StringTable.get(AsmNames[i]) << ", ";
  }
  O << "\n };\n"
    << "\n";
}
/// EmitGetRegisterName - Generate the target's getRegisterName() method.
/// With alternate register name indices present, the generated method takes
/// an AltIdx parameter and switches between per-index string tables;
/// otherwise a single table is emitted.
///
/// Fix vs. original: the namespace lookup inside the alt-name loop indexed
/// AltNameIndices with the constant 1 instead of the loop variable i, so
/// every case took its namespace from one fixed record rather than its own.
void AsmWriterEmitter::EmitGetRegisterName(raw_ostream &O) {
  Record *AsmWriter = Target.getAsmWriter();
  std::string ClassName = AsmWriter->getValueAsString("AsmWriterClassName");
  const std::vector<CodeGenRegister*> &Registers =
    Target.getRegBank().getRegisters();
  std::vector<Record*> AltNameIndices = Target.getRegAltNameIndices();
  bool hasAltNames = AltNameIndices.size() > 1;
  O <<
  "\n\n/// getRegisterName - This method is automatically generated by tblgen\n"
  "/// from the register set description. This returns the assembler name\n"
  "/// for the specified register.\n"
  "const char *" << Target.getName() << ClassName << "::";
  if (hasAltNames)
    O << "\ngetRegisterName(unsigned RegNo, unsigned AltIdx) {\n";
  else
    O << "getRegisterName(unsigned RegNo) {\n";
  O << " assert(RegNo && RegNo < " << (Registers.size()+1)
    << " && \"Invalid register number!\");\n"
    << "\n";
  if (hasAltNames) {
    // One (AsmStrs<Name>, RegAsmOffset<Name>) table pair per alt-name index.
    for (unsigned i = 0, e = AltNameIndices.size(); i < e; ++i)
      emitRegisterNameString(O, AltNameIndices[i]->getName(), Registers);
  } else
    emitRegisterNameString(O, "", Registers);
  if (hasAltNames) {
    O << " const uint32_t *RegAsmOffset;\n"
      << " const char *AsmStrs;\n"
      << " switch(AltIdx) {\n"
      << " default: llvm_unreachable(\"Invalid register alt name index!\");\n";
    for (unsigned i = 0, e = AltNameIndices.size(); i < e; ++i) {
      // Use this index record's own namespace (was AltNameIndices[1]).
      StringRef Namespace = AltNameIndices[i]->getValueAsString("Namespace");
      StringRef AltName(AltNameIndices[i]->getName());
      O << " case " << Namespace << "::" << AltName
        << ":\n"
        << " AsmStrs = AsmStrs" << AltName << ";\n"
        << " RegAsmOffset = RegAsmOffset" << AltName << ";\n"
        << " break;\n";
    }
    O << "}\n";
  }
  O << " assert (*(AsmStrs+RegAsmOffset[RegNo-1]) &&\n"
    << " \"Invalid alt name index for register!\");\n"
    << " return AsmStrs+RegAsmOffset[RegNo-1];\n"
    << "}\n";
}
namespace {
// IAPrinter - Holds information about an InstAlias. Two InstAliases match if
// they both have the same conditionals. In which case, we cannot print out the
// alias for that pattern.
class IAPrinter {
std::vector<std::string> Conds;
std::map<StringRef, unsigned> OpMap;
std::string Result;
std::string AsmString;
SmallVector<Record*, 4> ReqFeatures;
public:
IAPrinter(std::string R, std::string AS)
: Result(R), AsmString(AS) {}
void addCond(const std::string &C) { Conds.push_back(C); }
void addOperand(StringRef Op, unsigned Idx) {
assert(Idx < 0xFF && "Index too large!");
OpMap[Op] = Idx;
}
unsigned getOpIndex(StringRef Op) { return OpMap[Op]; }
bool isOpMapped(StringRef Op) { return OpMap.find(Op) != OpMap.end(); }
void print(raw_ostream &O) {
if (Conds.empty() && ReqFeatures.empty()) {
O.indent(6) << "return true;\n";
return;
}
O << "if (";
for (std::vector<std::string>::iterator
I = Conds.begin(), E = Conds.end(); I != E; ++I) {
if (I != Conds.begin()) {
O << " &&\n";
O.indent(8);
}
O << *I;
}
O << ") {\n";
O.indent(6) << "// " << Result << "\n";
// Directly mangle mapped operands into the string. Each operand is
// identified by a '$' sign followed by a byte identifying the number of the
// operand. We add one to the index to avoid zero bytes.
std::pair<StringRef, StringRef> ASM = StringRef(AsmString).split(' ');
SmallString<128> OutString = ASM.first;
if (!ASM.second.empty()) {
raw_svector_ostream OS(OutString);
OS << ' ';
for (StringRef::iterator I = ASM.second.begin(), E = ASM.second.end();
I != E;) {
OS << *I;
if (*I == '$') {
StringRef::iterator Start = ++I;
while (I != E &&
((*I >= 'a' && *I <= 'z') || (*I >= 'A' && *I <= 'Z') ||
(*I >= '0' && *I <= '9') || *I == '_'))
++I;
StringRef Name(Start, I - Start);
assert(isOpMapped(Name) && "Unmapped operand!");
OS << format("\\x%02X", (unsigned char)getOpIndex(Name) + 1);
} else {
++I;
}
}
}
// Emit the string.
O.indent(6) << "AsmString = \"" << OutString.str() << "\";\n";
O.indent(6) << "break;\n";
O.indent(4) << '}';
}
bool operator==(const IAPrinter &RHS) {
if (Conds.size() != RHS.Conds.size())
return false;
unsigned Idx = 0;
for (std::vector<std::string>::iterator
I = Conds.begin(), E = Conds.end(); I != E; ++I)
if (*I != RHS.Conds[Idx++])
return false;
return true;
}
bool operator()(const IAPrinter &RHS) {
if (Conds.size() < RHS.Conds.size())
return true;
unsigned Idx = 0;
for (std::vector<std::string>::iterator
I = Conds.begin(), E = Conds.end(); I != E; ++I)
if (*I != RHS.Conds[Idx++])
return *I < RHS.Conds[Idx++];
return false;
}
};
} // end anonymous namespace
// CountNumOperands - Count the operands in an alias asm string, i.e. the
// number of space-separated tokens that follow the mnemonic.
static unsigned CountNumOperands(StringRef AsmString) {
  unsigned NumOps = 0;
  // Each successful split past the first token is one more operand.
  for (std::pair<StringRef, StringRef> Pieces = AsmString.split(' ');
       !Pieces.second.empty(); Pieces = Pieces.second.split(' '))
    ++NumOps;
  return NumOps;
}
// CountResultNumOperands - Count the operands of a result-instruction asm
// string: the space-separated tokens after the first '\t'.  For multi-variant
// strings ("...{variantA|variantB}...") only the first variant is counted.
//
// Fix vs. original: StringRef::substr(Start, N) takes a *length* as its
// second argument, but the old code passed the raw position of '|', making
// the extracted variant too long by the position of '{'.  The length is now
// computed relative to '{'; when no '|' exists, npos - I still clamps to the
// end of the string.
static unsigned CountResultNumOperands(StringRef AsmString) {
  unsigned NumOps = 0;
  std::pair<StringRef, StringRef> ASM = AsmString.split('\t');
  if (!ASM.second.empty()) {
    size_t I = ASM.second.find('{');
    StringRef Str = ASM.second;
    if (I != StringRef::npos)
      Str = ASM.second.substr(I, ASM.second.find('|', I) - I);
    ASM = Str.split(' ');
    do {
      ++NumOps;
      ASM = ASM.second.split(' ');
    } while (!ASM.second.empty());
  }
  return NumOps;
}
/// EmitPrintAliasInstruction - Generate the printAliasInstr() method, which
/// tries to print an MCInst using one of its InstAlias forms.  For each
/// aliased opcode a set of guard conditions is emitted; when one matches,
/// the alias asm string (with operand indices mangled in as single bytes)
/// is printed instead of the canonical form.  Only emitted for MC writers.
void AsmWriterEmitter::EmitPrintAliasInstruction(raw_ostream &O) {
  Record *AsmWriter = Target.getAsmWriter();
  if (!AsmWriter->getValueAsBit("isMCAsmWriter"))
    return;
  O << "\n#ifdef PRINT_ALIAS_INSTR\n";
  O << "#undef PRINT_ALIAS_INSTR\n\n";
  // Emit the method that prints the alias instruction.
  std::string ClassName = AsmWriter->getValueAsString("AsmWriterClassName");
  std::vector<Record*> AllInstAliases =
    Records.getAllDerivedDefinitions("InstAlias");
  // Create a map from the qualified name to a list of potential matches.
  // NOTE(review): the CodeGenInstAlias allocated below is never freed, and
  // when EmitAlias is false it is leaked before the continue -- appears
  // intentional for a short-lived tblgen process, but worth confirming.
  std::map<std::string, std::vector<CodeGenInstAlias*> > AliasMap;
  for (std::vector<Record*>::iterator
         I = AllInstAliases.begin(), E = AllInstAliases.end(); I != E; ++I) {
    CodeGenInstAlias *Alias = new CodeGenInstAlias(*I, Target);
    const Record *R = *I;
    if (!R->getValueAsBit("EmitAlias"))
      continue; // We were told not to emit the alias, but to emit the aliasee.
    const DagInit *DI = R->getValueAsDag("ResultInst");
    const DefInit *Op = cast<DefInit>(DI->getOperator());
    AliasMap[getQualifiedName(Op->getDef())].push_back(Alias);
  }
  // A map of which conditions need to be met for each instruction operand
  // before it can be matched to the mnemonic.
  std::map<std::string, std::vector<IAPrinter*> > IAPrinterMap;
  for (std::map<std::string, std::vector<CodeGenInstAlias*> >::iterator
         I = AliasMap.begin(), E = AliasMap.end(); I != E; ++I) {
    std::vector<CodeGenInstAlias*> &Aliases = I->second;
    for (std::vector<CodeGenInstAlias*>::iterator
           II = Aliases.begin(), IE = Aliases.end(); II != IE; ++II) {
      const CodeGenInstAlias *CGA = *II;
      unsigned LastOpNo = CGA->ResultInstOperandIndex.size();
      unsigned NumResultOps =
        CountResultNumOperands(CGA->ResultInst->AsmString);
      // Don't emit the alias if it has more operands than what it's aliasing.
      if (NumResultOps < CountNumOperands(CGA->AsmString))
        continue;
      IAPrinter *IAP = new IAPrinter(CGA->Result->getAsString(),
                                     CGA->AsmString);
      // The alias only applies when the MCInst has exactly the expected
      // number of operands.
      std::string Cond;
      Cond = std::string("MI->getNumOperands() == ") + llvm::utostr(LastOpNo);
      IAP->addCond(Cond);
      bool CantHandle = false;
      // Add one guard condition per result operand, by operand kind.
      for (unsigned i = 0, e = LastOpNo; i != e; ++i) {
        const CodeGenInstAlias::ResultOperand &RO = CGA->ResultOperands[i];
        switch (RO.Kind) {
        case CodeGenInstAlias::ResultOperand::K_Record: {
          const Record *Rec = RO.getRecord();
          StringRef ROName = RO.getName();
          if (Rec->isSubClassOf("RegisterOperand"))
            Rec = Rec->getValueAsDef("RegClass");
          if (Rec->isSubClassOf("RegisterClass")) {
            Cond = std::string("MI->getOperand(")+llvm::utostr(i)+").isReg()";
            IAP->addCond(Cond);
            if (!IAP->isOpMapped(ROName)) {
              // First use of this named operand: require register-class
              // membership and remember which MCInst index it binds to.
              IAP->addOperand(ROName, i);
              Record *R = CGA->ResultOperands[i].getRecord();
              if (R->isSubClassOf("RegisterOperand"))
                R = R->getValueAsDef("RegClass");
              Cond = std::string("MRI.getRegClass(") + Target.getName() + "::" +
                R->getName() + "RegClassID)"
                ".contains(MI->getOperand(" + llvm::utostr(i) + ").getReg())";
              IAP->addCond(Cond);
            } else {
              // Repeated use of the same name: both operands must hold the
              // same register.
              Cond = std::string("MI->getOperand(") +
                llvm::utostr(i) + ").getReg() == MI->getOperand(" +
                llvm::utostr(IAP->getOpIndex(ROName)) + ").getReg()";
              IAP->addCond(Cond);
            }
          } else {
            assert(Rec->isSubClassOf("Operand") && "Unexpected operand!");
            // FIXME: We may need to handle these situations.
            delete IAP;
            IAP = 0;
            CantHandle = true;
            break;
          }
          break;
        }
        case CodeGenInstAlias::ResultOperand::K_Imm: {
          std::string Op = "MI->getOperand(" + llvm::utostr(i) + ")";
          // Just because the alias has an immediate result, doesn't mean the
          // MCInst will. An MCExpr could be present, for example.
          IAP->addCond(Op + ".isImm()");
          Cond = Op + ".getImm() == "
            + llvm::utostr(CGA->ResultOperands[i].getImm());
          IAP->addCond(Cond);
          break;
        }
        case CodeGenInstAlias::ResultOperand::K_Reg:
          // If this is zero_reg, something's playing tricks we're not
          // equipped to handle.
          if (!CGA->ResultOperands[i].getRegister()) {
            CantHandle = true;
            break;
          }
          Cond = std::string("MI->getOperand(") +
            llvm::utostr(i) + ").getReg() == " + Target.getName() +
            "::" + CGA->ResultOperands[i].getRegister()->getName();
          IAP->addCond(Cond);
          break;
        }
        if (!IAP) break;
      }
      if (CantHandle) continue;
      IAPrinterMap[I->first].push_back(IAP);
    }
  }
  std::string Header;
  raw_string_ostream HeaderO(Header);
  HeaderO << "bool " << Target.getName() << ClassName
          << "::printAliasInstr(const MCInst"
          << " *MI, raw_ostream &OS) {\n";
  std::string Cases;
  raw_string_ostream CasesO(Cases);
  // Build the per-opcode case bodies, de-duplicating printers whose guard
  // conditions are identical (those would be ambiguous at runtime).
  for (std::map<std::string, std::vector<IAPrinter*> >::iterator
         I = IAPrinterMap.begin(), E = IAPrinterMap.end(); I != E; ++I) {
    std::vector<IAPrinter*> &IAPs = I->second;
    std::vector<IAPrinter*> UniqueIAPs;
    for (std::vector<IAPrinter*>::iterator
           II = IAPs.begin(), IE = IAPs.end(); II != IE; ++II) {
      IAPrinter *LHS = *II;
      bool IsDup = false;
      for (std::vector<IAPrinter*>::iterator
             III = IAPs.begin(), IIE = IAPs.end(); III != IIE; ++III) {
        IAPrinter *RHS = *III;
        if (LHS != RHS && *LHS == *RHS) {
          IsDup = true;
          break;
        }
      }
      if (!IsDup) UniqueIAPs.push_back(LHS);
    }
    if (UniqueIAPs.empty()) continue;
    CasesO.indent(2) << "case " << I->first << ":\n";
    for (std::vector<IAPrinter*>::iterator
           II = UniqueIAPs.begin(), IE = UniqueIAPs.end(); II != IE; ++II) {
      IAPrinter *IAP = *II;
      CasesO.indent(4);
      IAP->print(CasesO);
      CasesO << '\n';
    }
    CasesO.indent(4) << "return false;\n";
  }
  // No aliases at all: emit a trivial method and bail out.
  if (CasesO.str().empty()) {
    O << HeaderO.str();
    O << " return false;\n";
    O << "}\n\n";
    O << "#endif // PRINT_ALIAS_INSTR\n";
    return;
  }
  O << HeaderO.str();
  O.indent(2) << "const char *AsmString;\n";
  O.indent(2) << "switch (MI->getOpcode()) {\n";
  O.indent(2) << "default: return false;\n";
  O << CasesO.str();
  O.indent(2) << "}\n\n";
  // Code that prints the alias, replacing the operands with the ones from the
  // MCInst.  A '$' in the selected AsmString is followed by a single byte
  // holding (operand index + 1), written by IAPrinter::print above.
  O << " unsigned I = 0;\n";
  O << " while (AsmString[I] != ' ' && AsmString[I] != '\\0')\n";
  O << " ++I;\n";
  O << " OS << '\\t' << StringRef(AsmString, I);\n";
  O << " if (AsmString[I] != '\\0') {\n";
  O << " OS << '\\t';\n";
  O << " do {\n";
  O << " if (AsmString[I] == '$') {\n";
  O << " ++I;\n";
  O << " printOperand(MI, unsigned(AsmString[I++]) - 1, OS);\n";
  O << " } else {\n";
  O << " OS << AsmString[I++];\n";
  O << " }\n";
  O << " } while (AsmString[I] != '\\0');\n";
  O << " }\n\n";
  O << " return true;\n";
  O << "}\n\n";
  O << "#endif // PRINT_ALIAS_INSTR\n";
}
/// AsmWriterEmitter - Collect an AsmWriterInst for every target instruction
/// that has a non-empty asm string (PHI excluded), capture the numbered
/// instruction order, and build the CodeGenInstruction -> AsmWriterInst map
/// used by the emit methods.
AsmWriterEmitter::AsmWriterEmitter(RecordKeeper &R) : Records(R), Target(R) {
  Record *AsmWriter = Target.getAsmWriter();
  for (CodeGenTarget::inst_iterator I = Target.inst_begin(),
       E = Target.inst_end();
       I != E; ++I)
    if (!(*I)->AsmString.empty() && (*I)->TheDef->getName() != "PHI")
      Instructions.push_back(
        AsmWriterInst(**I, AsmWriter->getValueAsInt("Variant"),
                      AsmWriter->getValueAsInt("FirstOperandColumn"),
                      AsmWriter->getValueAsInt("OperandSpacing")));
  // Get the instruction numbering.
  NumberedInstructions = Target.getInstructionsByEnumValue();
  // Compute the CodeGenInstruction -> AsmWriterInst mapping.  Note that not
  // all machine instructions are necessarily being printed, so there may be
  // target instructions not in this map.
  for (unsigned i = 0, e = Instructions.size(); i != e; ++i)
    CGIAWIMap.insert(std::make_pair(Instructions[i].CGI, &Instructions[i]));
}
/// run - Emit the three generated components in order.  Note that
/// EmitPrintInstruction consumes the Instructions vector, so it must run
/// before anything that depends on it.
void AsmWriterEmitter::run(raw_ostream &O) {
  EmitPrintInstruction(O);
  EmitGetRegisterName(O);
  EmitPrintAliasInstruction(O);
}
namespace llvm {
// EmitAsmWriter - Public tblgen entry point: emit the standard source-file
// header followed by the generated assembly-writer fragment.
void EmitAsmWriter(RecordKeeper &RK, raw_ostream &OS) {
  emitSourceFileHeader("Assembly Writer Source Fragment", OS);
  AsmWriterEmitter(RK).run(OS);
}
} // End llvm namespace
| gpl-2.0 |
jadref/buffer_bci | java/SignalViewer/src/org/jfree/chart/renderer/xy/CyclicXYItemRenderer.java | 17319 | /* ===========================================================
* JFreeChart : a free chart library for the Java(tm) platform
* ===========================================================
*
* (C) Copyright 2000-2013, by Object Refinery Limited and Contributors.
*
* Project Info: http://www.jfree.org/jfreechart/index.html
*
* This library is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation; either version 2.1 of the License, or
* (at your option) any later version.
*
* This library is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
* License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
* USA.
*
* [Oracle and Java are registered trademarks of Oracle and/or its affiliates.
* Other names may be trademarks of their respective owners.]
*
* ---------------------------
* CyclicXYItemRenderer.java
* ---------------------------
* (C) Copyright 2003-2008, by Nicolas Brodu and Contributors.
*
* Original Author: Nicolas Brodu;
* Contributor(s): David Gilbert (for Object Refinery Limited);
*
* Changes
* -------
* 19-Nov-2003 : Initial import to JFreeChart from the JSynoptic project (NB);
* 23-Dec-2003 : Added missing Javadocs (DG);
* 25-Feb-2004 : Replaced CrosshairInfo with CrosshairState (DG);
* 15-Jul-2004 : Switched getX() with getXValue() and getY() with
* getYValue() (DG);
* ------------- JFREECHART 1.0.0 ---------------------------------------------
* 06-Jul-2006 : Modified to call only dataset methods that return double
* primitives (DG);
*
*/
package org.jfree.chart.renderer.xy;
import java.awt.Graphics2D;
import java.awt.geom.Rectangle2D;
import java.io.Serializable;
import org.jfree.chart.axis.CyclicNumberAxis;
import org.jfree.chart.axis.ValueAxis;
import org.jfree.chart.labels.XYToolTipGenerator;
import org.jfree.chart.plot.CrosshairState;
import org.jfree.chart.plot.PlotRenderingInfo;
import org.jfree.chart.plot.XYPlot;
import org.jfree.chart.urls.XYURLGenerator;
import org.jfree.data.DomainOrder;
import org.jfree.data.general.DatasetChangeListener;
import org.jfree.data.general.DatasetGroup;
import org.jfree.data.xy.XYDataset;
/**
* The Cyclic XY item renderer is specially designed to handle cyclic axis.
* While the standard renderer would draw a line across the plot when a cycling
* occurs, the cyclic renderer splits the line at each cycle end instead. This
* is done by interpolating new points at cycle boundary. Thus, correct
* appearance is restored.
*
* The Cyclic XY item renderer works exactly like a standard XY item renderer
* with non-cyclic axis.
*/
public class CyclicXYItemRenderer extends StandardXYItemRenderer
implements Serializable {
/** For serialization. */
private static final long serialVersionUID = 4035912243303764892L;
/**
* Default constructor.
*/
    public CyclicXYItemRenderer() {
        // Inherit the superclass defaults unchanged; this renderer only
        // alters how segments crossing a cycle boundary are drawn.
        super();
    }
/**
* Creates a new renderer.
*
* @param type the renderer type.
*/
    public CyclicXYItemRenderer(int type) {
        // Pass the renderer type flag straight through to
        // StandardXYItemRenderer.
        super(type);
    }
/**
* Creates a new renderer.
*
* @param type the renderer type.
* @param labelGenerator the tooltip generator.
*/
    public CyclicXYItemRenderer(int type, XYToolTipGenerator labelGenerator) {
        // Delegate both the type flag and the tooltip generator unchanged.
        super(type, labelGenerator);
    }
/**
* Creates a new renderer.
*
* @param type the renderer type.
* @param labelGenerator the tooltip generator.
* @param urlGenerator the url generator.
*/
    public CyclicXYItemRenderer(int type,
                                XYToolTipGenerator labelGenerator,
                                XYURLGenerator urlGenerator) {
        // Delegate type flag, tooltip generator and URL generator unchanged.
        super(type, labelGenerator, urlGenerator);
    }
/**
* Draws the visual representation of a single data item.
* When using cyclic axis, do not draw a line from right to left when
* cycling as would a standard XY item renderer, but instead draw a line
* from the previous point to the cycle bound in the last cycle, and a line
* from the cycle bound to current point in the current cycle.
*
* @param g2 the graphics device.
* @param state the renderer state.
* @param dataArea the data area.
* @param info the plot rendering info.
* @param plot the plot.
* @param domainAxis the domain axis.
* @param rangeAxis the range axis.
* @param dataset the dataset.
* @param series the series index.
* @param item the item index.
* @param crosshairState crosshair information for the plot
* (<code>null</code> permitted).
* @param pass the current pass index.
*/
@Override
public void drawItem(Graphics2D g2, XYItemRendererState state,
Rectangle2D dataArea, PlotRenderingInfo info, XYPlot plot,
ValueAxis domainAxis, ValueAxis rangeAxis, XYDataset dataset,
int series, int item, CrosshairState crosshairState, int pass) {
if ((!getPlotLines()) || ((!(domainAxis instanceof CyclicNumberAxis))
&& (!(rangeAxis instanceof CyclicNumberAxis))) || (item <= 0)) {
super.drawItem(g2, state, dataArea, info, plot, domainAxis,
rangeAxis, dataset, series, item, crosshairState, pass);
return;
}
// get the previous data point...
double xn = dataset.getXValue(series, item - 1);
double yn = dataset.getYValue(series, item - 1);
// If null, don't draw line => then delegate to parent
if (Double.isNaN(yn)) {
super.drawItem(g2, state, dataArea, info, plot, domainAxis,
rangeAxis, dataset, series, item, crosshairState, pass);
return;
}
double[] x = new double[2];
double[] y = new double[2];
x[0] = xn;
y[0] = yn;
// get the data point...
xn = dataset.getXValue(series, item);
yn = dataset.getYValue(series, item);
// If null, don't draw line at all
if (Double.isNaN(yn)) {
return;
}
x[1] = xn;
y[1] = yn;
// Now split the segment as needed
double xcycleBound = Double.NaN;
double ycycleBound = Double.NaN;
boolean xBoundMapping = false, yBoundMapping = false;
CyclicNumberAxis cnax = null, cnay = null;
if (domainAxis instanceof CyclicNumberAxis) {
cnax = (CyclicNumberAxis) domainAxis;
xcycleBound = cnax.getCycleBound();
xBoundMapping = cnax.isBoundMappedToLastCycle();
// If the segment must be splitted, insert a new point
// Strict test forces to have real segments (not 2 equal points)
// and avoids division by 0
if ((x[0] != x[1])
&& ((xcycleBound >= x[0])
&& (xcycleBound <= x[1])
|| (xcycleBound >= x[1])
&& (xcycleBound <= x[0]))) {
double[] nx = new double[3];
double[] ny = new double[3];
nx[0] = x[0]; nx[2] = x[1]; ny[0] = y[0]; ny[2] = y[1];
nx[1] = xcycleBound;
ny[1] = (y[1] - y[0]) * (xcycleBound - x[0])
/ (x[1] - x[0]) + y[0];
x = nx; y = ny;
}
}
if (rangeAxis instanceof CyclicNumberAxis) {
cnay = (CyclicNumberAxis) rangeAxis;
ycycleBound = cnay.getCycleBound();
yBoundMapping = cnay.isBoundMappedToLastCycle();
// The split may occur in either x splitted segments, if any, but
// not in both
if ((y[0] != y[1]) && ((ycycleBound >= y[0])
&& (ycycleBound <= y[1])
|| (ycycleBound >= y[1]) && (ycycleBound <= y[0]))) {
double[] nx = new double[x.length + 1];
double[] ny = new double[y.length + 1];
nx[0] = x[0]; nx[2] = x[1]; ny[0] = y[0]; ny[2] = y[1];
ny[1] = ycycleBound;
nx[1] = (x[1] - x[0]) * (ycycleBound - y[0])
/ (y[1] - y[0]) + x[0];
if (x.length == 3) {
nx[3] = x[2]; ny[3] = y[2];
}
x = nx; y = ny;
}
else if ((x.length == 3) && (y[1] != y[2]) && ((ycycleBound >= y[1])
&& (ycycleBound <= y[2])
|| (ycycleBound >= y[2]) && (ycycleBound <= y[1]))) {
double[] nx = new double[4];
double[] ny = new double[4];
nx[0] = x[0]; nx[1] = x[1]; nx[3] = x[2];
ny[0] = y[0]; ny[1] = y[1]; ny[3] = y[2];
ny[2] = ycycleBound;
nx[2] = (x[2] - x[1]) * (ycycleBound - y[1])
/ (y[2] - y[1]) + x[1];
x = nx; y = ny;
}
}
// If the line is not wrapping, then parent is OK
if (x.length == 2) {
super.drawItem(g2, state, dataArea, info, plot, domainAxis,
rangeAxis, dataset, series, item, crosshairState, pass);
return;
}
OverwriteDataSet newset = new OverwriteDataSet(x, y, dataset);
if (cnax != null) {
if (xcycleBound == x[0]) {
cnax.setBoundMappedToLastCycle(x[1] <= xcycleBound);
}
if (xcycleBound == x[1]) {
cnax.setBoundMappedToLastCycle(x[0] <= xcycleBound);
}
}
if (cnay != null) {
if (ycycleBound == y[0]) {
cnay.setBoundMappedToLastCycle(y[1] <= ycycleBound);
}
if (ycycleBound == y[1]) {
cnay.setBoundMappedToLastCycle(y[0] <= ycycleBound);
}
}
super.drawItem(
g2, state, dataArea, info, plot, domainAxis, rangeAxis,
newset, series, 1, crosshairState, pass
);
if (cnax != null) {
if (xcycleBound == x[1]) {
cnax.setBoundMappedToLastCycle(x[2] <= xcycleBound);
}
if (xcycleBound == x[2]) {
cnax.setBoundMappedToLastCycle(x[1] <= xcycleBound);
}
}
if (cnay != null) {
if (ycycleBound == y[1]) {
cnay.setBoundMappedToLastCycle(y[2] <= ycycleBound);
}
if (ycycleBound == y[2]) {
cnay.setBoundMappedToLastCycle(y[1] <= ycycleBound);
}
}
super.drawItem(g2, state, dataArea, info, plot, domainAxis, rangeAxis,
newset, series, 2, crosshairState, pass);
if (x.length == 4) {
if (cnax != null) {
if (xcycleBound == x[2]) {
cnax.setBoundMappedToLastCycle(x[3] <= xcycleBound);
}
if (xcycleBound == x[3]) {
cnax.setBoundMappedToLastCycle(x[2] <= xcycleBound);
}
}
if (cnay != null) {
if (ycycleBound == y[2]) {
cnay.setBoundMappedToLastCycle(y[3] <= ycycleBound);
}
if (ycycleBound == y[3]) {
cnay.setBoundMappedToLastCycle(y[2] <= ycycleBound);
}
}
super.drawItem(g2, state, dataArea, info, plot, domainAxis,
rangeAxis, newset, series, 3, crosshairState, pass);
}
if (cnax != null) {
cnax.setBoundMappedToLastCycle(xBoundMapping);
}
if (cnay != null) {
cnay.setBoundMappedToLastCycle(yBoundMapping);
}
}
/**
* A dataset to hold the interpolated points when drawing new lines.
*/
protected static class OverwriteDataSet implements XYDataset {
/** The delegate dataset. */
protected XYDataset delegateSet;
/** Storage for the x and y values. */
Double[] x, y;
/**
* Creates a new dataset.
*
* @param x the x values.
* @param y the y values.
* @param delegateSet the dataset.
*/
public OverwriteDataSet(double [] x, double[] y,
XYDataset delegateSet) {
this.delegateSet = delegateSet;
this.x = new Double[x.length]; this.y = new Double[y.length];
for (int i = 0; i < x.length; ++i) {
this.x[i] = new Double(x[i]);
this.y[i] = new Double(y[i]);
}
}
/**
* Returns the order of the domain (X) values.
*
* @return The domain order.
*/
@Override
public DomainOrder getDomainOrder() {
return DomainOrder.NONE;
}
/**
* Returns the number of items for the given series.
*
* @param series the series index (zero-based).
*
* @return The item count.
*/
@Override
public int getItemCount(int series) {
return this.x.length;
}
/**
* Returns the x-value.
*
* @param series the series index (zero-based).
* @param item the item index (zero-based).
*
* @return The x-value.
*/
@Override
public Number getX(int series, int item) {
return this.x[item];
}
/**
* Returns the x-value (as a double primitive) for an item within a
* series.
*
* @param series the series (zero-based index).
* @param item the item (zero-based index).
*
* @return The x-value.
*/
@Override
public double getXValue(int series, int item) {
double result = Double.NaN;
Number xx = getX(series, item);
if (xx != null) {
result = xx.doubleValue();
}
return result;
}
/**
* Returns the y-value.
*
* @param series the series index (zero-based).
* @param item the item index (zero-based).
*
* @return The y-value.
*/
@Override
public Number getY(int series, int item) {
return this.y[item];
}
/**
* Returns the y-value (as a double primitive) for an item within a
* series.
*
* @param series the series (zero-based index).
* @param item the item (zero-based index).
*
* @return The y-value.
*/
@Override
public double getYValue(int series, int item) {
double result = Double.NaN;
Number yy = getY(series, item);
if (yy != null) {
result = yy.doubleValue();
}
return result;
}
/**
* Returns the number of series in the dataset.
*
* @return The series count.
*/
@Override
public int getSeriesCount() {
return this.delegateSet.getSeriesCount();
}
/**
* Returns the name of the given series.
*
* @param series the series index (zero-based).
*
* @return The series name.
*/
@Override
public Comparable getSeriesKey(int series) {
return this.delegateSet.getSeriesKey(series);
}
/**
* Returns the index of the named series, or -1.
*
* @param seriesName the series name.
*
* @return The index.
*/
@Override
public int indexOf(Comparable seriesName) {
return this.delegateSet.indexOf(seriesName);
}
/**
* Does nothing.
*
* @param listener ignored.
*/
@Override
public void addChangeListener(DatasetChangeListener listener) {
// unused in parent
}
/**
* Does nothing.
*
* @param listener ignored.
*/
@Override
public void removeChangeListener(DatasetChangeListener listener) {
// unused in parent
}
/**
* Returns the dataset group.
*
* @return The dataset group.
*/
@Override
public DatasetGroup getGroup() {
// unused but must return something, so while we are at it...
return this.delegateSet.getGroup();
}
/**
* Does nothing.
*
* @param group ignored.
*/
@Override
public void setGroup(DatasetGroup group) {
// unused in parent
}
}
}
| gpl-3.0 |
Dynamic-Business/moodle | mod/forum/classes/event/subscription_deleted.php | 3330 | <?php
// This file is part of Moodle - http://moodle.org/
//
// Moodle is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Moodle is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Moodle. If not, see <http://www.gnu.org/licenses/>.
/**
* The mod_forum subscription deleted event.
*
* @package mod_forum
* @copyright 2014 Dan Poltawski <[email protected]>
* @license http://www.gnu.org/copyleft/gpl.html GNU GPL v3 or later
*/
namespace mod_forum\event;
defined('MOODLE_INTERNAL') || die();
/**
* The mod_forum subscription deleted event class.
*
* @property-read array $other {
* Extra information about the event.
*
* - int forumid: The id of the forum which has been unsusbcribed from.
* }
*
* @package mod_forum
* @since Moodle 2.7
* @copyright 2014 Dan Poltawski <[email protected]>
* @license http://www.gnu.org/copyleft/gpl.html GNU GPL v3 or later
*/
class subscription_deleted extends \core\event\base {
    /**
     * Init method.
     *
     * @return void
     */
    protected function init() {
        // Deletion event ('d' = delete) on the forum_subscriptions table.
        $this->data['crud'] = 'd';
        $this->data['edulevel'] = self::LEVEL_OTHER;
        $this->data['objecttable'] = 'forum_subscriptions';
    }

    /**
     * Returns description of what happened.
     *
     * @return string
     */
    public function get_description() {
        // Fixed wording: deleting a subscription unsubscribes the related user
        // *from* the forum (the previous text said "unsubscribed ... to").
        return "The user with id '$this->userid' unsubscribed the user with id '$this->relateduserid' from the forum with " .
            "course module id '$this->contextinstanceid'.";
    }

    /**
     * Return localised event name.
     *
     * @return string
     */
    public static function get_name() {
        return get_string('eventsubscriptiondeleted', 'mod_forum');
    }

    /**
     * Get URL related to the action.
     *
     * @return \moodle_url
     */
    public function get_url() {
        return new \moodle_url('/mod/forum/subscribers.php', array('id' => $this->other['forumid']));
    }

    /**
     * Return the legacy event log data.
     *
     * @return array|null
     */
    protected function get_legacy_logdata() {
        return array($this->courseid, 'forum', 'unsubscribe', 'view.php?f=' . $this->other['forumid'],
            $this->other['forumid'], $this->contextinstanceid);
    }

    /**
     * Custom validation: the event requires a related user, a forum id in
     * 'other', and a module-level context.
     *
     * @throws \coding_exception
     * @return void
     */
    protected function validate_data() {
        parent::validate_data();

        if (!isset($this->relateduserid)) {
            throw new \coding_exception('The \'relateduserid\' must be set.');
        }

        if (!isset($this->other['forumid'])) {
            throw new \coding_exception('The \'forumid\' value must be set in other.');
        }

        if ($this->contextlevel != CONTEXT_MODULE) {
            throw new \coding_exception('Context level must be CONTEXT_MODULE.');
        }
    }
}
| gpl-3.0 |
sameesh/Myrepos | Ris/Application/Common/Manifest/RisServerManifestVerification.cs | 2186 | #region License
// Copyright (c) 2013, ClearCanvas Inc.
// All rights reserved.
// http://www.clearcanvas.ca
//
// This file is part of the ClearCanvas RIS/PACS open source project.
//
// The ClearCanvas RIS/PACS open source project is free software: you can
// redistribute it and/or modify it under the terms of the GNU General Public
// License as published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// The ClearCanvas RIS/PACS open source project is distributed in the hope that it
// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
// Public License for more details.
//
// You should have received a copy of the GNU General Public License along with
// the ClearCanvas RIS/PACS open source project. If not, see
// <http://www.gnu.org/licenses/>.
#endregion
using System;
using ClearCanvas.Common;
namespace ClearCanvas.Ris.Application.Common.Manifest
{
	public static class RisServerManifestVerification
	{
		private static readonly object _syncRoot = new object();

		// Environment.TickCount at the time of the last server query.
		private static int _lastCheck;

		// Cached verdict of the last query; null until the first check or when
		// the manifest service could not be reached.
		private static bool? _lastValid;

		/// <summary>
		/// Gets whether the server manifest is valid, caching the server's
		/// answer for ~90 seconds. Returns true when the status is unknown.
		/// </summary>
		public static bool Valid
		{
			get
			{
				const int cacheTimeout = 90000; // milliseconds

				// Fast path: serve the cached answer while it is fresh.
				// Math.Abs guards against Environment.TickCount wrapping to
				// negative values (documented .NET behaviour after ~24.9 days).
				if (_lastValid.HasValue && Math.Abs(Environment.TickCount - _lastCheck) < cacheTimeout) return _lastValid.Value;
				lock (_syncRoot)
				{
					// Double-check: another thread may have refreshed the cache
					// while this one waited for the lock.
					// NOTE(review): _lastValid/_lastCheck are read outside the
					// lock without volatile/Interlocked - confirm torn or stale
					// reads are acceptable here (worst case: an extra query).
					if (!(_lastValid.HasValue && Math.Abs(Environment.TickCount - _lastCheck) < cacheTimeout))
					{
						_lastValid = Check();
						_lastCheck = Environment.TickCount;
					}
					return _lastValid.GetValueOrDefault(true); // just return true if server status unknown, since there will likely be other (more pressing) issues
				}
			}
		}

		// Queries the RIS manifest service once; returns null when the call
		// fails (status unknown), which the caller treats as "assume valid".
		private static bool? Check()
		{
			bool? valid = null;
			try
			{
				Platform.GetService<IRisManifestService>(s => valid = s.GetStatus(new GetStatusRequest()).IsValid);
			}
			catch (Exception)
			{
				// if the manifest service errors out, eat the exception and
				valid = null;
			}
			return valid;
		}
	}
} | gpl-3.0 |
LyndonMarques/devdollibar | htdocs/includes/ckeditor/_source/plugins/format/lang/ka.js | 701 | /*
Copyright (c) 2003-2014, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or http://ckeditor.com/license
*/
// Georgian (ka) localisation strings for the CKEditor "format" plugin.
// Pure data: keys are fixed by the plugin; only the string values are
// translated.
CKEDITOR.plugins.setLang( 'format', 'ka', {
	label: 'ფიორმატირება',
	panelTitle: 'ფორმატირება',
	tag_address: 'მისამართი',
	tag_div: 'ჩვეულებრივი (DIV)',
	tag_h1: 'სათაური 1',
	tag_h2: 'სათაური 2',
	tag_h3: 'სათაური 3',
	tag_h4: 'სათაური 4',
	tag_h5: 'სათაური 5',
	tag_h6: 'სათაური 6',
	tag_p: 'ჩვეულებრივი',
	tag_pre: 'ფორმატირებული'
} );
| gpl-3.0 |
LyndonMarques/devdollibar | htdocs/includes/ckeditor/_source/plugins/forms/lang/es.js | 1617 | /*
Copyright (c) 2003-2014, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or http://ckeditor.com/license
*/
// Spanish (es) localisation strings for the CKEditor "forms" plugin.
// Pure data: keys are fixed by the plugin; only the string values are
// translated.
CKEDITOR.plugins.setLang( 'forms', 'es', {
	button: {
		title: 'Propiedades de Botón',
		text: 'Texto (Valor)',
		type: 'Tipo',
		typeBtn: 'Boton',
		typeSbm: 'Enviar',
		typeRst: 'Reestablecer'
	},
	checkboxAndRadio: {
		checkboxTitle: 'Propiedades de Casilla',
		radioTitle: 'Propiedades de Botón de Radio',
		value: 'Valor',
		selected: 'Seleccionado'
	},
	form: {
		title: 'Propiedades de Formulario',
		menu: 'Propiedades de Formulario',
		action: 'Acción',
		method: 'Método',
		encoding: 'Codificación'
	},
	hidden: {
		title: 'Propiedades de Campo Oculto',
		name: 'Nombre',
		value: 'Valor'
	},
	select: {
		title: 'Propiedades de Campo de Selección',
		selectInfo: 'Información',
		opAvail: 'Opciones disponibles',
		value: 'Valor',
		size: 'Tamaño',
		lines: 'Lineas',
		chkMulti: 'Permitir múltiple selección',
		opText: 'Texto',
		opValue: 'Valor',
		btnAdd: 'Agregar',
		btnModify: 'Modificar',
		btnUp: 'Subir',
		btnDown: 'Bajar',
		btnSetValue: 'Establecer como predeterminado',
		btnDelete: 'Eliminar'
	},
	textarea: {
		title: 'Propiedades de Area de Texto',
		cols: 'Columnas',
		rows: 'Filas'
	},
	textfield: {
		title: 'Propiedades de Campo de Texto',
		name: 'Nombre',
		value: 'Valor',
		charWidth: 'Caracteres de ancho',
		maxChars: 'Máximo caracteres',
		type: 'Tipo',
		typeText: 'Texto',
		typePass: 'Contraseña',
		typeEmail: 'Correo electrónico',
		typeSearch: 'Buscar',
		typeTel: 'Número de teléfono',
		typeUrl: 'URL'
	}
} );
| gpl-3.0 |
aaujon/dolibarr | htdocs/includes/ckeditor/_source/plugins/div/lang/gu.js | 936 | /*
Copyright (c) 2003-2014, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or http://ckeditor.com/license
*/
// Gujarati (gu) localisation strings for the CKEditor "div" plugin.
// Pure data: keys are fixed by the plugin; only the string values are
// translated.
CKEDITOR.plugins.setLang( 'div', 'gu', {
	IdInputLabel: 'Id',
	advisoryTitleInputLabel: 'એડવાઈઝર શીર્ષક',
	cssClassInputLabel: 'સ્ટાઈલશીટ કલાસીસ',
	edit: 'ડીવીમાં ફેરફાર કરવો',
	inlineStyleInputLabel: 'ઈનલાઈન પદ્ધતિ',
	langDirLTRLabel: 'ડાબે થી જમણે (LTR)',
	langDirLabel: 'ભાષાની દિશા',
	langDirRTLLabel: 'જમણે થી ડાબે (RTL)',
	languageCodeInputLabel: 'ભાષાનો કોડ',
	remove: 'ડીવી કાઢી કાઢવું',
	styleSelectLabel: 'સ્ટાઈલ',
	title: 'Div કન્ટેનર બનાવુંવું',
	toolbar: 'Div કન્ટેનર બનાવુંવું'
} );
| gpl-3.0 |
martinep/OpenFOAM-2.2.X | src/lagrangian/distributionModels/distributionModel/distributionModel.C | 3729 | /*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
\*---------------------------------------------------------------------------*/
#include "distributionModel.H"
// * * * * * * * * * * * * * * Static Data Members * * * * * * * * * * * * * //
namespace Foam
{
    namespace distributionModels
    {
        // Standard OpenFOAM run-time-selection boilerplate: register the
        // type name (with debug switch 0) and declare the dictionary-based
        // run-time selection table for distributionModel.
        defineTypeNameAndDebug(distributionModel, 0);
        defineRunTimeSelectionTable(distributionModel, dictionary);
    }
}
// * * * * * * * * * * * * * Protected Member Functions * * * * * * * * * * //
// Sanity-checks the distribution bounds; aborts with a fatal error when the
// minimum is negative or the range is inverted (max < min).
void Foam::distributionModels::distributionModel::check() const
{
    // NOTE(review): the message says "greater than zero" but the condition
    // only rejects strictly negative minima (a minimum of exactly zero is
    // accepted) - confirm which of the two is intended.
    if (minValue() < 0)
    {
        FatalErrorIn("distributionModels::distributionModel::check() const")
            << type() << "distribution: Minimum value must be greater than "
            << "zero." << nl << "Supplied minValue = " << minValue()
            << abort(FatalError);
    }

    // The range must be non-empty: max >= min.
    if (maxValue() < minValue())
    {
        FatalErrorIn("distributionModels::distributionModel::check() const")
            << type() << "distribution: Maximum value is smaller than the "
            << "minimum value:" << nl << " maxValue = " << maxValue()
            << ", minValue = " << minValue()
            << abort(FatalError);
    }
}
// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //
// Construct from the "<name>Distribution" sub-dictionary of the supplied
// dictionary; keeps hold of the caller's random-number generator (rndGen_ is
// initialised from the reference - presumably shared, not copied; confirm
// against the member declaration in the header).
Foam::distributionModels::distributionModel::distributionModel
(
    const word& name,
    const dictionary& dict,
    cachedRandom& rndGen
)
:
    distributionModelDict_(dict.subDict(name + "Distribution")),
    rndGen_(rndGen)
{}

// Copy constructor: copies the model dictionary and the RNG member.
Foam::distributionModels::distributionModel::distributionModel
(
    const distributionModel& p
)
:
    distributionModelDict_(p.distributionModelDict_),
    rndGen_(p.rndGen_)
{}

// Destructor - nothing to clean up; members manage their own storage.
Foam::distributionModels::distributionModel::~distributionModel()
{}
// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
// Base-class stub: concrete distribution models must override sample().
// notImplemented reports a fatal "not implemented" error (presumably
// aborting), so the trailing return only satisfies the compiler.
Foam::scalar Foam::distributionModels::distributionModel::sample() const
{
    notImplemented
    (
        "Foam::scalar "
        "Foam::distributionModels::distributionModel::sample() const"
    );

    return 0.0;
}

// Base-class stub: concrete distribution models must override minValue().
Foam::scalar Foam::distributionModels::distributionModel::minValue() const
{
    notImplemented
    (
        "Foam::scalar "
        "Foam::distributionModels::distributionModel::minValue() const"
    );

    return 0.0;
}

// Base-class stub: concrete distribution models must override maxValue().
Foam::scalar Foam::distributionModels::distributionModel::maxValue() const
{
    notImplemented
    (
        "Foam::scalar "
        "Foam::distributionModels::distributionModel::maxValue() const"
    );

    return 0.0;
}
// ************************************************************************* //
| gpl-3.0 |
syncany/syncany-plugin-dropbox | core/syncany-lib/src/main/java/org/syncany/operations/status/StatusOperation.java | 8965 | /*
* Syncany, www.syncany.org
* Copyright (C) 2011-2015 Philipp C. Heckel <[email protected]>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.syncany.operations.status;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.nio.file.FileVisitResult;
import java.nio.file.FileVisitor;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.Map;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.syncany.config.Config;
import org.syncany.config.LocalEventBus;
import org.syncany.database.FileVersion;
import org.syncany.database.FileVersion.FileStatus;
import org.syncany.database.FileVersionComparator;
import org.syncany.database.FileVersionComparator.FileVersionComparison;
import org.syncany.database.SqlDatabase;
import org.syncany.operations.ChangeSet;
import org.syncany.operations.Operation;
import org.syncany.operations.daemon.messages.StatusEndSyncExternalEvent;
import org.syncany.operations.daemon.messages.StatusStartSyncExternalEvent;
import org.syncany.util.FileUtil;
/**
* The status operation analyzes the local file tree and compares it to the current local
* database. It uses the {@link FileVersionComparator} to determine differences and returns
* new/changed/deleted files in form of a {@link ChangeSet}.
*
* @author Philipp C. Heckel <[email protected]>
*/
public class StatusOperation extends Operation {
	private static final Logger logger = Logger.getLogger(StatusOperation.class.getSimpleName());

	// Compares a database FileVersion against the corresponding file on disk.
	private FileVersionComparator fileVersionComparator;
	private SqlDatabase localDatabase;
	private StatusOperationOptions options;
	private LocalEventBus eventBus;

	/** Creates the operation with default options. */
	public StatusOperation(Config config) {
		this(config, new StatusOperationOptions());
	}

	public StatusOperation(Config config, StatusOperationOptions options) {
		super(config);

		this.fileVersionComparator = new FileVersionComparator(config.getLocalDir(), config.getChunker().getChecksumAlgorithm());
		this.localDatabase = new SqlDatabase(config);
		this.options = options;
		this.eventBus = LocalEventBus.getInstance();
	}

	/**
	 * Walks the local directory tree, compares it to the current file tree in
	 * the local database and returns the differences as a {@link ChangeSet}.
	 * Posts start/end events on the local event bus around the scan.
	 */
	@Override
	public StatusOperationResult execute() throws Exception {
		logger.log(Level.INFO, "");
		logger.log(Level.INFO, "Running 'Status' at client "+config.getMachineName()+" ...");
		logger.log(Level.INFO, "--------------------------------------------");

		if (options != null && options.isForceChecksum()) {
			logger.log(Level.INFO, "Force checksum ENABLED.");
		}

		if (options != null && !options.isDelete()) {
			logger.log(Level.INFO, "Delete missing files DISABLED.");
		}

		// Get local database
		logger.log(Level.INFO, "Querying current file tree from database ...");
		eventBus.post(new StatusStartSyncExternalEvent(config.getLocalDir().getAbsolutePath()));

		// Path to actual file version
		final Map<String, FileVersion> filesInDatabase = localDatabase.getCurrentFileTree();

		// Find local changes
		logger.log(Level.INFO, "Analyzing local folder "+config.getLocalDir()+" ...");
		ChangeSet localChanges = findLocalChanges(filesInDatabase);

		if (!localChanges.hasChanges()) {
			logger.log(Level.INFO, "- No changes to local database");
		}

		// Return result
		StatusOperationResult statusResult = new StatusOperationResult();
		statusResult.setChangeSet(localChanges);

		eventBus.post(new StatusEndSyncExternalEvent(config.getLocalDir().getAbsolutePath(), localChanges.hasChanges()));

		return statusResult;
	}

	// Finds new/changed files by walking the disk; deleted files are only
	// appended when the 'delete' option is enabled (the default).
	private ChangeSet findLocalChanges(final Map<String, FileVersion> filesInDatabase) throws FileNotFoundException, IOException {
		ChangeSet localChanges = findLocalChangedAndNewFiles(config.getLocalDir(), filesInDatabase);

		if (options == null || options.isDelete()) {
			findAndAppendDeletedFiles(localChanges, filesInDatabase);
		}

		return localChanges;
	}

	// Walks the tree rooted at 'root' and classifies every visited file as
	// new/changed/unchanged relative to the database file tree.
	private ChangeSet findLocalChangedAndNewFiles(final File root, Map<String, FileVersion> filesInDatabase) throws FileNotFoundException, IOException {
		Path rootPath = Paths.get(root.getAbsolutePath());

		StatusFileVisitor fileVisitor = new StatusFileVisitor(rootPath, filesInDatabase);
		Files.walkFileTree(rootPath, fileVisitor);

		return fileVisitor.getChangeSet();
	}

	// Marks database entries whose on-disk file no longer exists as deleted.
	private void findAndAppendDeletedFiles(ChangeSet localChanges, Map<String,FileVersion> filesInDatabase) {
		for (FileVersion lastLocalVersion : filesInDatabase.values()) {
			// Check if file exists, remove if it doesn't
			File lastLocalVersionOnDisk = new File(config.getLocalDir()+File.separator+lastLocalVersion.getPath());

			// Ignore this file history if the last version is marked "DELETED"
			if (lastLocalVersion.getStatus() == FileStatus.DELETED) {
				continue;
			}

			// If file has VANISHED, mark as DELETED
			// NOTE(review): FileUtil.exists is used instead of File.exists -
			// presumably symlink-aware; confirm in FileUtil.
			if (!FileUtil.exists(lastLocalVersionOnDisk)) {
				localChanges.getDeletedFiles().add(lastLocalVersion.getPath());
			}
		}
	}

	/**
	 * File visitor that builds the {@link ChangeSet} while walking the tree.
	 * Skips Syncany-internal folders, locked files and ignored paths.
	 */
	private class StatusFileVisitor implements FileVisitor<Path> {
		private Path root;
		private ChangeSet changeSet;
		private Map<String, FileVersion> currentFileTree;

		public StatusFileVisitor(Path root, Map<String, FileVersion> currentFileTree) {
			this.root = root;
			this.changeSet = new ChangeSet();
			this.currentFileTree = currentFileTree;
		}

		public ChangeSet getChangeSet() {
			return changeSet;
		}

		@Override
		public FileVisitResult visitFile(Path actualLocalFile, BasicFileAttributes attrs) throws IOException {
			String relativeFilePath = FileUtil.getRelativeDatabasePath(root.toFile(), actualLocalFile.toFile()); //root.relativize(actualLocalFile).toString();

			// Skip Syncany root folder
			if (actualLocalFile.toFile().equals(config.getLocalDir())) {
				return FileVisitResult.CONTINUE;
			}

			// Skip .syncany (or app related acc. to config)
			boolean isAppRelatedDir =
				   actualLocalFile.toFile().equals(config.getAppDir())
				|| actualLocalFile.toFile().equals(config.getCache())
				|| actualLocalFile.toFile().equals(config.getDatabaseDir())
				|| actualLocalFile.toFile().equals(config.getLogDir());

			if (isAppRelatedDir) {
				logger.log(Level.FINEST, "- Ignoring file (syncany app-related): {0}", relativeFilePath);
				return FileVisitResult.SKIP_SUBTREE;
			}

			// Check if file is locked; locked files are silently skipped
			// (they will be picked up on a later run).
			boolean fileLocked = FileUtil.isFileLocked(actualLocalFile.toFile());

			if (fileLocked) {
				logger.log(Level.FINEST, "- Ignoring file (locked): {0}", relativeFilePath);
				return FileVisitResult.CONTINUE;
			}

			// Check database by file path
			FileVersion expectedLastFileVersion = currentFileTree.get(relativeFilePath);

			if (expectedLastFileVersion != null) {
				// Compare (checksum only computed when forced by options)
				boolean forceChecksum = options != null && options.isForceChecksum();
				FileVersionComparison fileVersionComparison = fileVersionComparator.compare(expectedLastFileVersion, actualLocalFile.toFile(), forceChecksum);

				if (fileVersionComparison.areEqual()) {
					changeSet.getUnchangedFiles().add(relativeFilePath);
				}
				else {
					changeSet.getChangedFiles().add(relativeFilePath);
				}
			}
			else {
				// Unknown to the database: new file, unless ignored.
				if (!config.getIgnoredFiles().isFileIgnored(relativeFilePath)) {
					changeSet.getNewFiles().add(relativeFilePath);
					logger.log(Level.FINEST, "- New file: "+relativeFilePath);
				}
				else {
					logger.log(Level.FINEST, "- Ignoring file; " + relativeFilePath);
					return FileVisitResult.SKIP_SUBTREE;
				}
			}

			// Check if file is symlink directory; its contents are never
			// followed to avoid cycles/duplication.
			boolean isSymlinkDir = attrs.isDirectory() && attrs.isSymbolicLink();

			if (isSymlinkDir) {
				logger.log(Level.FINEST, "   + File is sym. directory. Skipping subtree.");
				return FileVisitResult.SKIP_SUBTREE;
			}
			else {
				return FileVisitResult.CONTINUE;
			}
		}

		@Override public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException {
			// Directories are classified exactly like files.
			return visitFile(dir, attrs);
		}

		@Override
		public FileVisitResult visitFileFailed(Path file, IOException exc) throws IOException {
			// Unreadable entries are skipped rather than failing the scan.
			return FileVisitResult.CONTINUE;
		}

		@Override
		public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException {
			return FileVisitResult.CONTINUE;
		}
	}
}
| gpl-3.0 |
mban94/mixerp | src/Libraries/Logic/MixERP.Net.Common/Helpers/CatalogHelper.cs | 808 | using MixERP.Net.Framework;
using MixERP.Net.i18n.Resources;
using System.Collections.Generic;
using System.Linq;
namespace MixERP.Net.Common.Helpers
{
public static class CatalogHelper
{
public static void ValidateCatalog(string catalog)
{
if (string.IsNullOrWhiteSpace(catalog))
{
return;
}
string catalogs = ConfigurationHelper.GetDbServerParameter("Catalogs");
string meta = ConfigurationHelper.GetDbServerParameter("MetaDatabase");
List<string> list = catalogs.Split(',').Select(p => p.Trim()).ToList();
list.Add(meta);
if (!list.Contains(catalog))
{
throw new MixERPException(Titles.AccessIsDenied);
}
}
}
} | gpl-3.0 |
wormzjl/PneumaticCraft | api/powercrystals/minefactoryreloaded/api/HarvestType.java | 1112 | package powercrystals.minefactoryreloaded.api;
/**
* @author PowerCrystals
*
* Determines what algorithm the Harvester uses when it encounters this IFactoryHarvestable in the world.
*/
public enum HarvestType
{
	// NOTE: external code may persist or switch on the ordinal of these
	// constants - append new values at the end rather than reordering.

	/**
	 * Just break the single block - no special action needed. e.g. Carrots, flowers, wheat.
	 */
	Normal,
	/**
	 * Search for harvestable blocks adjacent to this block but leave this block. e.g. Pumpkin, melon
	 */
	Gourd,
	/**
	 * Search for identical blocks above.
	 */
	Column,
	/**
	 * Search for identical blocks above but leave the bottom one for the future. e.g. Cactus, sugarcane.
	 */
	LeaveBottom,
	/**
	 * This block is the base of a tree and the harvester should enter tree-cutting mode.
	 */
	Tree,
	/**
	 * This block is the base of the tree and the harvester should enter tree-cutting mode.
	 * The tree is searched for in the negative y axis instead.
	 */
	TreeFlipped,
	/**
	 * This block is part of a tree as above.
	 */
	TreeLeaf,
	/**
	 * This block is part of a tree as above, but fruits are cut before logs. e.g. cocoa
	 * The tree is not searched for.
	 */
	TreeFruit
}
| gpl-3.0 |
ozit/dolibarr | htdocs/includes/ckeditor/_source/plugins/scayt/lang/en-ca.js | 1734 | /*
Copyright (c) 2003-2012, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.html or http://ckeditor.com/license
*/
// Canadian English (en-ca) localisation strings for the CKEditor "scayt"
// plugin. The trailing "// MISSING" markers are generated by CKEditor's
// translation tooling and flag entries still falling back to the base
// English text - leave them in place until the strings are localised.
CKEDITOR.plugins.setLang( 'scayt', 'en-ca', {
	about: 'About SCAYT', // MISSING
	aboutTab: 'About', // MISSING
	addWord: 'Add Word', // MISSING
	allCaps: 'Ignore All-Caps Words', // MISSING
	dic_create: 'Create', // MISSING
	dic_delete: 'Delete', // MISSING
	dic_field_name: 'Dictionary name', // MISSING
	dic_info: 'Initially the User Dictionary is stored in a Cookie. However, Cookies are limited in size. When the User Dictionary grows to a point where it cannot be stored in a Cookie, then the dictionary may be stored on our server. To store your personal dictionary on our server you should specify a name for your dictionary. If you already have a stored dictionary, please type its name and click the Restore button.', // MISSING
	dic_rename: 'Rename', // MISSING
	dic_restore: 'Restore', // MISSING
	dictionariesTab: 'Dictionaries', // MISSING
	disable: 'Disable SCAYT', // MISSING
	emptyDic: 'Dictionary name should not be empty.', // MISSING
	enable: 'Enable SCAYT', // MISSING
	ignore: 'Ignore', // MISSING
	ignoreAll: 'Ignore All', // MISSING
	ignoreDomainNames: 'Ignore Domain Names', // MISSING
	langs: 'Languages', // MISSING
	languagesTab: 'Languages', // MISSING
	mixedCase: 'Ignore Words with Mixed Case', // MISSING
	mixedWithDigits: 'Ignore Words with Numbers', // MISSING
	moreSuggestions: 'More suggestions', // MISSING
	opera_title: 'Not supported by Opera', // MISSING
	options: 'Options', // MISSING
	optionsTab: 'Options', // MISSING
	title: 'Spell Check As You Type', // MISSING
	toggle: 'Toggle SCAYT',// MISSING
	noSuggestions: 'No suggestion' // MISSING
});
michath/ConMonkey | js/src/jit-test/tests/ion/bug889186.js | 110 | function f()
{
return (4 >>> 0) > ((0 % (1 == 2)) >>> 0);
}
assertEq(f(), true);
assertEq(f(), true);
| mpl-2.0 |
// Regression test for bug 637014. newGlobal() and makeFinalizeObserver()
// are SpiderMonkey shell builtins: create a fresh global and install a
// finalize observer in it. The test passes as long as this sequence runs
// without crashing (presumably a GC/finalization regression - see the bug).
var o = newGlobal();
o.makeFinalizeObserver();
| mpl-2.0 |
NeverMin/wuxiapptec | var/httpd/htdocs/js/Core.Form.ErrorTooltips.js | 5492 | // --
// Core.Form.ErrorTooltips.js - provides provides Tooltip functions
// Copyright (C) 2001-2012 OTRS AG, http://otrs.org/
// --
// This software comes with ABSOLUTELY NO WARRANTY. For details, see
// the enclosed file COPYING for license information (AGPL). If you
// did not receive this file, see http://www.gnu.org/licenses/agpl.txt.
// --
"use strict";
var Core = Core || {};
Core.Form = Core.Form || {};
/**
* @namespace
* @exports TargetNS as Core.Form.ErrorTooltips
* @description
* This namespace contains the Tooltip initialization functions
*/
Core.Form.ErrorTooltips = (function (TargetNS) {
var TooltipContainerID = 'OTRS_UI_Tooltips_ErrorTooltip',
TooltipOffsetTop = 20,
TooltipOffsetLeft = 20,
TongueClass = 'TongueLeft',
$TooltipContent = $('<div class="Content" role="tooltip"></div>'),
$Tooltip,
Offset;
/**
* @function
* @private
* @param {jQueryObject} $Element jquery object
* @param {String} TooltipContent The string content that will be show in tooltip
* @return nothing
* This function shows the tooltip for an element with a certain content
*/
function ShowTooltip($Element, TooltipContent) {
var $TooltipContainer = $('#' + TooltipContainerID);
if (!$TooltipContainer.length) {
$('body').append('<div id="' + TooltipContainerID + '" class="TooltipContainer"></div>');
$TooltipContainer = $('#' + TooltipContainerID);
}
/*
* Now determine if the tongue needs to be right or left, depending on the
* position of the target element on the screen.
*/
if (($(document).width() - $Element.offset().left) < 250) {
TongueClass = 'TongueRight';
}
/*
* Now create and fill the tooltip with the error message.
*/
$Tooltip = $('<div class="Tooltip ' + TongueClass + '"><div class="Tongue"></div></div>');
$TooltipContent.html(TooltipContent);
$Tooltip.append($TooltipContent);
Offset = $Element.offset();
$TooltipContainer
.empty()
.append($Tooltip)
.css('left', Offset.left + TooltipOffsetLeft)
.css('top', Offset.top + TooltipOffsetTop)
.show();
}
/**
* @function
* @private
* @return nothing
* This function hides the tooltip for an element
*/
TargetNS.HideTooltip = function() {
$('#' + TooltipContainerID).hide().empty();
};
/**
* @function
* @description
* This function initializes the tooltips on an input field
* @param {jQueryObject} $Elements
* The elements (within a jQuery object) for whom the tooltips are initialized.
* @param {String} TooltipContent
* Content of the tooltip, may contain HTML.
* @return nothing
*/
TargetNS.InitTooltip = function ($Element, TooltipContent) {
$Element.unbind('focus.Tooltip');
$Element.bind('focus.Tooltip', function () {
ShowTooltip($Element, TooltipContent);
});
$Element.unbind('blur.Tooltip');
$Element.bind('blur.Tooltip', TargetNS.HideTooltip);
};
/**
* @function
* @description
* This function removes the tooltip from an input field
* @param {jQueryObject} $Element
* The elements (within a jQuery object) for whom the tooltips are removed.
* @return nothing
*/
TargetNS.RemoveTooltip = function ($Element) {
TargetNS.HideTooltip();
$Element.unbind('focus.Tooltip');
$Element.unbind('blur.Tooltip');
};
/**
* @function
* @private
* @return nothing
* This function shows the tooltip for a rich text editor
*/
function ShowRTETooltip(Event) {
ShowTooltip($('#cke_' + Event.listenerData.ElementID + ' .cke_contents'), Event.listenerData.Message);
}
/**
* @function
* @private
* @return nothing
* This function remove the tooltip from a rich text editor
*/
function RemoveRTETooltip(Event) {
TargetNS.HideTooltip();
}
/**
* @function
* @description
* This function initializes the necessary stuff for a tooltip in a rich text editor
* @param {jQueryObject} $Element
* The RTE element for whom the tooltips are inicialized.
* @param {String} Message
* The string content that will be show in tooltip
* @return nothing
*/
TargetNS.InitRTETooltip = function ($Element, Message) {
var ElementID = $Element.attr('id');
CKEDITOR.instances[ElementID].on('focus', ShowRTETooltip, null, {ElementID: ElementID, Message: Message});
CKEDITOR.instances[ElementID].on('blur', RemoveRTETooltip, null, ElementID);
};
/**
* @function
* @description
* This function removes the tooltip in a rich text editor
* @param {jQueryObject} $Element
* The RTE element for whom the tooltips are removed.
* @return nothing
*/
TargetNS.RemoveRTETooltip = function ($Element) {
var ElementID = $Element.attr('id');
CKEDITOR.instances[ElementID].removeListener('focus', ShowRTETooltip);
CKEDITOR.instances[ElementID].removeListener('blur', RemoveRTETooltip);
TargetNS.HideTooltip();
};
return TargetNS;
}(Core.Form.ErrorTooltips || {}));
| agpl-3.0 |
zidootech/xbmc4zidoo | kodi_14.1_release/xbmc/NfoFile.cpp | 4888 | /*
* Copyright (C) 2005-2013 Team XBMC
* http://xbmc.org
*
* This Program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This Program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with XBMC; see the file COPYING. If not, see
* <http://www.gnu.org/licenses/>.
*
*/
// NfoFile.cpp: implementation of the CNfoFile class.
//
//////////////////////////////////////////////////////////////////////
#include "NfoFile.h"
#include "video/VideoInfoDownloader.h"
#include "addons/AddonManager.h"
#include "filesystem/File.h"
#include "FileItem.h"
#include "music/Album.h"
#include "music/Artist.h"
#include "settings/Settings.h"
#include "utils/log.h"
#include <vector>
using namespace std;
using namespace XFILE;
using namespace ADDON;
// Loads the nfo file at strPath and classifies how much information it
// yields for the given scraper:
//   FULL_NFO     - the file itself contained parseable details, no URL
//   URL_NFO      - no parseable details, but a scraper recognised an URL
//   COMBINED_NFO - both parseable details and a scraper URL
//   NO_NFO       - neither; ERROR_NFO - a scraper error the user kept
// 'episode' (> -1, TV shows only) selects a specific <episodedetails>
// entry in a multi-episode nfo.
CNfoFile::NFOResult CNfoFile::Create(const CStdString& strPath, const ScraperPtr& info, int episode)
{
  m_info = info; // assume we can use these settings
  m_type = ScraperTypeFromContent(info->Content());
  if (FAILED(Load(strPath)))
    return NO_NFO;
  CFileItemList items;
  bool bNfo=false;  // true once GetDetails() parsed usable info from m_doc
  AddonPtr addon;
  ScraperPtr defaultScraper;
  if (CAddonMgr::Get().GetDefault(m_type, addon))
    defaultScraper = boost::dynamic_pointer_cast<CScraper>(addon);
  // Try to parse the document as structured XML for the content type.
  if (m_type == ADDON_SCRAPER_ALBUMS)
  {
    CAlbum album;
    bNfo = GetDetails(album);
  }
  else if (m_type == ADDON_SCRAPER_ARTISTS)
  {
    CArtist artist;
    bNfo = GetDetails(artist);
  }
  else if (m_type == ADDON_SCRAPER_TVSHOWS || m_type == ADDON_SCRAPER_MOVIES || m_type == ADDON_SCRAPER_MUSICVIDEOS)
  {
    // first check if it's an XML file with the info we need
    CVideoInfoTag details;
    bNfo = GetDetails(details);
    if (episode > -1 && bNfo && m_type == ADDON_SCRAPER_TVSHOWS)
    {
      // Scan successive <episodedetails> blocks (m_headPos advances through
      // m_doc) until the requested episode number is found or the document
      // is exhausted.
      int infos=0;
      while (m_headPos != std::string::npos && details.m_iEpisode != episode)
      {
        m_headPos = m_doc.find("<episodedetails", m_headPos + 1);
        if (m_headPos == std::string::npos)
          break;
        bNfo = GetDetails(details);
        infos++;
      }
      if (details.m_iEpisode != episode)
      {
        // Requested episode not present: discard what was parsed ...
        bNfo = false;
        details.Reset();
        m_headPos = 0;
        if (infos == 1) // still allow differing nfo/file numbers for single ep nfo's
          bNfo = GetDetails(details);
      }
    }
  }
  // Build the ordered list of scrapers to probe for an URL in the nfo text.
  vector<ScraperPtr> vecScrapers;
  // add selected scraper - first priority
  if (m_info)
    vecScrapers.push_back(m_info);
  // Add all scrapers except selected and default
  VECADDONS addons;
  CAddonMgr::Get().GetAddons(m_type,addons);
  for (unsigned i = 0; i < addons.size(); ++i)
  {
    ScraperPtr scraper = boost::dynamic_pointer_cast<CScraper>(addons[i]);
    // skip if scraper requires settings and there's nothing set yet
    if (scraper->RequiresSettings() && !scraper->HasUserSettings())
      continue;
    if( (!m_info || m_info->ID() != scraper->ID()) && (!defaultScraper || defaultScraper->ID() != scraper->ID()) )
      vecScrapers.push_back(scraper);
  }
  // add default scraper - not user selectable so it's last priority
  if( defaultScraper && (!m_info || m_info->ID() != defaultScraper->ID()) &&
      ( !defaultScraper->RequiresSettings() || defaultScraper->HasUserSettings() ) )
    vecScrapers.push_back(defaultScraper);
  // Probe scrapers in priority order; stop on first match (0) or error (2).
  int res = -1;
  for (unsigned int i=0;i<vecScrapers.size();++i)
    if ((res = Scrape(vecScrapers[i])) == 0 || res == 2)
      break;
  if (res == 2)
    return ERROR_NFO;
  if (bNfo)
    return m_scurl.m_url.empty() ? FULL_NFO : COMBINED_NFO;
  return m_scurl.m_url.empty() ? NO_NFO : URL_NFO;
}
// Return codes: 0 = success (scraper accepted, possibly with an empty URL
// for noop scrapers); 1 = no result, skip this scraper; 2 = hard error
// (the user chose not to ignore a scraper failure).
int CNfoFile::Scrape(ScraperPtr& scraper)
{
  // A "noop" scraper deliberately resolves to an empty URL and counts as a
  // successful match, ending the search immediately.
  if (scraper->IsNoop())
  {
    m_scurl = CScraperUrl();
    return 0;
  }

  // Only scrapers matching the content type detected in Create() apply.
  if (scraper->Type() != m_type)
    return 1;

  scraper->ClearCache();

  try
  {
    m_scurl = scraper->NfoUrl(m_doc);
  }
  catch (const CScraperError &sce)
  {
    CVideoInfoDownloader::ShowErrorDialog(sce);
    if (!sce.FAborted())
      return 2;
  }

  // Remember which scraper produced the URL, if any.
  bool bFound = !m_scurl.m_url.empty();
  if (bFound)
    SetScraperInfo(scraper);
  return bFound ? 0 : 1;
}
// Reads the whole file into m_doc and resets the scan position.
// Returns 0 on success, 1 on failure (m_doc is left empty).
int CNfoFile::Load(const CStdString& strFile)
{
  Close();

  XFILE::CFile file;
  XFILE::auto_buffer buf;
  if (file.LoadFile(strFile, buf) <= 0)
  {
    // Empty file or read error: make sure no stale document remains.
    m_doc.clear();
    return 1;
  }

  m_doc.assign(buf.get(), buf.size());
  m_headPos = 0;
  return 0;
}
// Resets all parsing state: the cached nfo document, the scan position used
// while walking multi-episode entries, and any scraper URL found so far.
void CNfoFile::Close()
{
  m_doc.clear();
  m_headPos = 0;
  m_scurl.Clear();
}
| lgpl-2.1 |
PatidarWeb/opencms-core | modules/org.opencms.workplace/resources/system/workplace/resources/components/widgets/multiselector.js | 547 | function toggleMultiSelectWidget(source) {
function onChange(evt){
var value=$(this).val();
// set the value on the hidden input
$(this).siblings("input:hidden").val(value);
}
// check if the checkbox is checked
if ($(source).attr("checked")) {
// the checkbox is checked, enable select box
$(source).siblings("select").attr("disabled", false).click(onChange);
} else {
// the checkbox is not checked
// disable select box
$(source).siblings("select").attr("disabled", true).unbind("click", onChange);
}
} | lgpl-2.1 |
afsinka/jet_jsp | src/main/webapp/js/libs/oj/v1.1.2/resources/nls/ja/localeElements.js | 5835 | define({main:{ja:{identity:{version:{_cldrVersion:"24",_number:"$Revision: 9287 $"},generation:{_date:"$Date: 2013-08-28 21:32:04 -0500 (Wed, 28 Aug 2013) $"},language:"ja"},dates:{calendars:{gregorian:{months:{format:{abbreviated:{1:"1月",2:"2月",3:"3月",4:"4月",5:"5月",6:"6月",7:"7月",8:"8月",9:"9月",10:"10月",11:"11月",12:"12月"},narrow:{1:"1",2:"2",3:"3",4:"4",5:"5",6:"6",7:"7",8:"8",9:"9",10:"10",11:"11",12:"12"},wide:{1:"1月",2:"2月",3:"3月",4:"4月",5:"5月",6:"6月",7:"7月",8:"8月",9:"9月",10:"10月",11:"11月",12:"12月"}},
"stand-alone":{abbreviated:{1:"1月",2:"2月",3:"3月",4:"4月",5:"5月",6:"6月",7:"7月",8:"8月",9:"9月",10:"10月",11:"11月",12:"12月"},narrow:{1:"1",2:"2",3:"3",4:"4",5:"5",6:"6",7:"7",8:"8",9:"9",10:"10",11:"11",12:"12"},wide:{1:"1月",2:"2月",3:"3月",4:"4月",5:"5月",6:"6月",7:"7月",8:"8月",9:"9月",10:"10月",11:"11月",12:"12月"}}},days:{format:{abbreviated:{sun:"日",mon:"月",tue:"火",wed:"水",thu:"木",fri:"金",sat:"土"},narrow:{sun:"日",mon:"月",tue:"火",wed:"水",thu:"木",fri:"金",sat:"土"},wide:{sun:"日曜日",mon:"月曜日",tue:"火曜日",wed:"水曜日",thu:"木曜日",
fri:"金曜日",sat:"土曜日"}},"stand-alone":{abbreviated:{sun:"日",mon:"月",tue:"火",wed:"水",thu:"木",fri:"金",sat:"土"},narrow:{sun:"日",mon:"月",tue:"火",wed:"水",thu:"木",fri:"金",sat:"土"},wide:{sun:"日曜日",mon:"月曜日",tue:"火曜日",wed:"水曜日",thu:"木曜日",fri:"金曜日",sat:"土曜日"}}},dayPeriods:{format:{wide:{am:"午前",pm:"午後"}}},eras:{eraAbbr:{0:"紀元前",1:"西暦"}},dateFormats:{full:"y年M月d日EEEE","long":"y年M月d日",medium:"y/MM/dd","short":"y/MM/dd"},timeFormats:{full:"H時mm分ss秒 zzzz","long":"H:mm:ss z",medium:"H:mm:ss","short":"H:mm"},dateTimeFormats:{full:"{1} {0}",
"long":"{1} {0}",medium:"{1} {0}","short":"{1} {0}",availableFormats:{d:"d日",Ed:"d日(E)",EEEEd:"d日EEEE",Ehm:"a K 時 mm 分 (E)",EHm:"HH 時 mm 分 (E)",Ehms:"a K 時 mm 分 ss 秒 (E)",EHms:"HH 時 mm 分 ss 秒 (E)",Gy:"Gy年",GyMMM:"Gy年M月",GyMMMd:"Gy年M月d日",GyMMMEd:"Gy年M月d日(E)",GyMMMEEEEd:"Gy年M月d日EEEE",h:"aK時",H:"H時",hm:"aK:mm",Hm:"H:mm",hms:"aK:mm:ss",Hms:"H:mm:ss",M:"M月",Md:"M/d",MEd:"M/d(E)",MEEEEd:"M/dEEEE",MMM:"M月",MMMd:"M月d日",MMMEd:"M月d日(E)",MMMEEEEd:"M月d日EEEE",ms:"mm:ss",y:"y年",yM:"y/M",yMd:"y/M/d",yMEd:"y/M/d(E)",
yMEEEEd:"y/M/dEEEE",yMM:"y/MM",yMMM:"y年M月",yMMMd:"y年M月d日",yMMMEd:"y年M月d日(E)",yMMMEEEEd:"y年M月d日EEEE",yQQQ:"y/QQQ",yQQQQ:"yQQQQ"}}}},fields:{era:{displayName:"時代"},year:{displayName:"年","relative-type--1":"昨年","relative-type-0":"今年","relative-type-1":"翌年"},month:{displayName:"月","relative-type--1":"先月","relative-type-0":"今月","relative-type-1":"翌月"},week:{displayName:"週","relative-type--1":"先週","relative-type-0":"今週","relative-type-1":"翌週"},day:{displayName:"日","relative-type--1":"昨日","relative-type-0":"今日",
"relative-type-1":"明日"},weekday:{displayName:"曜日"},dayperiod:{displayName:"午前/午後"},hour:{displayName:"時"},minute:{displayName:"分"},second:{displayName:"秒"},zone:{displayName:"タイムゾーン"}}},numbers:{defaultNumberingSystem:"latn",otherNumberingSystems:{"native":"latn",traditional:"jpan",finance:"jpanfin"},"symbols-numberSystem-latn":{decimal:".",group:",",list:";",percentSign:"%",plusSign:"+",minusSign:"-",exponential:"E",perMille:"‰",infinity:"∞",nan:"NaN"},"decimalFormats-numberSystem-latn":{standard:"#,##0.###",
"long":{decimalFormat:{"1000-count-other":"0千","10000-count-other":"0万","100000-count-other":"00万","1000000-count-other":"000万","10000000-count-other":"0000万","100000000-count-other":"0億","1000000000-count-other":"00億","10000000000-count-other":"000億","100000000000-count-other":"0000億","1000000000000-count-other":"0兆","10000000000000-count-other":"00兆","100000000000000-count-other":"000兆"}},"short":{decimalFormat:{"1000-count-other":"0千","10000-count-other":"0万","100000-count-other":"00万","1000000-count-other":"000万",
"10000000-count-other":"0000万","100000000-count-other":"0億","1000000000-count-other":"00億","10000000000-count-other":"000億","100000000000-count-other":"0000億","1000000000000-count-other":"0兆","10000000000000-count-other":"00兆","100000000000000-count-other":"000兆"}}},"percentFormats-numberSystem-latn":{standard:"#,##0%"},"currencyFormats-numberSystem-latn":{standard:"¤#,##0.00","unitPattern-count-other":"{0} {1}"},currencies:{AUD:{displayName:"オーストラリア ドル",symbol:"AU$"},BRL:{displayName:"ブラジル レアル",
symbol:"R$"},CAD:{displayName:"カナダ ドル",symbol:"CA$"},CHF:{displayName:"スイス フラン",symbol:"CHF"},CNY:{displayName:"中国人民元",symbol:"元"},DKK:{displayName:"デンマーク クローネ",symbol:"DKK"},EUR:{displayName:"ユーロ",symbol:"€"},GBP:{displayName:"英国ポンド",symbol:"£"},HKD:{displayName:"香港ドル",symbol:"HK$"},IDR:{displayName:"インドネシア ルピア",symbol:"IDR"},INR:{displayName:"インド ルピー",symbol:"₹"},JPY:{displayName:"日本円",symbol:"¥"},KRW:{displayName:"韓国 ウォン",symbol:"₩"},MXN:{displayName:"メキシコ ペソ",symbol:"MX$"},NOK:{displayName:"ノルウェー クローネ",
symbol:"NOK"},PLN:{displayName:"ポーランド ズウォティ",symbol:"PLN"},RUB:{displayName:"ロシア ルーブル",symbol:"RUB"},SAR:{displayName:"サウジ リヤル",symbol:"SAR"},SEK:{displayName:"スウェーデン クローナ",symbol:"SEK"},THB:{displayName:"タイ バーツ",symbol:"฿"},TRY:{displayName:"新トルコリラ",symbol:"TRY"},TWD:{displayName:"新台湾ドル",symbol:"NT$"},USD:{displayName:"米ドル",symbol:"$"},ZAR:{displayName:"南アフリカ ランド",symbol:"ZAR"}}}}}}); | lgpl-2.1 |
JiriOndrusek/wildfly-core | host-controller/src/main/java/org/jboss/as/domain/controller/operations/coordination/DomainControllerLockIdUtils.java | 1990 | /*
*
* * JBoss, Home of Professional Open Source.
* * Copyright 2013, Red Hat, Inc., and individual contributors
* * as indicated by the @author tags. See the copyright.txt file in the
* * distribution for a full listing of individual contributors.
* *
* * This is free software; you can redistribute it and/or modify it
* * under the terms of the GNU Lesser General Public License as
* * published by the Free Software Foundation; either version 2.1 of
* * the License, or (at your option) any later version.
* *
* * This software is distributed in the hope that it will be useful,
* * but WITHOUT ANY WARRANTY; without even the implied warranty of
* * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* * Lesser General Public License for more details.
* *
* * You should have received a copy of the GNU Lesser General Public
* * License along with this software; if not, write to the Free
* * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
*
*/
package org.jboss.as.domain.controller.operations.coordination;
import org.jboss.as.controller.OperationContext.AttachmentKey;
/**
* Contains operation headers and attachments used to communicate the domain controller's lock id to the slaves.
*
* @author <a href="[email protected]">Kabir Khan</a>
*/
public final class DomainControllerLockIdUtils {

    /**
     * The domain controller lock id header sent by the DC to the slave host. This is used by the slaves
     * to latch onto any ongoing operation in the DC.
     */
    public static final String DOMAIN_CONTROLLER_LOCK_ID = "domain-controller-lock-id";

    /**
     * The attachment used by the slave to keep track of the lock id on the DC (if any)
     */
    public static final AttachmentKey<Integer> DOMAIN_CONTROLLER_LOCK_ID_ATTACHMENT = AttachmentKey.create(Integer.class);

    // Constants-only utility class; never instantiated.
    private DomainControllerLockIdUtils() {
    }
}
| lgpl-2.1 |
JiriOndrusek/wildfly-core | testsuite/rbac/src/test/java/org/jboss/as/test/integration/mgmt/access/StandardUsersSetupTask.java | 1781 | /*
*
* JBoss, Home of Professional Open Source.
* Copyright 2014, Red Hat, Inc., and individual contributors
* as indicated by the @author tags. See the copyright.txt file in the
* distribution for a full listing of individual contributors.
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this software; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA, or see the FSF site: http://www.fsf.org.
* /
*/
package org.jboss.as.test.integration.mgmt.access;
import org.jboss.as.test.integration.management.rbac.UserRolesMappingServerSetupTask;
import org.wildfly.core.testrunner.ManagementClient;
import org.wildfly.core.testrunner.ServerSetupTask;
/**
* @author Tomaz Cerar (c) 2014 Red Hat Inc.
*/
public class StandardUsersSetupTask extends UserRolesMappingServerSetupTask.StandardUsersSetup implements ServerSetupTask {

    /**
     * Adapts the inherited setup to the {@link ServerSetupTask} lifecycle by
     * delegating to the overload that takes the underlying controller client.
     */
    @Override
    public void setup(ManagementClient managementClient) throws Exception {
        setup(managementClient.getControllerClient());
    }

    /**
     * Mirror of {@link #setup(ManagementClient)} for teardown: delegates to
     * the inherited overload with the unwrapped controller client.
     */
    @Override
    public void tearDown(ManagementClient managementClient) throws Exception {
        tearDown(managementClient.getControllerClient());
    }
}
| lgpl-2.1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.