repo_name | path | size | content | license
---|---|---|---|---|
drbeermann/cordova-docs | docs/en/2.9.0/cordova/globalization/globalization.dateToString.md | 3115 | ---
license: >
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
---
globalization.dateToString
===========
Returns a date formatted as a string according to the client's locale and timezone.
navigator.globalization.dateToString(date, successCallback, errorCallback, options);
Description
-----------
Returns the formatted date `String` via a `value` property accessible
from the object passed as a parameter to the `successCallback`.
The inbound `date` parameter should be of type `Date`.
If there is an error formatting the date, then the `errorCallback`
executes with a `GlobalizationError` object as a parameter. The
error's expected code is `GlobalizationError.FORMATTING_ERROR`.
The `options` parameter is optional, and its default values are:
{formatLength:'short', selector:'date and time'}
The `options.formatLength` can be `short`, `medium`, `long`, or `full`.
The `options.selector` can be `date`, `time` or `date and time`.
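For instance, to format only the date portion with a medium-length pattern, you could pass (illustrative values):
navigator.globalization.dateToString(
new Date(),
function (date) { alert('date: ' + date.value + '\n'); },
function () { alert('Error getting dateString\n'); },
{ formatLength: 'medium', selector: 'date' }
);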
Supported Platforms
-------------------
- Android
- BlackBerry WebWorks (OS 5.0 and higher)
- iOS
- Windows Phone 8
Quick Example
-------------
If the browser is set to the `en_US` locale, this displays a popup
dialog with text similar to `date: 9/25/2012 4:21PM` using the default
options:
navigator.globalization.dateToString(
new Date(),
function (date) { alert('date: ' + date.value + '\n'); },
function () { alert('Error getting dateString\n'); },
{ formatLength: 'short', selector: 'date and time' }
);
Full Example
------------
<!DOCTYPE HTML>
<html>
<head>
<title>dateToString Example</title>
<script type="text/javascript" charset="utf-8" src="cordova-x.x.x.js"></script>
<script type="text/javascript" charset="utf-8">
function checkDateString() {
navigator.globalization.dateToString(
new Date(),
function (date) {alert('date: ' + date.value + '\n');},
function () {alert('Error getting dateString\n');},
{formatLength:'short', selector:'date and time'}
);
}
</script>
</head>
<body>
<button onclick="checkDateString()">Click for date string</button>
</body>
</html>
Windows Phone 8 Quirks
--------------
- The `formatLength` option supports only `short` and `full` values.
| apache-2.0 |
danmcp/source-to-image | vendor/k8s.io/kubernetes/pkg/api/v1/conversion.go | 21451 | /*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"fmt"
"reflect"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/conversion"
"k8s.io/kubernetes/pkg/runtime"
"speter.net/go/exp/math/dec/inf"
)
const (
// Annotation key used to identify mirror pods.
mirrorAnnotationKey = "kubernetes.io/config.mirror"
// Value used to identify mirror pods from pre-v1.1 kubelet.
mirrorAnnotationValue_1_0 = "mirror"
)
func addConversionFuncs(scheme *runtime.Scheme) {
// Add non-generated conversion functions
err := scheme.AddConversionFuncs(
Convert_api_Pod_To_v1_Pod,
Convert_api_PodSpec_To_v1_PodSpec,
Convert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec,
Convert_api_ServiceSpec_To_v1_ServiceSpec,
Convert_v1_Pod_To_api_Pod,
Convert_v1_PodSpec_To_api_PodSpec,
Convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec,
Convert_v1_ServiceSpec_To_api_ServiceSpec,
Convert_v1_ResourceList_To_api_ResourceList,
Convert_api_VolumeSource_To_v1_VolumeSource,
Convert_v1_VolumeSource_To_api_VolumeSource,
Convert_v1_SecurityContextConstraints_To_api_SecurityContextConstraints,
Convert_api_SecurityContextConstraints_To_v1_SecurityContextConstraints,
)
if err != nil {
// If one of the conversion functions is malformed, detect it immediately.
panic(err)
}
// Add field label conversions for kinds whose only selectable fields are ObjectMeta fields.
for _, kind := range []string{
"Endpoints",
"ResourceQuota",
"PersistentVolumeClaim",
"Service",
"ServiceAccount",
"ConfigMap",
} {
err = api.Scheme.AddFieldLabelConversionFunc("v1", kind,
func(label, value string) (string, string, error) {
switch label {
case "metadata.namespace",
"metadata.name":
return label, value, nil
default:
return "", "", fmt.Errorf("field label %q not supported for %q", label, kind)
}
})
if err != nil {
// If one of the conversion functions is malformed, detect it immediately.
panic(err)
}
}
// Add field conversion funcs.
err = api.Scheme.AddFieldLabelConversionFunc("v1", "Pod",
func(label, value string) (string, string, error) {
switch label {
case "metadata.name",
"metadata.namespace",
"metadata.labels",
"metadata.annotations",
"status.phase",
"status.podIP",
"spec.nodeName",
"spec.restartPolicy":
return label, value, nil
// This is for backwards compatibility with old v1 clients which send spec.host
case "spec.host":
return "spec.nodeName", value, nil
default:
return "", "", fmt.Errorf("field label not supported: %s", label)
}
})
if err != nil {
// If one of the conversion functions is malformed, detect it immediately.
panic(err)
}
err = api.Scheme.AddFieldLabelConversionFunc("v1", "Node",
func(label, value string) (string, string, error) {
switch label {
case "metadata.name":
return label, value, nil
case "spec.unschedulable":
return label, value, nil
default:
return "", "", fmt.Errorf("field label not supported: %s", label)
}
})
if err != nil {
// If one of the conversion functions is malformed, detect it immediately.
panic(err)
}
err = api.Scheme.AddFieldLabelConversionFunc("v1", "ReplicationController",
func(label, value string) (string, string, error) {
switch label {
case "metadata.name",
"metadata.namespace",
"status.replicas":
return label, value, nil
default:
return "", "", fmt.Errorf("field label not supported: %s", label)
}
})
if err != nil {
// If one of the conversion functions is malformed, detect it immediately.
panic(err)
}
err = api.Scheme.AddFieldLabelConversionFunc("v1", "Event",
func(label, value string) (string, string, error) {
switch label {
case "involvedObject.kind",
"involvedObject.namespace",
"involvedObject.name",
"involvedObject.uid",
"involvedObject.apiVersion",
"involvedObject.resourceVersion",
"involvedObject.fieldPath",
"reason",
"source",
"type",
"metadata.namespace",
"metadata.name":
return label, value, nil
default:
return "", "", fmt.Errorf("field label not supported: %s", label)
}
})
if err != nil {
// If one of the conversion functions is malformed, detect it immediately.
panic(err)
}
err = api.Scheme.AddFieldLabelConversionFunc("v1", "Namespace",
func(label, value string) (string, string, error) {
switch label {
case "status.phase",
"metadata.name":
return label, value, nil
default:
return "", "", fmt.Errorf("field label not supported: %s", label)
}
})
if err != nil {
// If one of the conversion functions is malformed, detect it immediately.
panic(err)
}
err = api.Scheme.AddFieldLabelConversionFunc("v1", "PersistentVolume",
func(label, value string) (string, string, error) {
switch label {
case "metadata.name":
return label, value, nil
default:
return "", "", fmt.Errorf("field label not supported: %s", label)
}
})
if err != nil {
// If one of the conversion functions is malformed, detect it immediately.
panic(err)
}
err = api.Scheme.AddFieldLabelConversionFunc("v1", "Secret",
func(label, value string) (string, string, error) {
switch label {
case "type",
"metadata.namespace",
"metadata.name":
return label, value, nil
default:
return "", "", fmt.Errorf("field label not supported: %s", label)
}
})
if err != nil {
// If one of the conversion functions is malformed, detect it immediately.
panic(err)
}
}
func Convert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(in *api.ReplicationControllerSpec, out *ReplicationControllerSpec, s conversion.Scope) error {
out.Replicas = new(int32)
*out.Replicas = int32(in.Replicas)
if in.Selector != nil {
out.Selector = make(map[string]string)
for key, val := range in.Selector {
out.Selector[key] = val
}
} else {
out.Selector = nil
}
//if in.TemplateRef != nil {
// out.TemplateRef = new(ObjectReference)
// if err := Convert_api_ObjectReference_To_v1_ObjectReference(in.TemplateRef, out.TemplateRef, s); err != nil {
// return err
// }
//} else {
// out.TemplateRef = nil
//}
if in.Template != nil {
out.Template = new(PodTemplateSpec)
if err := Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in.Template, out.Template, s); err != nil {
return err
}
} else {
out.Template = nil
}
return nil
}
func Convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec(in *ReplicationControllerSpec, out *api.ReplicationControllerSpec, s conversion.Scope) error {
out.Replicas = *in.Replicas
if in.Selector != nil {
out.Selector = make(map[string]string)
for key, val := range in.Selector {
out.Selector[key] = val
}
} else {
out.Selector = nil
}
//if in.TemplateRef != nil {
// out.TemplateRef = new(api.ObjectReference)
// if err := Convert_v1_ObjectReference_To_api_ObjectReference(in.TemplateRef, out.TemplateRef, s); err != nil {
// return err
// }
//} else {
// out.TemplateRef = nil
//}
if in.Template != nil {
out.Template = new(api.PodTemplateSpec)
if err := Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in.Template, out.Template, s); err != nil {
return err
}
} else {
out.Template = nil
}
return nil
}
// The following two PodSpec conversions are done here to support ServiceAccount
// as an alias for ServiceAccountName.
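// For example (illustrative values): a v1 PodSpec that sets only the legacy
// serviceAccount field to "builder" converts to an internal PodSpec whose
// ServiceAccountName is "builder"; converting back out populates both fields.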
func Convert_api_PodSpec_To_v1_PodSpec(in *api.PodSpec, out *PodSpec, s conversion.Scope) error {
if in.Volumes != nil {
out.Volumes = make([]Volume, len(in.Volumes))
for i := range in.Volumes {
if err := Convert_api_Volume_To_v1_Volume(&in.Volumes[i], &out.Volumes[i], s); err != nil {
return err
}
}
} else {
out.Volumes = nil
}
if in.Containers != nil {
out.Containers = make([]Container, len(in.Containers))
for i := range in.Containers {
if err := Convert_api_Container_To_v1_Container(&in.Containers[i], &out.Containers[i], s); err != nil {
return err
}
}
} else {
out.Containers = nil
}
out.RestartPolicy = RestartPolicy(in.RestartPolicy)
if in.TerminationGracePeriodSeconds != nil {
out.TerminationGracePeriodSeconds = new(int64)
*out.TerminationGracePeriodSeconds = *in.TerminationGracePeriodSeconds
} else {
out.TerminationGracePeriodSeconds = nil
}
if in.ActiveDeadlineSeconds != nil {
out.ActiveDeadlineSeconds = new(int64)
*out.ActiveDeadlineSeconds = *in.ActiveDeadlineSeconds
} else {
out.ActiveDeadlineSeconds = nil
}
out.DNSPolicy = DNSPolicy(in.DNSPolicy)
if in.NodeSelector != nil {
out.NodeSelector = make(map[string]string)
for key, val := range in.NodeSelector {
out.NodeSelector[key] = val
}
} else {
out.NodeSelector = nil
}
out.ServiceAccountName = in.ServiceAccountName
// DeprecatedServiceAccount is an alias for ServiceAccountName.
out.DeprecatedServiceAccount = in.ServiceAccountName
out.NodeName = in.NodeName
if in.SecurityContext != nil {
out.SecurityContext = new(PodSecurityContext)
if err := Convert_api_PodSecurityContext_To_v1_PodSecurityContext(in.SecurityContext, out.SecurityContext, s); err != nil {
return err
}
// the host namespace fields have to be handled here for backward compatibility
// with v1.0.0
out.HostPID = in.SecurityContext.HostPID
out.HostNetwork = in.SecurityContext.HostNetwork
out.HostIPC = in.SecurityContext.HostIPC
}
if in.ImagePullSecrets != nil {
out.ImagePullSecrets = make([]LocalObjectReference, len(in.ImagePullSecrets))
for i := range in.ImagePullSecrets {
if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.ImagePullSecrets[i], &out.ImagePullSecrets[i], s); err != nil {
return err
}
}
} else {
out.ImagePullSecrets = nil
}
out.Hostname = in.Hostname
out.Subdomain = in.Subdomain
// carry conversion
out.DeprecatedHost = in.NodeName
return nil
}
func Convert_v1_PodSpec_To_api_PodSpec(in *PodSpec, out *api.PodSpec, s conversion.Scope) error {
SetDefaults_PodSpec(in)
if in.Volumes != nil {
out.Volumes = make([]api.Volume, len(in.Volumes))
for i := range in.Volumes {
if err := Convert_v1_Volume_To_api_Volume(&in.Volumes[i], &out.Volumes[i], s); err != nil {
return err
}
}
} else {
out.Volumes = nil
}
if in.Containers != nil {
out.Containers = make([]api.Container, len(in.Containers))
for i := range in.Containers {
if err := Convert_v1_Container_To_api_Container(&in.Containers[i], &out.Containers[i], s); err != nil {
return err
}
}
} else {
out.Containers = nil
}
out.RestartPolicy = api.RestartPolicy(in.RestartPolicy)
if in.TerminationGracePeriodSeconds != nil {
out.TerminationGracePeriodSeconds = new(int64)
*out.TerminationGracePeriodSeconds = *in.TerminationGracePeriodSeconds
} else {
out.TerminationGracePeriodSeconds = nil
}
if in.ActiveDeadlineSeconds != nil {
out.ActiveDeadlineSeconds = new(int64)
*out.ActiveDeadlineSeconds = *in.ActiveDeadlineSeconds
} else {
out.ActiveDeadlineSeconds = nil
}
out.DNSPolicy = api.DNSPolicy(in.DNSPolicy)
if in.NodeSelector != nil {
out.NodeSelector = make(map[string]string)
for key, val := range in.NodeSelector {
out.NodeSelector[key] = val
}
} else {
out.NodeSelector = nil
}
// We support DeprecatedServiceAccount as an alias for ServiceAccountName.
// If both are specified, ServiceAccountName (the new field) wins.
out.ServiceAccountName = in.ServiceAccountName
if in.ServiceAccountName == "" {
out.ServiceAccountName = in.DeprecatedServiceAccount
}
out.NodeName = in.NodeName
// carry conversion
if in.NodeName == "" {
out.NodeName = in.DeprecatedHost
}
if in.SecurityContext != nil {
out.SecurityContext = new(api.PodSecurityContext)
if err := Convert_v1_PodSecurityContext_To_api_PodSecurityContext(in.SecurityContext, out.SecurityContext, s); err != nil {
return err
}
}
// the host namespace fields have to be handled specially for backward compatibility
// with v1.0.0
if out.SecurityContext == nil {
out.SecurityContext = new(api.PodSecurityContext)
}
out.SecurityContext.HostNetwork = in.HostNetwork
out.SecurityContext.HostPID = in.HostPID
out.SecurityContext.HostIPC = in.HostIPC
if in.ImagePullSecrets != nil {
out.ImagePullSecrets = make([]api.LocalObjectReference, len(in.ImagePullSecrets))
for i := range in.ImagePullSecrets {
if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.ImagePullSecrets[i], &out.ImagePullSecrets[i], s); err != nil {
return err
}
}
} else {
out.ImagePullSecrets = nil
}
out.Hostname = in.Hostname
out.Subdomain = in.Subdomain
return nil
}
func Convert_api_Pod_To_v1_Pod(in *api.Pod, out *Pod, s conversion.Scope) error {
if err := autoConvert_api_Pod_To_v1_Pod(in, out, s); err != nil {
return err
}
// We need to reset certain fields for mirror pods from pre-v1.1 kubelet
// (#15960).
// TODO: Remove this code after we drop support for v1.0 kubelets.
if value, ok := in.Annotations[mirrorAnnotationKey]; ok && value == mirrorAnnotationValue_1_0 {
// Reset the TerminationGracePeriodSeconds.
out.Spec.TerminationGracePeriodSeconds = nil
// Reset the resource requests.
for i := range out.Spec.Containers {
out.Spec.Containers[i].Resources.Requests = nil
}
}
return nil
}
func Convert_v1_Pod_To_api_Pod(in *Pod, out *api.Pod, s conversion.Scope) error {
return autoConvert_v1_Pod_To_api_Pod(in, out, s)
}
func Convert_api_ServiceSpec_To_v1_ServiceSpec(in *api.ServiceSpec, out *ServiceSpec, s conversion.Scope) error {
if err := autoConvert_api_ServiceSpec_To_v1_ServiceSpec(in, out, s); err != nil {
return err
}
// Publish both externalIPs and deprecatedPublicIPs fields in v1.
for _, ip := range in.ExternalIPs {
out.DeprecatedPublicIPs = append(out.DeprecatedPublicIPs, ip)
}
// Carry conversion
out.DeprecatedPortalIP = in.ClusterIP
return nil
}
func Convert_v1_ServiceSpec_To_api_ServiceSpec(in *ServiceSpec, out *api.ServiceSpec, s conversion.Scope) error {
if err := autoConvert_v1_ServiceSpec_To_api_ServiceSpec(in, out, s); err != nil {
return err
}
// Prefer the legacy deprecatedPublicIPs field, if provided.
if len(in.DeprecatedPublicIPs) > 0 {
out.ExternalIPs = nil
for _, ip := range in.DeprecatedPublicIPs {
out.ExternalIPs = append(out.ExternalIPs, ip)
}
}
return nil
}
func Convert_api_PodSecurityContext_To_v1_PodSecurityContext(in *api.PodSecurityContext, out *PodSecurityContext, s conversion.Scope) error {
out.SupplementalGroups = in.SupplementalGroups
if in.SELinuxOptions != nil {
out.SELinuxOptions = new(SELinuxOptions)
if err := Convert_api_SELinuxOptions_To_v1_SELinuxOptions(in.SELinuxOptions, out.SELinuxOptions, s); err != nil {
return err
}
} else {
out.SELinuxOptions = nil
}
if in.RunAsUser != nil {
out.RunAsUser = new(int64)
*out.RunAsUser = *in.RunAsUser
} else {
out.RunAsUser = nil
}
if in.RunAsNonRoot != nil {
out.RunAsNonRoot = new(bool)
*out.RunAsNonRoot = *in.RunAsNonRoot
} else {
out.RunAsNonRoot = nil
}
if in.FSGroup != nil {
out.FSGroup = new(int64)
*out.FSGroup = *in.FSGroup
} else {
out.FSGroup = nil
}
return nil
}
func Convert_v1_PodSecurityContext_To_api_PodSecurityContext(in *PodSecurityContext, out *api.PodSecurityContext, s conversion.Scope) error {
out.SupplementalGroups = in.SupplementalGroups
if in.SELinuxOptions != nil {
out.SELinuxOptions = new(api.SELinuxOptions)
if err := Convert_v1_SELinuxOptions_To_api_SELinuxOptions(in.SELinuxOptions, out.SELinuxOptions, s); err != nil {
return err
}
} else {
out.SELinuxOptions = nil
}
if in.RunAsUser != nil {
out.RunAsUser = new(int64)
*out.RunAsUser = *in.RunAsUser
} else {
out.RunAsUser = nil
}
if in.RunAsNonRoot != nil {
out.RunAsNonRoot = new(bool)
*out.RunAsNonRoot = *in.RunAsNonRoot
} else {
out.RunAsNonRoot = nil
}
if in.FSGroup != nil {
out.FSGroup = new(int64)
*out.FSGroup = *in.FSGroup
} else {
out.FSGroup = nil
}
return nil
}
func Convert_v1_ResourceList_To_api_ResourceList(in *ResourceList, out *api.ResourceList, s conversion.Scope) error {
if *in == nil {
return nil
}
converted := make(api.ResourceList)
for key, val := range *in {
value := val.Copy()
// TODO(#18538): We round up resource values to milli scale to maintain API compatibility.
// In the future, we should instead reject values that need rounding.
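// e.g. a hypothetical quantity of 0.1001 is rounded up to 0.101 at milli scale.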
const milliScale = 3
value.Amount.Round(value.Amount, milliScale, inf.RoundUp)
converted[api.ResourceName(key)] = *value
}
*out = converted
return nil
}
// This will convert our internal representation of VolumeSource to its v1 representation
// Used for keeping backwards compatibility for the Metadata field
func Convert_api_VolumeSource_To_v1_VolumeSource(in *api.VolumeSource, out *VolumeSource, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*api.VolumeSource))(in)
}
if err := s.DefaultConvert(in, out, conversion.IgnoreMissingFields); err != nil {
return err
}
if in.DownwardAPI != nil {
out.Metadata = new(MetadataVolumeSource)
if err := Convert_api_DownwardAPIVolumeSource_To_v1_MetadataVolumeSource(in.DownwardAPI, out.Metadata, s); err != nil {
return err
}
}
return nil
}
// downward -> metadata (api -> v1)
func Convert_api_DownwardAPIVolumeSource_To_v1_MetadataVolumeSource(in *api.DownwardAPIVolumeSource, out *MetadataVolumeSource, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*api.DownwardAPIVolumeSource))(in)
}
if in.Items != nil {
out.Items = make([]MetadataFile, len(in.Items))
for i := range in.Items {
if err := Convert_api_DownwardAPIVolumeFile_To_v1_MetadataFile(&in.Items[i], &out.Items[i], s); err != nil {
return err
}
}
}
return nil
}
func Convert_api_DownwardAPIVolumeFile_To_v1_MetadataFile(in *api.DownwardAPIVolumeFile, out *MetadataFile, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*api.DownwardAPIVolumeFile))(in)
}
out.Name = in.Path
if err := Convert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(&in.FieldRef, &out.FieldRef, s); err != nil {
return err
}
return nil
}
// This will convert the v1 representation of VolumeSource to our internal representation
// Used for keeping backwards compatibility for the Metadata field
func Convert_v1_VolumeSource_To_api_VolumeSource(in *VolumeSource, out *api.VolumeSource, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*VolumeSource))(in)
}
if err := s.DefaultConvert(in, out, conversion.IgnoreMissingFields); err != nil {
return err
}
if in.Metadata != nil {
out.DownwardAPI = new(api.DownwardAPIVolumeSource)
if err := Convert_v1_MetadataVolumeSource_To_api_DownwardAPIVolumeSource(in.Metadata, out.DownwardAPI, s); err != nil {
return err
}
}
return nil
}
// metadata -> downward (v1 -> api)
func Convert_v1_MetadataVolumeSource_To_api_DownwardAPIVolumeSource(in *MetadataVolumeSource, out *api.DownwardAPIVolumeSource, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*MetadataVolumeSource))(in)
}
if in.Items != nil {
out.Items = make([]api.DownwardAPIVolumeFile, len(in.Items))
for i := range in.Items {
if err := Convert_v1_MetadataFile_To_api_DownwardAPIVolumeFile(&in.Items[i], &out.Items[i], s); err != nil {
return err
}
}
}
return nil
}
func Convert_v1_MetadataFile_To_api_DownwardAPIVolumeFile(in *MetadataFile, out *api.DownwardAPIVolumeFile, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*MetadataFile))(in)
}
out.Path = in.Name
if err := Convert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector(&in.FieldRef, &out.FieldRef, s); err != nil {
return err
}
return nil
}
func Convert_v1_SecurityContextConstraints_To_api_SecurityContextConstraints(in *SecurityContextConstraints, out *api.SecurityContextConstraints, s conversion.Scope) error {
return autoConvert_v1_SecurityContextConstraints_To_api_SecurityContextConstraints(in, out, s)
}
func Convert_api_SecurityContextConstraints_To_v1_SecurityContextConstraints(in *api.SecurityContextConstraints, out *SecurityContextConstraints, s conversion.Scope) error {
if err := autoConvert_api_SecurityContextConstraints_To_v1_SecurityContextConstraints(in, out, s); err != nil {
return err
}
if in.Volumes != nil {
for _, v := range in.Volumes {
// set the Allow* fields based on the existence in the volume slice
switch v {
case api.FSTypeHostPath, api.FSTypeAll:
out.AllowHostDirVolumePlugin = true
}
}
}
return nil
}
| apache-2.0 |
smmribeiro/intellij-community | platform/util/testSrc/com/intellij/ui/MixedColorProducerTest.java | 4269 | // Copyright 2000-2021 JetBrains s.r.o. and contributors. Use of this source code is governed by the Apache 2.0 license.
package com.intellij.ui;
import org.jetbrains.annotations.NotNull;
import org.junit.Assert;
import org.junit.Test;
import java.awt.*;
public final class MixedColorProducerTest {
@Test
public void checkFirstColorInstance() {
Assert.assertSame(Color.BLACK, getBlackWhite(0).get());
Assert.assertSame(Color.WHITE, getWhiteBlack(0).get());
}
@Test
public void checkSecondColorInstance() {
Assert.assertSame(Color.WHITE, getBlackWhite(1).get());
Assert.assertSame(Color.BLACK, getWhiteBlack(1).get());
}
@Test
public void checkCachedColorInstance() {
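// Re-setting the same mixer value should reuse the cached Color instance;
// a slightly different value yields an equal-by-value but distinct instance.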
MixedColorProducer producer = getTransparentRed(.999);
Color color = producer.get();
producer.setMixer(.999);
Assert.assertEquals(color, producer.get());
Assert.assertSame(color, producer.get());
producer.setMixer(.9999);
Assert.assertEquals(color, producer.get());
Assert.assertNotSame(color, producer.get());
}
private static void testInvalidValue(double mixer) {
try {
getTransparentRed(mixer);
Assert.fail("invalid value: " + mixer);
}
catch (IllegalArgumentException ignore) {
}
}
@Test
public void testMinNegativeValue() {
testInvalidValue(-Double.MIN_VALUE);
}
@Test
public void testMaxNegativeValue() {
testInvalidValue(-Double.MAX_VALUE);
}
@Test
public void testMaxPositiveValue() {
testInvalidValue(Double.MAX_VALUE);
}
@Test
public void testNegativeInfinity() {
testInvalidValue(Double.NEGATIVE_INFINITY);
}
@Test
public void testPositiveInfinity() {
testInvalidValue(Double.POSITIVE_INFINITY);
}
@Test
public void testNaN() {
testInvalidValue(Double.NaN);
}
@NotNull
private static MixedColorProducer getBlackWhite(double mixer) {
return new MixedColorProducer(Color.BLACK, Color.WHITE, mixer);
}
@Test
public void testBlackWhite25() {
assertColor(getBlackWhite(.25), 0x404040);
}
@Test
public void testBlackWhite50() {
assertColor(getBlackWhite(.50), 0x808080);
}
@Test
public void testBlackWhite75() {
assertColor(getBlackWhite(.75), 0xBFBFBF);
}
@Test
public void testBlackWhiteAll() {
MixedColorProducer producer = getBlackWhite(0);
for (int i = 0; i <= 0xFF; i++) {
producer.setMixer((float)i / 0xFF);
assertGrayColor(producer, i);
}
}
@NotNull
private static MixedColorProducer getWhiteBlack(double mixer) {
return new MixedColorProducer(Color.WHITE, Color.BLACK, mixer);
}
@Test
public void testWhiteBlack25() {
assertColor(getWhiteBlack(.25), 0xBFBFBF);
}
@Test
public void testWhiteBlack50() {
assertColor(getWhiteBlack(.50), 0x808080);
}
@Test
public void testWhiteBlack75() {
assertColor(getWhiteBlack(.75), 0x404040);
}
@Test
public void testWhiteBlackAll() {
MixedColorProducer producer = getWhiteBlack(0);
for (int i = 0; i <= 0xFF; i++) {
producer.setMixer((float)i / 0xFF);
assertGrayColor(producer, 0xFF - i);
}
}
@NotNull
private static MixedColorProducer getTransparentRed(double mixer) {
return new MixedColorProducer(new Color(0xFF, 0, 0, 0), Color.RED, mixer);
}
@Test
public void testTransparentRed25() {
assertColorWithAlpha(getTransparentRed(.25), 0x40FF0000);
}
@Test
public void testTransparentRed50() {
assertColorWithAlpha(getTransparentRed(.50), 0x80FF0000);
}
@Test
public void testTransparentRed75() {
assertColorWithAlpha(getTransparentRed(.75), 0xBFFF0000);
}
private static void assertColor(@NotNull MixedColorProducer producer, int expected) {
assertColor(producer, new Color(expected, false));
}
private static void assertColorWithAlpha(@NotNull MixedColorProducer producer, int expected) {
assertColor(producer, new Color(expected, true));
}
private static void assertGrayColor(@NotNull MixedColorProducer producer, int expected) {
assertColor(producer, new Color(expected, expected, expected));
}
private static void assertColor(@NotNull MixedColorProducer producer, @NotNull Color expected) {
Assert.assertEquals(expected, producer.get());
}
}
| apache-2.0 |
ilovesoup/hyracks | hyracks/hyracks-hdfs/hyracks-hdfs-core/src/main/java/edu/uci/ics/hyracks/hdfs/api/IKeyValueParser.java | 1862 | /*
* Copyright 2009-2013 by The Regents of the University of California
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License from
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package edu.uci.ics.hyracks.hdfs.api;
import edu.uci.ics.hyracks.api.comm.IFrameWriter;
import edu.uci.ics.hyracks.api.exceptions.HyracksDataException;
/**
* Users need to implement this interface to use the HDFSReadOperatorDescriptor.
*
* @param <K>
* the key type
* @param <V>
* the value type
*/
public interface IKeyValueParser<K, V> {
/**
* Initialize the key value parser.
*
* @param writer
* The hyracks writer for outputting data.
* @throws HyracksDataException
*/
public void open(IFrameWriter writer) throws HyracksDataException;
/**
* Parse a key-value pair and write the resulting tuples to the writer.
*
* @param key
* the input key
* @param value
* the input value
* @param writer
* the hyracks writer for outputting data
* @param fileString
* the name of the source file
* @throws HyracksDataException
*/
public void parse(K key, V value, IFrameWriter writer, String fileString) throws HyracksDataException;
/**
* Flush the residual tuples in the internal buffer to the writer.
* This method is called in the close() of HDFSReadOperatorDescriptor.
*
* @param writer
* The hyracks writer for outputting data.
* @throws HyracksDataException
*/
public void close(IFrameWriter writer) throws HyracksDataException;
}
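// A minimal illustrative sketch of an implementation (the class name and the
// tuple-building steps are assumptions for illustration, not part of this
// interface's contract):
//
// public class LineKeyValueParser implements IKeyValueParser<LongWritable, Text> {
//     public void open(IFrameWriter writer) throws HyracksDataException {
//         // allocate an output frame and tuple appender here
//     }
//     public void parse(LongWritable key, Text value, IFrameWriter writer, String fileString)
//             throws HyracksDataException {
//         // build a one-field tuple from 'value' and append it; flush the frame when full
//     }
//     public void close(IFrameWriter writer) throws HyracksDataException {
//         // flush any residual tuples before the operator closes
//     }
// }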
| apache-2.0 |
knadikari/developer-studio | common/org.wso2.developerstudio.eclipse.artifact.security/src/org/wso2/developerstudio/eclipse/security/project/model/ServiceGroup.java | 5773 | //
// This file was generated by the JavaTM Architecture for XML Binding(JAXB) Reference Implementation, v2.2.4-2
// See <a href="http://java.sun.com/xml/jaxb">http://java.sun.com/xml/jaxb</a>
// Any modifications to this file will be lost upon recompilation of the source schema.
// Generated on: 2013.12.11 at 12:17:22 PM IST
//
package org.wso2.developerstudio.eclipse.security.project.model;
import java.util.ArrayList;
import java.util.List;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlAttribute;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlElements;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlSchemaType;
import javax.xml.bind.annotation.XmlType;
import javax.xml.bind.annotation.adapters.CollapsedStringAdapter;
import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter;
/**
* <p>Java class for anonymous complex type.
*
* <p>The following schema fragment specifies the expected content contained within this class.
*
* <pre>
* <complexType>
* <complexContent>
* <restriction base="{http://www.w3.org/2001/XMLSchema}anyType">
* <sequence>
* <element ref="{}service" maxOccurs="unbounded"/>
* <choice maxOccurs="unbounded">
* <element ref="{}module" minOccurs="0"/>
* <element ref="{}parameter" minOccurs="0"/>
* </choice>
* </sequence>
* <attribute name="hashValue" type="{http://www.w3.org/2001/XMLSchema}anySimpleType" />
* <attribute name="name" use="required" type="{http://www.w3.org/2001/XMLSchema}NCName" />
* <attribute name="successfullyAdded" use="required" type="{http://www.w3.org/2001/XMLSchema}boolean" />
* </restriction>
* </complexContent>
* </complexType>
* </pre>
*
*
*/
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "", propOrder = {
"service",
"moduleOrParameter"
})
@XmlRootElement(name = "serviceGroup", namespace = "")
public class ServiceGroup {
@XmlElement(namespace = "", required = true)
protected List<Service> service;
@XmlElements({
@XmlElement(name = "module", namespace = "", type = Module.class),
@XmlElement(name = "parameter", namespace = "", type = Parameter.class)
})
protected List<Object> moduleOrParameter;
@XmlAttribute(name = "hashValue")
@XmlSchemaType(name = "anySimpleType")
protected String hashValue;
@XmlAttribute(name = "name", required = true)
@XmlJavaTypeAdapter(CollapsedStringAdapter.class)
@XmlSchemaType(name = "NCName")
protected String name;
@XmlAttribute(name = "successfullyAdded", required = true)
protected boolean successfullyAdded;
/**
* Gets the value of the service property.
*
* <p>
* This accessor method returns a reference to the live list,
* not a snapshot. Therefore any modification you make to the
* returned list will be present inside the JAXB object.
* This is why there is not a <CODE>set</CODE> method for the service property.
*
* <p>
* For example, to add a new item, do as follows:
* <pre>
* getService().add(newItem);
* </pre>
*
*
* <p>
* Objects of the following type(s) are allowed in the list
* {@link Service }
*
*
*/
public List<Service> getService() {
if (service == null) {
service = new ArrayList<Service>();
}
return this.service;
}
/**
* Gets the value of the moduleOrParameter property.
*
* <p>
* This accessor method returns a reference to the live list,
* not a snapshot. Therefore any modification you make to the
* returned list will be present inside the JAXB object.
* This is why there is not a <CODE>set</CODE> method for the moduleOrParameter property.
*
* <p>
* For example, to add a new item, do as follows:
* <pre>
* getModuleOrParameter().add(newItem);
* </pre>
*
*
* <p>
* Objects of the following type(s) are allowed in the list
* {@link Module }
* {@link Parameter }
*
*
*/
public List<Object> getModuleOrParameter() {
if (moduleOrParameter == null) {
moduleOrParameter = new ArrayList<Object>();
}
return this.moduleOrParameter;
}
/**
* Gets the value of the hashValue property.
*
* @return
* possible object is
* {@link String }
*
*/
public String getHashValue() {
return hashValue;
}
/**
* Sets the value of the hashValue property.
*
* @param value
* allowed object is
* {@link String }
*
*/
public void setHashValue(String value) {
this.hashValue = value;
}
/**
* Gets the value of the name property.
*
* @return
* possible object is
* {@link String }
*
*/
public String getName() {
return name;
}
/**
* Sets the value of the name property.
*
* @param value
* allowed object is
* {@link String }
*
*/
public void setName(String value) {
this.name = value;
}
/**
* Gets the value of the successfullyAdded property.
*
*/
public boolean isSuccessfullyAdded() {
return successfullyAdded;
}
/**
* Sets the value of the successfullyAdded property.
*
*/
public void setSuccessfullyAdded(boolean value) {
this.successfullyAdded = value;
}
}
| apache-2.0 |
wiltonlazary/arangodb | tests/js/server/recovery/collection-duplicate.js | 2684 | /* jshint globalstrict:false, strict:false, unused : false */
/* global assertEqual */
// //////////////////////////////////////////////////////////////////////////////
// / @brief tests for dump/reload
// /
// / @file
// /
// / DISCLAIMER
// /
// / Copyright 2010-2012 triagens GmbH, Cologne, Germany
// /
// / Licensed under the Apache License, Version 2.0 (the "License")
// / you may not use this file except in compliance with the License.
// / You may obtain a copy of the License at
// /
// / http://www.apache.org/licenses/LICENSE-2.0
// /
// / Unless required by applicable law or agreed to in writing, software
// / distributed under the License is distributed on an "AS IS" BASIS,
// / WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// / See the License for the specific language governing permissions and
// / limitations under the License.
// /
// / Copyright holder is triAGENS GmbH, Cologne, Germany
// /
// / @author Jan Steemann
// / @author Copyright 2012, triAGENS GmbH, Cologne, Germany
// //////////////////////////////////////////////////////////////////////////////
var db = require('@arangodb').db;
var internal = require('internal');
var jsunity = require('jsunity');
function runSetup () {
'use strict';
internal.debugClearFailAt();
db._drop('UnitTestsRecovery');
var c = db._create('UnitTestsRecovery');
// try to re-create collection with the same name
try {
db._create('UnitTestsRecovery');
} catch (err) {
}
c.save({ _key: 'foo' }, true);
internal.debugTerminate('crashing server');
}
// //////////////////////////////////////////////////////////////////////////////
// / @brief test suite
// //////////////////////////////////////////////////////////////////////////////
function recoverySuite () {
'use strict';
jsunity.jsUnity.attachAssertions();
return {
setUp: function () {},
tearDown: function () {},
// //////////////////////////////////////////////////////////////////////////////
// / @brief test whether we can restore the trx data
// //////////////////////////////////////////////////////////////////////////////
testCollectionDuplicate: function () {
var c = db._collection('UnitTestsRecovery');
assertEqual(1, c.count());
}
};
}
// //////////////////////////////////////////////////////////////////////////////
// / @brief executes the test suite
// //////////////////////////////////////////////////////////////////////////////
function main (argv) {
'use strict';
if (argv[1] === 'setup') {
runSetup();
return 0;
} else {
jsunity.run(recoverySuite);
return jsunity.writeDone().status ? 0 : 1;
}
}
| apache-2.0 |
mehant/drill | exec/java-exec/src/main/java/org/apache/drill/exec/store/avro/AvroRecordReader.java | 13522 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.drill.exec.store.avro;
import io.netty.buffer.DrillBuf;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.List;
import java.util.Map.Entry;
import java.util.concurrent.TimeUnit;
import java.security.PrivilegedExceptionAction;
import org.apache.avro.Schema;
import org.apache.avro.Schema.Type;
import org.apache.avro.file.DataFileReader;
import org.apache.avro.generic.GenericArray;
import org.apache.avro.generic.GenericContainer;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.mapred.FsInput;
import org.apache.avro.util.Utf8;
import org.apache.drill.common.exceptions.DrillRuntimeException;
import org.apache.drill.common.exceptions.ExecutionSetupException;
import org.apache.drill.common.expression.PathSegment;
import org.apache.drill.common.expression.SchemaPath;
import org.apache.drill.exec.expr.holders.BigIntHolder;
import org.apache.drill.exec.expr.holders.BitHolder;
import org.apache.drill.exec.expr.holders.Float4Holder;
import org.apache.drill.exec.expr.holders.Float8Holder;
import org.apache.drill.exec.expr.holders.IntHolder;
import org.apache.drill.exec.expr.holders.VarBinaryHolder;
import org.apache.drill.exec.expr.holders.VarCharHolder;
import org.apache.drill.exec.ops.FragmentContext;
import org.apache.drill.exec.ops.OperatorContext;
import org.apache.drill.exec.physical.impl.OutputMutator;
import org.apache.drill.exec.store.AbstractRecordReader;
import org.apache.drill.exec.store.RecordReader;
import org.apache.drill.exec.util.ImpersonationUtil;
import org.apache.drill.exec.vector.complex.impl.VectorContainerWriter;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import com.google.common.base.Charsets;
import com.google.common.base.Stopwatch;
import org.apache.hadoop.security.UserGroupInformation;
/**
* A RecordReader implementation for Avro data files.
*
* @see RecordReader
*/
public class AvroRecordReader extends AbstractRecordReader {
static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(AvroRecordReader.class);
private final Path hadoop;
private final long start;
private final long end;
private DrillBuf buffer;
private VectorContainerWriter writer;
private DataFileReader<GenericContainer> reader = null;
private OperatorContext operatorContext;
private FileSystem fs;
private final String opUserName;
private final String queryUserName;
private static final int DEFAULT_BATCH_SIZE = 1000;
public AvroRecordReader(final FragmentContext fragmentContext,
final String inputPath,
final long start,
final long length,
final FileSystem fileSystem,
final List<SchemaPath> projectedColumns,
final String userName) {
this(fragmentContext, inputPath, start, length, fileSystem, projectedColumns, userName, DEFAULT_BATCH_SIZE);
}
public AvroRecordReader(final FragmentContext fragmentContext,
final String inputPath,
final long start,
final long length,
final FileSystem fileSystem,
List<SchemaPath> projectedColumns,
final String userName,
final int defaultBatchSize) {
hadoop = new Path(inputPath);
this.start = start;
this.end = start + length;
buffer = fragmentContext.getManagedBuffer();
this.fs = fileSystem;
this.opUserName = userName;
this.queryUserName = fragmentContext.getQueryUserName();
setColumns(projectedColumns);
}
private DataFileReader getReader(final Path hadoop, final FileSystem fs) throws ExecutionSetupException {
try {
final UserGroupInformation ugi = ImpersonationUtil.createProxyUgi(this.opUserName, this.queryUserName);
return ugi.doAs(new PrivilegedExceptionAction<DataFileReader>() {
@Override
public DataFileReader run() throws Exception {
return new DataFileReader<>(new FsInput(hadoop, fs.getConf()), new GenericDatumReader<GenericContainer>());
}
});
} catch (IOException | InterruptedException e) {
throw new ExecutionSetupException(
String.format("Error in creating avro reader for file: %s", hadoop), e);
}
}
@Override
public void setup(final OperatorContext context, final OutputMutator output) throws ExecutionSetupException {
operatorContext = context;
writer = new VectorContainerWriter(output);
try {
reader = getReader(hadoop, fs);
logger.debug("Processing file : {}, start position : {}, end position : {} ", hadoop, start, end);
reader.sync(this.start);
} catch (IOException e) {
throw new ExecutionSetupException(e);
}
}
@Override
public int next() {
final Stopwatch watch = new Stopwatch().start();
if (reader == null) {
throw new IllegalStateException("Avro reader is not open.");
}
if (!reader.hasNext()) {
return 0;
}
int recordCount = 0;
writer.allocate();
writer.reset();
try {
// XXX - Implement batch size
for (GenericContainer container = null; reader.hasNext() && !reader.pastSync(end); recordCount++) {
writer.setPosition(recordCount);
container = reader.next(container);
processRecord(container, container.getSchema());
}
writer.setValueCount(recordCount);
} catch (IOException e) {
throw new DrillRuntimeException(e);
}
logger.debug("Read {} records in {} ms", recordCount, watch.elapsed(TimeUnit.MILLISECONDS));
return recordCount;
}
private void processRecord(final GenericContainer container, final Schema schema) {
final Schema.Type type = schema.getType();
switch (type) {
case RECORD:
process(container, schema, null, new MapOrListWriter(writer.rootAsMap()));
break;
default:
throw new DrillRuntimeException("Root object must be record type. Found: " + type);
}
}
private void process(final Object value, final Schema schema, final String fieldName, MapOrListWriter writer) {
if (value == null) {
return;
}
final Schema.Type type = schema.getType();
switch (type) {
case RECORD:
// list field of MapOrListWriter will be non null when we want to store array of maps/records.
MapOrListWriter _writer = writer;
for (final Schema.Field field : schema.getFields()) {
if (field.schema().getType() == Schema.Type.RECORD ||
(field.schema().getType() == Schema.Type.UNION &&
field.schema().getTypes().get(0).getType() == Schema.Type.NULL &&
field.schema().getTypes().get(1).getType() == Schema.Type.RECORD)) {
_writer = writer.map(field.name());
}
process(((GenericRecord) value).get(field.name()), field.schema(), field.name(), _writer);
}
break;
case ARRAY:
assert fieldName != null;
final GenericArray array = (GenericArray) value;
Schema elementSchema = array.getSchema().getElementType();
Type elementType = elementSchema.getType();
if (elementType == Schema.Type.RECORD || elementType == Schema.Type.MAP){
writer = writer.list(fieldName).listoftmap(fieldName);
} else {
writer = writer.list(fieldName);
}
writer.start();
for (final Object o : array) {
process(o, elementSchema, fieldName, writer);
}
writer.end();
break;
case UNION:
// currently supporting only nullable union (optional fields) like ["null", "some-type"].
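// e.g. an Avro field declared as {"name": "comment", "type": ["null", "string"]}
// (an illustrative schema) resolves here to its non-null branch.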
if (schema.getTypes().get(0).getType() != Schema.Type.NULL) {
throw new UnsupportedOperationException("Avro union type must be of the format : [\"null\", \"some-type\"]");
}
process(value, schema.getTypes().get(1), fieldName, writer);
break;
case MAP:
@SuppressWarnings("unchecked")
final HashMap<Object, Object> map = (HashMap<Object, Object>) value;
Schema valueSchema = schema.getValueType();
writer = writer.map(fieldName);
writer.start();
for (Entry<Object, Object> entry : map.entrySet()) {
process(entry.getValue(), valueSchema, entry.getKey().toString(), writer);
}
writer.end();
break;
case FIXED:
throw new UnsupportedOperationException("Unimplemented type: " + type.toString());
case ENUM: // Enum symbols are strings
case NULL: // Treat null type as a primitive
default:
assert fieldName != null;
if (writer.isMapWriter()) {
SchemaPath path;
if (writer.map.getField().getPath().getRootSegment().getPath().equals("")) {
path = new SchemaPath(new PathSegment.NameSegment(fieldName));
} else {
path = writer.map.getField().getPath().getChild(fieldName);
}
if (!selected(path)) {
break;
}
}
processPrimitive(value, schema.getType(), fieldName, writer);
break;
}
}
private void processPrimitive(final Object value, final Schema.Type type, final String fieldName,
final MapOrListWriter writer) {
if (value == null) {
return;
}
switch (type) {
case STRING:
byte[] binary = null;
if (value instanceof Utf8) {
binary = ((Utf8) value).getBytes();
} else {
binary = value.toString().getBytes(Charsets.UTF_8);
}
final int length = binary.length;
final VarCharHolder vh = new VarCharHolder();
ensure(length);
buffer.setBytes(0, binary);
vh.buffer = buffer;
vh.start = 0;
vh.end = length;
writer.varChar(fieldName).write(vh);
break;
case INT:
final IntHolder ih = new IntHolder();
ih.value = (Integer) value;
writer.integer(fieldName).write(ih);
break;
case LONG:
final BigIntHolder bh = new BigIntHolder();
bh.value = (Long) value;
writer.bigInt(fieldName).write(bh);
break;
case FLOAT:
final Float4Holder fh = new Float4Holder();
fh.value = (Float) value;
writer.float4(fieldName).write(fh);
break;
case DOUBLE:
final Float8Holder f8h = new Float8Holder();
f8h.value = (Double) value;
writer.float8(fieldName).write(f8h);
break;
case BOOLEAN:
final BitHolder bit = new BitHolder();
bit.value = (Boolean) value ? 1 : 0;
writer.bit(fieldName).write(bit);
break;
case BYTES:
// XXX - Not sure if this is correct. Nothing prints from sqlline for byte fields.
final VarBinaryHolder vb = new VarBinaryHolder();
final ByteBuffer buf = (ByteBuffer) value;
final byte[] bytes = buf.array();
ensure(bytes.length);
buffer.setBytes(0, bytes);
vb.buffer = buffer;
vb.start = 0;
vb.end = bytes.length;
writer.binary(fieldName).write(vb);
break;
case NULL:
// Nothing to do for null type
break;
case ENUM:
final String symbol = value.toString();
final byte[] b;
try {
b = symbol.getBytes("UTF-8");
} catch (UnsupportedEncodingException e) {
throw new DrillRuntimeException("Unable to read enum value for field: " + fieldName, e);
}
final VarCharHolder vch = new VarCharHolder();
ensure(b.length);
buffer.setBytes(0, b);
vch.buffer = buffer;
vch.start = 0;
vch.end = b.length;
writer.varChar(fieldName).write(vch);
break;
default:
throw new DrillRuntimeException("Unhandled Avro type: " + type.toString());
}
}
private boolean selected(SchemaPath field) {
if (isStarQuery()) {
return true;
}
for (final SchemaPath sp : getColumns()) {
if (sp.contains(field)) {
return true;
}
}
return false;
}
private void ensure(final int length) {
buffer = buffer.reallocIfNeeded(length);
}
@Override
public void close() {
if (reader != null) {
try {
reader.close();
} catch (IOException e) {
logger.warn("Error closing Avro reader", e);
} finally {
reader = null;
}
}
}
}
| apache-2.0 |
nishantmonu51/druid | server/src/main/java/org/apache/druid/guice/RouterProcessingModule.java | 3537 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.druid.guice;
import com.google.inject.Binder;
import com.google.inject.Module;
import com.google.inject.Provides;
import org.apache.druid.collections.BlockingPool;
import org.apache.druid.collections.DummyBlockingPool;
import org.apache.druid.collections.DummyNonBlockingPool;
import org.apache.druid.collections.NonBlockingPool;
import org.apache.druid.guice.annotations.Global;
import org.apache.druid.guice.annotations.Merging;
import org.apache.druid.java.util.common.concurrent.Execs;
import org.apache.druid.java.util.common.concurrent.ExecutorServiceConfig;
import org.apache.druid.java.util.common.logger.Logger;
import org.apache.druid.query.DruidProcessingConfig;
import org.apache.druid.query.ExecutorServiceMonitor;
import org.apache.druid.query.ForwardingQueryProcessingPool;
import org.apache.druid.query.QueryProcessingPool;
import org.apache.druid.server.metrics.MetricsModule;
import java.nio.ByteBuffer;
/**
* This module is used to fulfill dependency injection of query processing and caching resources: buffer pools and
* thread pools on Router Druid node type. Router needs to inject those resources, because it depends on
* {@link org.apache.druid.query.QueryToolChest}s, and they couple query type aspects not related to processing and
* caching, which Router uses, and related to processing and caching, which Router doesn't use, but they inject the
* resources.
*/
public class RouterProcessingModule implements Module
{
private static final Logger log = new Logger(RouterProcessingModule.class);
@Override
public void configure(Binder binder)
{
binder.bind(ExecutorServiceConfig.class).to(DruidProcessingConfig.class);
MetricsModule.register(binder, ExecutorServiceMonitor.class);
}
@Provides
@ManageLifecycle
public QueryProcessingPool getProcessingExecutorPool(DruidProcessingConfig config)
{
if (config.getNumThreadsConfigured() != ExecutorServiceConfig.DEFAULT_NUM_THREADS) {
log.error("numThreads[%d] configured, that is ignored on Router", config.getNumThreadsConfigured());
}
return new ForwardingQueryProcessingPool(Execs.dummy());
}
@Provides
@LazySingleton
@Global
public NonBlockingPool<ByteBuffer> getIntermediateResultsPool()
{
return DummyNonBlockingPool.instance();
}
@Provides
@LazySingleton
@Merging
public BlockingPool<ByteBuffer> getMergeBufferPool(DruidProcessingConfig config)
{
if (config.getNumMergeBuffersConfigured() != DruidProcessingConfig.DEFAULT_NUM_MERGE_BUFFERS) {
log.error(
"numMergeBuffers[%d] configured, that is ignored on Router",
config.getNumMergeBuffersConfigured()
);
}
return DummyBlockingPool.instance();
}
}
| apache-2.0 |
arenadata/ambari | contrib/views/wfmanager/src/main/java/org/apache/oozie/ambari/view/assets/AssetResource.java | 8219 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.oozie.ambari.view.assets;
import com.google.gson.Gson;
import com.google.gson.JsonElement;
import com.google.gson.JsonParser;
import org.apache.ambari.view.ViewContext;
import org.apache.oozie.ambari.view.*;
import org.apache.oozie.ambari.view.assets.model.ActionAsset;
import org.apache.oozie.ambari.view.assets.model.ActionAssetDefinition;
import org.apache.oozie.ambari.view.assets.model.AssetDefintion;
import org.apache.oozie.ambari.view.exception.ErrorCode;
import org.apache.oozie.ambari.view.exception.WfmException;
import org.apache.oozie.ambari.view.exception.WfmWebException;
import org.apache.oozie.ambari.view.model.APIResult;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.ws.rs.*;
import javax.ws.rs.core.*;
import java.io.IOException;
import java.util.*;
import static org.apache.oozie.ambari.view.Constants.*;
public class AssetResource {
private final static Logger LOGGER = LoggerFactory
.getLogger(AssetResource.class);
private final AssetService assetService;
private final ViewContext viewContext;
private final HDFSFileUtils hdfsFileUtils;
private final OozieUtils oozieUtils = new OozieUtils();
private final OozieDelegate oozieDelegate;
public AssetResource(ViewContext viewContext) {
this.viewContext = viewContext;
this.assetService = new AssetService(viewContext);
hdfsFileUtils = new HDFSFileUtils(viewContext);
oozieDelegate = new OozieDelegate(viewContext);
}
@GET
public Response getAssets() {
try {
Collection<ActionAsset> assets = assetService.getAssets();
APIResult result = new APIResult();
result.setStatus(APIResult.Status.SUCCESS);
result.getPaging().setTotal(assets != null ? assets.size() : 0L);
result.setData(assets);
return Response.ok(result).build();
} catch (Exception ex) {
LOGGER.error(ex.getMessage(),ex);
throw new WfmWebException(ex);
}
}
@GET
@Path("/mine")
public Response getMyAssets() {
try {
Collection<ActionAsset> assets = assetService.getMyAssets();
APIResult result = new APIResult();
result.setStatus(APIResult.Status.SUCCESS);
result.getPaging().setTotal(assets != null ? assets.size() : 0L);
result.setData(assets);
return Response.ok(result).build();
} catch (Exception ex) {
LOGGER.error(ex.getMessage(),ex);
throw new WfmWebException(ex);
}
}
@POST
public Response saveAsset(@Context HttpHeaders headers,
@QueryParam("id") String id, @Context UriInfo ui, String body) {
try {
Gson gson = new Gson();
AssetDefintion assetDefinition = gson.fromJson(body, AssetDefintion.class);
Map<String, String> validateAsset = validateAsset(headers,
assetDefinition.getDefinition(), ui.getQueryParameters());
if (!STATUS_OK.equals(validateAsset.get(STATUS_KEY))) {
throw new WfmWebException(ErrorCode.ASSET_INVALID_FROM_OOZIE);
}
assetService.saveAsset(id, viewContext.getUsername(), assetDefinition);
APIResult result = new APIResult();
result.setStatus(APIResult.Status.SUCCESS);
return Response.ok(result).build();
} catch (WfmWebException ex) {
LOGGER.error(ex.getMessage(),ex);
throw ex;
} catch (Exception ex) {
LOGGER.error(ex.getMessage(),ex);
throw new WfmWebException(ex);
}
}
private List<String> getAsList(String string) {
ArrayList<String> li = new ArrayList<>(1);
li.add(string);
return li;
}
public Map<String, String> validateAsset(HttpHeaders headers,
String postBody, MultivaluedMap<String, String> queryParams) {
String workflowXml = oozieUtils.generateWorkflowXml(postBody);
Map<String, String> result = new HashMap<>();
String tempWfPath = "/tmp" + "/tmpooziewfs/tempwf_" + Math.round(Math.random() * 100000) + ".xml";
try {
hdfsFileUtils.writeToFile(tempWfPath, workflowXml, true);
} catch (IOException ex) {
LOGGER.error(ex.getMessage(),ex);
throw new WfmWebException(ex, ErrorCode.FILE_ACCESS_UNKNOWN_ERROR);
}
queryParams.put("oozieparam.action", getAsList("dryrun"));
queryParams.put("oozieconfig.rerunOnFailure", getAsList("false"));
queryParams.put("oozieconfig.useSystemLibPath", getAsList("true"));
queryParams.put("resourceManager", getAsList("useDefault"));
String dryRunResp = oozieDelegate.submitWorkflowJobToOozie(headers,
tempWfPath, queryParams, JobType.WORKFLOW);
LOGGER.info(String.format("resp from validating asset=[%s]", dryRunResp));
try {
hdfsFileUtils.deleteFile(tempWfPath);
} catch (IOException ex) {
LOGGER.error(ex.getMessage(),ex);
throw new WfmWebException(ex, ErrorCode.FILE_ACCESS_UNKNOWN_ERROR);
}
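    // An accepted dry run returns a JSON object with an "id" field; any
    // other response means the generated workflow (and hence the asset)
    // is invalid.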
if (dryRunResp != null && dryRunResp.trim().startsWith("{")) {
JsonElement jsonElement = new JsonParser().parse(dryRunResp);
JsonElement idElem = jsonElement.getAsJsonObject().get("id");
if (idElem != null) {
result.put(STATUS_KEY, STATUS_OK);
} else {
result.put(STATUS_KEY, STATUS_FAILED);
result.put(MESSAGE_KEY, dryRunResp);
}
} else {
result.put(STATUS_KEY, STATUS_FAILED);
result.put(MESSAGE_KEY, dryRunResp);
}
return result;
}
@GET
@Path("/assetNameAvailable")
  public Response assetNameAvailable(@QueryParam("name") String name) {
try {
boolean available = assetService.isAssetNameAvailable(name);
return Response.ok(available).build();
    } catch (Exception ex) {
LOGGER.error(ex.getMessage(),ex);
throw new WfmWebException(ex);
}
}
@GET
@Path("/{id}")
public Response getAssetDetail(@PathParam("id") String id) {
try {
AssetDefintion assetDefinition = assetService.getAssetDetail(id);
APIResult result = new APIResult();
result.setStatus(APIResult.Status.SUCCESS);
result.setData(assetDefinition);
return Response.ok(result).build();
} catch (Exception ex) {
LOGGER.error(ex.getMessage(),ex);
throw new WfmWebException(ex);
}
}
@GET
@Path("/definition/id}")
public Response getAssetDefinition(@PathParam("defnitionId") String id) {
try {
ActionAssetDefinition assetDefinition = assetService.getAssetDefinition(id);
APIResult result = new APIResult();
result.setStatus(APIResult.Status.SUCCESS);
result.setData(assetDefinition);
return Response.ok(result).build();
} catch (Exception ex) {
LOGGER.error(ex.getMessage(),ex);
throw new WfmWebException(ex);
}
}
@DELETE
@Path("/{id}")
public Response delete(@PathParam("id") String id) {
try {
ActionAsset asset = assetService.getAsset(id);
if (asset == null) {
throw new WfmWebException(ErrorCode.ASSET_NOT_EXIST);
}
if (!viewContext.getUsername().equals(asset.getOwner())){
throw new WfmWebException(ErrorCode.PERMISSION_ERROR);
}
assetService.deleteAsset(id);
APIResult result = new APIResult();
result.setStatus(APIResult.Status.SUCCESS);
return Response.ok(result).build();
} catch (WfmWebException ex) {
LOGGER.error(ex.getMessage(),ex);
throw ex;
} catch (Exception ex) {
LOGGER.error(ex.getMessage(),ex);
throw new WfmWebException(ex);
}
}
}
| apache-2.0 |
centic9/subversion-ppa | subversion/bindings/javahl/native/org_apache_subversion_javahl_types_Version.cpp | 2343 | /**
* @copyright
* ====================================================================
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
* ====================================================================
* @endcopyright
*
* @file org_apache_subversion_javahl_types_Version.cpp
* @brief Implementation of the native methods in the Java class Version.
*/
#include "../include/org_apache_subversion_javahl_types_Version.h"
#include "JNIStackElement.h"
#include "svn_version.h"
JNIEXPORT jint JNICALL
Java_org_apache_subversion_javahl_types_Version_getMajor(JNIEnv *env,
jobject jthis)
{
JNIEntry(Version, getMajor);
return SVN_VER_MAJOR;
}
JNIEXPORT jint JNICALL
Java_org_apache_subversion_javahl_types_Version_getMinor(JNIEnv *env,
jobject jthis)
{
JNIEntry(Version, getMinor);
return SVN_VER_MINOR;
}
JNIEXPORT jint JNICALL
Java_org_apache_subversion_javahl_types_Version_getPatch(JNIEnv *env,
jobject jthis)
{
JNIEntry(Version, getPatch);
return SVN_VER_PATCH;
}
JNIEXPORT jstring JNICALL
Java_org_apache_subversion_javahl_types_Version_getTag(JNIEnv *env,
jobject jthis)
{
JNIEntry(Version, getTag);
jstring tag = JNIUtil::makeJString(SVN_VER_TAG);
if (JNIUtil::isJavaExceptionThrown())
return NULL;
return tag;
}
JNIEXPORT jstring JNICALL
Java_org_apache_subversion_javahl_types_Version_getNumberTag(JNIEnv *env,
jobject jthis)
{
JNIEntry(Version, getNumberTag);
jstring numtag = JNIUtil::makeJString(SVN_VER_NUMTAG);
if (JNIUtil::isJavaExceptionThrown())
return NULL;
return numtag;
}
| apache-2.0 |
chibenwa/james | protocols/protocols-smtp/src/main/java/org/apache/james/smtpserver/netty/SMTPChannelUpstreamHandler.java | 2858 | /****************************************************************
* Licensed to the Apache Software Foundation (ASF) under one *
* or more contributor license agreements. See the NOTICE file *
* distributed with this work for additional information *
* regarding copyright ownership. The ASF licenses this file *
* to you under the Apache License, Version 2.0 (the *
* "License"); you may not use this file except in compliance *
* with the License. You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, *
* software distributed under the License is distributed on an *
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
* KIND, either express or implied. See the License for the *
* specific language governing permissions and limitations *
* under the License. *
****************************************************************/
package org.apache.james.smtpserver.netty;
import org.apache.james.lifecycle.api.LifecycleUtil;
import org.apache.james.protocols.api.Encryption;
import org.apache.james.protocols.api.Protocol;
import org.apache.james.protocols.api.ProtocolSession.State;
import org.apache.james.protocols.netty.BasicChannelUpstreamHandler;
import org.apache.james.protocols.smtp.SMTPSession;
import org.apache.james.smtpserver.SMTPConstants;
import org.jboss.netty.channel.ChannelHandler.Sharable;
import org.jboss.netty.channel.ChannelHandlerContext;
import org.jboss.netty.channel.ChannelUpstreamHandler;
import org.slf4j.Logger;
/**
* {@link ChannelUpstreamHandler} which is used by the SMTPServer
*/
@Sharable
public class SMTPChannelUpstreamHandler extends BasicChannelUpstreamHandler {
public SMTPChannelUpstreamHandler(Protocol protocol, Logger logger, Encryption encryption) {
super(protocol, encryption);
}
public SMTPChannelUpstreamHandler(Protocol protocol, Logger logger) {
super(protocol);
}
/**
* Cleanup temporary files
*
* @param ctx
*/
protected void cleanup(ChannelHandlerContext ctx) {
// Make sure we dispose everything on exit on session close
SMTPSession smtpSession = (SMTPSession) ctx.getAttachment();
if (smtpSession != null) {
LifecycleUtil.dispose(smtpSession.getAttachment(SMTPConstants.MAIL, State.Transaction));
LifecycleUtil.dispose(smtpSession.getAttachment(SMTPConstants.DATA_MIMEMESSAGE_STREAMSOURCE, State.Transaction));
}
super.cleanup(ctx);
}
}
| apache-2.0 |
Vaysman/maven-php-plugin | maven-plugins/it/src/test/resources/org/phpmaven/test/projects/mojos-phar/phar-with-dep1-folders/src/test/php/FooTest.php | 423 | <?php
/**
* The foo test class
*
* @author mepeisen
*/
class FooTest extends PHPUnit_Framework_TestCase
{
/**
* tests the bar function
*/
public function testFoo()
{
include "folderA/MyClassA.php";
$o = new folderA\MyMavenTestClassA();
$this->assertEquals("foo", $o->getFoo());
include "folderB/MyClassB.php";
$o = new folderB\MyMavenTestClassB();
$this->assertEquals("foo", $o->getFoo());
}
} | apache-2.0 |
hasinitg/airavata | modules/credential-store/credential-store-service/src/main/java/org/apache/airavata/credential/store/store/impl/db/CommunityUserDAO.java | 9093 | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.airavata.credential.store.store.impl.db;
import org.apache.airavata.common.utils.DBUtil;
import org.apache.airavata.credential.store.credential.CommunityUser;
import org.apache.airavata.credential.store.store.CredentialStoreException;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
/**
* Data access class for community_user table.
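 * <p>Illustrative usage (connection and transaction handling are the
 * caller's responsibility):</p>
 * <pre>
 *   CommunityUserDAO dao = new CommunityUserDAO();
 *   dao.addCommunityUser(new CommunityUser(gateway, userName, email), tokenId, connection);
 *   CommunityUser user = dao.getCommunityUserByToken(gateway, tokenId, connection);
 * </pre>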
*/
public class CommunityUserDAO extends ParentDAO {
public CommunityUserDAO() {
super();
}
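    /**
     * Persists a community user row keyed by gateway, user name and the
     * credential token id, committing on the supplied connection. Any
     * SQLException is logged and rethrown as a CredentialStoreException.
     */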
public void addCommunityUser(CommunityUser user, String token, Connection connection)
throws CredentialStoreException {
String sql = "INSERT INTO COMMUNITY_USER VALUES (?, ?, ?, ?)";
PreparedStatement preparedStatement = null;
try {
preparedStatement = connection.prepareStatement(sql);
preparedStatement.setString(1, user.getGatewayName());
preparedStatement.setString(2, user.getUserName());
preparedStatement.setString(3, token);
preparedStatement.setString(4, user.getUserEmail());
preparedStatement.executeUpdate();
connection.commit();
} catch (SQLException e) {
StringBuilder stringBuilder = new StringBuilder("Error persisting community user.");
stringBuilder.append("gateway - ").append(user.getGatewayName());
stringBuilder.append("community user name - ").append(user.getUserName());
stringBuilder.append("community user email - ").append(user.getUserEmail());
stringBuilder.append("token id - ").append(token);
log.error(stringBuilder.toString(), e);
throw new CredentialStoreException(stringBuilder.toString(), e);
} finally {
DBUtil.cleanup(preparedStatement);
}
}
public void deleteCommunityUser(CommunityUser user, Connection connection) throws CredentialStoreException {
String sql = "DELETE FROM COMMUNITY_USER WHERE GATEWAY_ID=? AND COMMUNITY_USER_NAME=?";
PreparedStatement preparedStatement = null;
try {
preparedStatement = connection.prepareStatement(sql);
preparedStatement.setString(1, user.getGatewayName());
preparedStatement.setString(2, user.getUserName());
preparedStatement.executeUpdate();
connection.commit();
} catch (SQLException e) {
StringBuilder stringBuilder = new StringBuilder("Error deleting community user.");
stringBuilder.append("gateway - ").append(user.getGatewayName());
stringBuilder.append("community user name - ").append(user.getUserName());
log.error(stringBuilder.toString(), e);
throw new CredentialStoreException(stringBuilder.toString(), e);
} finally {
DBUtil.cleanup(preparedStatement);
}
}
public void deleteCommunityUserByToken(CommunityUser user, String token, Connection connection)
throws CredentialStoreException {
String sql = "DELETE FROM COMMUNITY_USER WHERE GATEWAY_ID=? AND COMMUNITY_USER_NAME=? AND TOKEN_ID=?";
PreparedStatement preparedStatement = null;
try {
preparedStatement = connection.prepareStatement(sql);
preparedStatement.setString(1, user.getGatewayName());
preparedStatement.setString(2, user.getUserName());
preparedStatement.setString(3, token);
preparedStatement.executeUpdate();
connection.commit();
} catch (SQLException e) {
StringBuilder stringBuilder = new StringBuilder("Error deleting community user.");
stringBuilder.append("gateway - ").append(user.getGatewayName());
stringBuilder.append("community user name - ").append(user.getUserName());
log.error(stringBuilder.toString(), e);
throw new CredentialStoreException(stringBuilder.toString(), e);
} finally {
DBUtil.cleanup(preparedStatement);
}
}
public void updateCommunityUser(CommunityUser user) throws CredentialStoreException {
// TODO
}
public CommunityUser getCommunityUser(String gatewayName, String communityUserName, Connection connection)
throws CredentialStoreException {
String sql = "SELECT * FROM COMMUNITY_USER WHERE GATEWAY_ID=? AND COMMUNITY_USER_NAME=?";
PreparedStatement preparedStatement = null;
try {
preparedStatement = connection.prepareStatement(sql);
preparedStatement.setString(1, gatewayName);
preparedStatement.setString(2, communityUserName);
ResultSet resultSet = preparedStatement.executeQuery();
if (resultSet.next()) {
String email = resultSet.getString("COMMUNITY_USER_EMAIL"); // TODO fix typo
return new CommunityUser(gatewayName, communityUserName, email);
}
} catch (SQLException e) {
StringBuilder stringBuilder = new StringBuilder("Error retrieving community user.");
stringBuilder.append("gateway - ").append(gatewayName);
stringBuilder.append("community user name - ").append(communityUserName);
log.error(stringBuilder.toString(), e);
throw new CredentialStoreException(stringBuilder.toString(), e);
} finally {
DBUtil.cleanup(preparedStatement);
}
return null;
}
public CommunityUser getCommunityUserByToken(String gatewayName, String tokenId, Connection connection)
throws CredentialStoreException {
String sql = "SELECT * FROM COMMUNITY_USER WHERE GATEWAY_ID=? AND TOKEN_ID=?";
PreparedStatement preparedStatement = null;
try {
preparedStatement = connection.prepareStatement(sql);
preparedStatement.setString(1, gatewayName);
preparedStatement.setString(2, tokenId);
ResultSet resultSet = preparedStatement.executeQuery();
if (resultSet.next()) {
String communityUserName = resultSet.getString("COMMUNITY_USER_NAME");
String email = resultSet.getString("COMMUNITY_USER_EMAIL"); // TODO fix typo
return new CommunityUser(gatewayName, communityUserName, email);
}
} catch (SQLException e) {
StringBuilder stringBuilder = new StringBuilder("Error retrieving community user.");
stringBuilder.append("gateway - ").append(gatewayName);
stringBuilder.append("token- ").append(tokenId);
log.error(stringBuilder.toString(), e);
throw new CredentialStoreException(stringBuilder.toString(), e);
} finally {
DBUtil.cleanup(preparedStatement);
}
return null;
}
public List<CommunityUser> getCommunityUsers(String gatewayName, Connection connection)
throws CredentialStoreException {
List<CommunityUser> userList = new ArrayList<CommunityUser>();
String sql = "SELECT * FROM COMMUNITY_USER WHERE GATEWAY_ID=?";
PreparedStatement preparedStatement = null;
try {
preparedStatement = connection.prepareStatement(sql);
preparedStatement.setString(1, gatewayName);
ResultSet resultSet = preparedStatement.executeQuery();
while (resultSet.next()) {
String userName = resultSet.getString("COMMUNITY_USER_NAME");
String email = resultSet.getString("COMMUNITY_USER_EMAIL"); // TODO fix typo
userList.add(new CommunityUser(gatewayName, userName, email));
}
} catch (SQLException e) {
StringBuilder stringBuilder = new StringBuilder("Error retrieving community users for ");
stringBuilder.append("gateway - ").append(gatewayName);
log.error(stringBuilder.toString(), e);
throw new CredentialStoreException(stringBuilder.toString(), e);
} finally {
DBUtil.cleanup(preparedStatement);
}
return userList;
}
}
| apache-2.0 |
countshadow/MyScriptJS | src/output/shape/shapeErased.js | 532 | 'use strict';
(function (scope) {
/**
* Shape erased
*
* @class ShapeErased
* @extends ShapeCandidate
* @param {Object} [obj]
* @constructor
*/
function ShapeErased(obj) {
scope.ShapeCandidate.call(this, obj);
}
/**
* Inheritance property
*/
ShapeErased.prototype = new scope.ShapeCandidate();
/**
* Constructor property
*/
ShapeErased.prototype.constructor = ShapeErased;
// Export
scope.ShapeErased = ShapeErased;
})(MyScript); | apache-2.0 |
googlearchive/caja | third_party/java/htmlparser/src/nu/validator/htmlparser/annotation/Inline.java | 1211 | /*
* Copyright (c) 2009 Mozilla Foundation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
package nu.validator.htmlparser.annotation;
public @interface Inline {
}
| apache-2.0 |
MRunFoss/NoahGameFrame | NFServer/NFGameServerScriptPlugin/NFCGameServerScriptModule.cpp | 1286 | // -------------------------------------------------------------------------
// @FileName : NFCGameServerScriptModule.cpp
// @Author : LvSheng.Huang
// @Date : 2013-01-02
// @Module : NFCGameServerScriptModule
// @Desc :
// -------------------------------------------------------------------------
//#include "stdafx.h"
#include "NFCGameServerScriptModule.h"
#include "NFGameServerScriptPlugin.h"
bool NFCGameServerScriptModule::Init()
{
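    // Resolve the sibling modules this script module depends on from the
    // plugin manager; the asserts below guarantee they are all present.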
m_pEventProcessModule = dynamic_cast<NFIEventProcessModule*>(pPluginManager->FindModule("NFCEventProcessModule"));
m_pKernelModule = dynamic_cast<NFIKernelModule*>(pPluginManager->FindModule("NFCKernelModule"));
m_pLogicClassModule = dynamic_cast<NFILogicClassModule*>(pPluginManager->FindModule("NFCLogicClassModule"));
assert(NULL != m_pEventProcessModule);
assert(NULL != m_pKernelModule);
assert(NULL != m_pLogicClassModule);
return true;
}
bool NFCGameServerScriptModule::AfterInit()
{
return true;
}
bool NFCGameServerScriptModule::Shut()
{
return true;
}
bool NFCGameServerScriptModule::Execute(const float fLasFrametime, const float fStartedTime)
{
return true;
}
| apache-2.0 |
goodwinnk/intellij-community | RegExpSupport/src/org/intellij/lang/regexp/RegExpLanguageHost.java | 4005 | /*
* Copyright 2000-2017 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.intellij.lang.regexp;
import com.intellij.psi.PsiElement;
import org.intellij.lang.regexp.psi.*;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import java.util.EnumSet;
/**
* @author yole
*/
public interface RegExpLanguageHost {
EnumSet<RegExpGroup.Type> EMPTY_NAMED_GROUP_TYPES = EnumSet.noneOf(RegExpGroup.Type.class);
boolean characterNeedsEscaping(char c);
boolean supportsPerl5EmbeddedComments();
boolean supportsPossessiveQuantifiers();
boolean supportsPythonConditionalRefs();
boolean supportsNamedGroupSyntax(RegExpGroup group);
boolean supportsNamedGroupRefSyntax(RegExpNamedGroupRef ref);
@NotNull
default EnumSet<RegExpGroup.Type> getSupportedNamedGroupTypes(RegExpElement context) {
return EMPTY_NAMED_GROUP_TYPES;
}
boolean supportsExtendedHexCharacter(RegExpChar regExpChar);
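  /**
   * Checks whether the given name is valid for a named group in this dialect.
   * The default implementation accepts only ASCII letters, digits and
   * underscores.
   */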
default boolean isValidGroupName(String name, @NotNull RegExpGroup group) {
for (int i = 0, length = name.length(); i < length; i++) {
final char c = name.charAt(i);
if (!AsciiUtil.isLetterOrDigit(c) && c != '_') {
return false;
}
}
return true;
}
default boolean supportsSimpleClass(RegExpSimpleClass simpleClass) {
return true;
}
default boolean supportsNamedCharacters(RegExpNamedCharacter namedCharacter) {
return false;
}
default boolean isValidNamedCharacter(RegExpNamedCharacter namedCharacter) {
return supportsNamedCharacters(namedCharacter);
}
default boolean supportsBoundary(RegExpBoundary boundary) {
switch (boundary.getType()) {
case UNICODE_EXTENDED_GRAPHEME:
return false;
case LINE_START:
case LINE_END:
case WORD:
case NON_WORD:
case BEGIN:
case END:
case END_NO_LINE_TERM:
case PREVIOUS_MATCH:
default:
return true;
}
}
default boolean supportsLiteralBackspace(RegExpChar aChar) {
return true;
}
default boolean supportsInlineOptionFlag(char flag, PsiElement context) {
return true;
}
boolean isValidCategory(@NotNull String category);
@NotNull
String[][] getAllKnownProperties();
@Nullable
String getPropertyDescription(@Nullable final String name);
@NotNull
String[][] getKnownCharacterClasses();
/**
* @param number the number element to extract the value from
* @return the value, or null when the value is out of range
*/
@Nullable
default Number getQuantifierValue(@NotNull RegExpNumber number) {
return Double.parseDouble(number.getText());
}
default Lookbehind supportsLookbehind(@NotNull RegExpGroup lookbehindGroup) {
return Lookbehind.FULL; // to not break existing implementations, although rarely actually supported.
}
enum Lookbehind {
/** Lookbehind not supported. */
NOT_SUPPORTED,
/**
* Alternation inside lookbehind (a|b|c) branches must have same length,
* finite repetition with identical min, max values (a{3} or a{3,3}) allowed.
*/
FIXED_LENGTH_ALTERNATION,
/** Alternation (a|bc|def) branches inside look behind may have different length */
VARIABLE_LENGTH_ALTERNATION,
/** Finite repetition inside lookbehind with different minimum, maximum values allowed */
FINITE_REPETITION,
/** Full regex syntax inside lookbehind, i.e. star (*) and plus (*) repetition and backreferences, allowed. */
FULL
}
}
| apache-2.0 |
LabAixBidouille/EmbeddedTeam | courses/examples/CMSIS/Device/ATMEL/sam4l/include/instance/picouart.h | 3076 | /**
* \file
*
* \brief Instance description for PICOUART
*
* Copyright (c) 2014 Atmel Corporation. All rights reserved.
*
* \asf_license_start
*
* \page License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. The name of Atmel may not be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* 4. This software may only be redistributed and used in connection with an
* Atmel microcontroller product.
*
* THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
* EXPRESSLY AND SPECIFICALLY DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* \asf_license_stop
*
*/
#ifndef _SAM4L_PICOUART_INSTANCE_
#define _SAM4L_PICOUART_INSTANCE_
/* ========== Register definition for PICOUART peripheral ========== */
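/* Assembler builds see the registers as plain addresses, while C builds get
   typed volatile dereferences (by Atmel convention, WoReg, RoReg and RwReg
   are write-only, read-only and read/write register typedefs). */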
#if (defined(__ASSEMBLY__) || defined(__IAR_SYSTEMS_ASM__))
#define REG_PICOUART_CR (0x400F1400U) /**< \brief (PICOUART) Control Register */
#define REG_PICOUART_CFG (0x400F1404U) /**< \brief (PICOUART) Configuration Register */
#define REG_PICOUART_SR (0x400F1408U) /**< \brief (PICOUART) Status Register */
#define REG_PICOUART_RHR (0x400F140CU) /**< \brief (PICOUART) Receive Holding Register */
#define REG_PICOUART_VERSION (0x400F1420U) /**< \brief (PICOUART) Version Register */
#else
#define REG_PICOUART_CR (*(WoReg *)0x400F1400U) /**< \brief (PICOUART) Control Register */
#define REG_PICOUART_CFG (*(RwReg *)0x400F1404U) /**< \brief (PICOUART) Configuration Register */
#define REG_PICOUART_SR (*(RoReg *)0x400F1408U) /**< \brief (PICOUART) Status Register */
#define REG_PICOUART_RHR (*(RoReg *)0x400F140CU) /**< \brief (PICOUART) Receive Holding Register */
#define REG_PICOUART_VERSION (*(RoReg *)0x400F1420U) /**< \brief (PICOUART) Version Register */
#endif /* (defined(__ASSEMBLY__) || defined(__IAR_SYSTEMS_ASM__)) */
#endif /* _SAM4L_PICOUART_INSTANCE_ */
| apache-2.0 |
JingchengDu/hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ManagedParentQueue.java | 18592 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueResourceQuotas;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler
.SchedulerDynamicEditException;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.queuemanagement.GuaranteedOrZeroCapacityOverTimePolicy;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica
.FiCaSchedulerApp;
import org.apache.hadoop.yarn.util.resource.Resources;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
 * Auto-creation enabled parent queue. This queue starts without any children;
 * all child leaf queues are auto-created on demand. Pre-configured leaf or
 * parent queues cannot currently co-exist with auto-created leaf queues, and
 * auto-creation is limited to leaf queues.
*/
public class ManagedParentQueue extends AbstractManagedParentQueue {
private boolean shouldFailAutoCreationWhenGuaranteedCapacityExceeded = false;
private static final Logger LOG = LoggerFactory.getLogger(
ManagedParentQueue.class);
public ManagedParentQueue(final CapacitySchedulerQueueContext queueContext,
final String queueName, final CSQueue parent, final CSQueue old)
throws IOException {
super(queueContext, queueName, parent, old);
shouldFailAutoCreationWhenGuaranteedCapacityExceeded =
queueContext.getConfiguration()
.getShouldFailAutoQueueCreationWhenGuaranteedCapacityExceeded(
getQueuePath());
leafQueueTemplate = initializeLeafQueueConfigs().build();
initializeQueueManagementPolicy();
}
@Override
public void reinitialize(CSQueue newlyParsedQueue, Resource clusterResource)
throws IOException {
writeLock.lock();
try {
validate(newlyParsedQueue);
shouldFailAutoCreationWhenGuaranteedCapacityExceeded =
queueContext.getConfiguration()
.getShouldFailAutoQueueCreationWhenGuaranteedCapacityExceeded(
getQueuePath());
//validate if capacity is exceeded for child queues
if (shouldFailAutoCreationWhenGuaranteedCapacityExceeded) {
float childCap = sumOfChildCapacities();
if (getCapacity() < childCap) {
throw new IOException(
"Total of Auto Created leaf queues guaranteed capacity : "
+ childCap + " exceeds Parent queue's " + getQueuePath()
+ " guaranteed capacity " + getCapacity() + ""
+ ".Cannot enforce policy to auto"
+ " create queues beyond parent queue's capacity");
}
}
leafQueueTemplate = initializeLeafQueueConfigs().build();
super.reinitialize(newlyParsedQueue, clusterResource);
// run reinitialize on each existing queue, to trigger absolute cap
// recomputations
for (CSQueue res : this.getChildQueues()) {
res.reinitialize(res, clusterResource);
}
//clear state in policy
reinitializeQueueManagementPolicy();
//reassign capacities according to policy
final List<QueueManagementChange> queueManagementChanges =
queueManagementPolicy.computeQueueManagementChanges();
validateAndApplyQueueManagementChanges(queueManagementChanges);
LOG.info(
"Reinitialized Managed Parent Queue: [{}] with capacity [{}]"
+ " with max capacity [{}]",
getQueueName(), super.getCapacity(), super.getMaximumCapacity());
} catch (YarnException ye) {
LOG.error("Exception while computing policy changes for leaf queue : "
+ getQueuePath(), ye);
throw new IOException(ye);
} finally {
writeLock.unlock();
}
}
private void initializeQueueManagementPolicy() throws IOException {
queueManagementPolicy =
queueContext.getConfiguration().getAutoCreatedQueueManagementPolicyClass(
getQueuePath());
queueManagementPolicy.init(this);
}
private void reinitializeQueueManagementPolicy() throws IOException {
AutoCreatedQueueManagementPolicy managementPolicy =
queueContext.getConfiguration().getAutoCreatedQueueManagementPolicyClass(
getQueuePath());
if (!(managementPolicy.getClass().equals(
this.queueManagementPolicy.getClass()))) {
queueManagementPolicy = managementPolicy;
queueManagementPolicy.init(this);
} else{
queueManagementPolicy.reinitialize(this);
}
}
protected AutoCreatedLeafQueueConfig.Builder initializeLeafQueueConfigs() throws IOException {
AutoCreatedLeafQueueConfig.Builder builder =
new AutoCreatedLeafQueueConfig.Builder();
CapacitySchedulerConfiguration configuration =
queueContext.getConfiguration();
// TODO load configs into CapacitySchedulerConfiguration instead of duplicating them
String leafQueueTemplateConfPrefix = getLeafQueueConfigPrefix(
configuration);
//Load template configuration into CapacitySchedulerConfiguration
CapacitySchedulerConfiguration autoCreatedTemplateConfig =
super.initializeLeafQueueConfigs(leafQueueTemplateConfPrefix);
builder.configuration(autoCreatedTemplateConfig);
QueueResourceQuotas queueResourceQuotas = new QueueResourceQuotas();
setAbsoluteResourceTemplates(configuration, queueResourceQuotas);
QueuePath templateQueuePath = configuration
.getAutoCreatedQueueObjectTemplateConfPrefix(getQueuePath());
Set<String> templateConfiguredNodeLabels = queueContext
.getQueueManager().getConfiguredNodeLabelsForAllQueues()
.getLabelsByQueue(templateQueuePath.getFullPath());
//Load template capacities
QueueCapacities queueCapacities = new QueueCapacities(false);
CSQueueUtils.loadCapacitiesByLabelsFromConf(templateQueuePath,
queueCapacities,
configuration,
templateConfiguredNodeLabels);
/**
     * Populate the leaf queue template capacities with actual values for
     * parent resources whose configuration is defined in ABSOLUTE_RESOURCE
     * format.
*
*/
if (this.capacityConfigType.equals(CapacityConfigType.ABSOLUTE_RESOURCE)) {
updateQueueCapacities(queueCapacities);
}
builder.capacities(queueCapacities);
builder.resourceQuotas(queueResourceQuotas);
return builder;
}
private void setAbsoluteResourceTemplates(CapacitySchedulerConfiguration configuration,
QueueResourceQuotas queueResourceQuotas) throws IOException {
QueuePath templateQueuePath = configuration
.getAutoCreatedQueueObjectTemplateConfPrefix(getQueuePath());
Set<String> templateConfiguredNodeLabels = queueContext
.getQueueManager().getConfiguredNodeLabelsForAllQueues()
.getLabelsByQueue(templateQueuePath.getFullPath());
for (String nodeLabel : templateConfiguredNodeLabels) {
Resource templateMinResource = configuration.getMinimumResourceRequirement(
nodeLabel, templateQueuePath.getFullPath(), resourceTypes);
queueResourceQuotas.setConfiguredMinResource(nodeLabel, templateMinResource);
if (this.capacityConfigType.equals(CapacityConfigType.PERCENTAGE)
&& !templateMinResource.equals(Resources.none())) {
throw new IOException("Managed Parent Queue " + this.getQueuePath()
+ " config type is different from leaf queue template config type");
}
}
}
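  // Recomputes the template's percentage-style capacities from the configured
  // absolute resources: per label, capacity is the template's configured
  // minimum divided by this queue's configured minimum, and maximum capacity
  // is capped by this queue's configured maximum resource.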
private void updateQueueCapacities(QueueCapacities queueCapacities) {
CapacitySchedulerConfiguration configuration =
queueContext.getConfiguration();
for (String label : queueCapacities.getExistingNodeLabels()) {
queueCapacities.setCapacity(label,
resourceCalculator.divide(
queueContext.getClusterResource(),
configuration.getMinimumResourceRequirement(
label,
configuration
.getAutoCreatedQueueTemplateConfPrefix(getQueuePath()),
resourceTypes),
getQueueResourceQuotas().getConfiguredMinResource(label)));
Resource childMaxResource = configuration
.getMaximumResourceRequirement(label,
configuration
.getAutoCreatedQueueTemplateConfPrefix(getQueuePath()),
resourceTypes);
Resource parentMaxRes = getQueueResourceQuotas()
.getConfiguredMaxResource(label);
Resource effMaxResource = Resources.min(
resourceCalculator,
queueContext.getClusterResource(),
childMaxResource.equals(Resources.none()) ? parentMaxRes
: childMaxResource,
parentMaxRes);
queueCapacities.setMaximumCapacity(
label, resourceCalculator.divide(
queueContext.getClusterResource(),
effMaxResource,
getQueueResourceQuotas().getConfiguredMaxResource(label)));
queueCapacities.setAbsoluteCapacity(
label, queueCapacities.getCapacity(label)
* getQueueCapacities().getAbsoluteCapacity(label));
queueCapacities.setAbsoluteMaximumCapacity(label,
queueCapacities.getMaximumCapacity(label)
* getQueueCapacities().getAbsoluteMaximumCapacity(label));
}
}
protected void validate(final CSQueue newlyParsedQueue) throws IOException {
// Sanity check
if (!(newlyParsedQueue instanceof ManagedParentQueue) || !newlyParsedQueue
.getQueuePath().equals(getQueuePath())) {
throw new IOException(
"Trying to reinitialize " + getQueuePath() + " from "
+ newlyParsedQueue.getQueuePath());
}
}
@Override
public void addChildQueue(CSQueue childQueue)
throws SchedulerDynamicEditException, IOException {
writeLock.lock();
try {
if (childQueue == null || !(childQueue instanceof AutoCreatedLeafQueue)) {
throw new SchedulerDynamicEditException(
"Expected child queue to be an instance of AutoCreatedLeafQueue");
}
CapacitySchedulerConfiguration conf = queueContext.getConfiguration();
ManagedParentQueue parentQueue =
(ManagedParentQueue) childQueue.getParent();
if (parentQueue == null) {
throw new SchedulerDynamicEditException(
"Parent Queue is null, should not add child queue!");
}
String leafQueuePath = childQueue.getQueuePath();
int maxQueues = conf.getAutoCreatedQueuesMaxChildQueuesLimit(
parentQueue.getQueuePath());
if (parentQueue.getChildQueues().size() >= maxQueues) {
throw new SchedulerDynamicEditException(
"Cannot auto create leaf queue " + leafQueuePath + ".Max Child "
+ "Queue limit exceeded which is configured as : " + maxQueues
+ " and number of child queues is : " + parentQueue
.getChildQueues().size());
}
if (shouldFailAutoCreationWhenGuaranteedCapacityExceeded) {
if (getLeafQueueTemplate().getQueueCapacities().getAbsoluteCapacity()
+ parentQueue.sumOfChildAbsCapacities() > parentQueue
.getAbsoluteCapacity()) {
throw new SchedulerDynamicEditException(
"Cannot auto create leaf queue " + leafQueuePath + ". Child "
+ "queues capacities have reached parent queue : "
+ parentQueue.getQueuePath() + "'s guaranteed capacity");
}
}
((GuaranteedOrZeroCapacityOverTimePolicy) queueManagementPolicy)
.updateTemplateAbsoluteCapacities(parentQueue.getQueueCapacities());
AutoCreatedLeafQueue leafQueue = (AutoCreatedLeafQueue) childQueue;
super.addChildQueue(leafQueue);
/* Below is to avoid Setting Queue Capacity to NaN when ClusterResource
is zero during RM Startup with DominantResourceCalculator */
if (this.capacityConfigType.equals(
CapacityConfigType.ABSOLUTE_RESOURCE)) {
QueueCapacities queueCapacities =
getLeafQueueTemplate().getQueueCapacities();
updateQueueCapacities(queueCapacities);
}
final AutoCreatedLeafQueueConfig initialLeafQueueTemplate =
queueManagementPolicy.getInitialLeafQueueConfiguration(leafQueue);
leafQueue.reinitializeFromTemplate(initialLeafQueueTemplate);
// Do one update cluster resource call to make sure all absolute resources
// effective resources are updated.
updateClusterResource(queueContext.getClusterResource(),
new ResourceLimits(queueContext.getClusterResource()));
} finally {
writeLock.unlock();
}
}
public List<FiCaSchedulerApp> getScheduleableApplications() {
readLock.lock();
try {
List<FiCaSchedulerApp> apps = new ArrayList<>();
for (CSQueue childQueue : getChildQueues()) {
apps.addAll(((AbstractLeafQueue) childQueue).getApplications());
}
return Collections.unmodifiableList(apps);
} finally {
readLock.unlock();
}
}
public List<FiCaSchedulerApp> getPendingApplications() {
readLock.lock();
try {
List<FiCaSchedulerApp> apps = new ArrayList<>();
for (CSQueue childQueue : getChildQueues()) {
apps.addAll(((AbstractLeafQueue) childQueue).getPendingApplications());
}
return Collections.unmodifiableList(apps);
} finally {
readLock.unlock();
}
}
public List<FiCaSchedulerApp> getAllApplications() {
readLock.lock();
try {
List<FiCaSchedulerApp> apps = new ArrayList<>();
for (CSQueue childQueue : getChildQueues()) {
apps.addAll(((AbstractLeafQueue) childQueue).getAllApplications());
}
return Collections.unmodifiableList(apps);
} finally {
readLock.unlock();
}
}
public String getLeafQueueConfigPrefix(CapacitySchedulerConfiguration conf) {
return CapacitySchedulerConfiguration.PREFIX + conf
.getAutoCreatedQueueTemplateConfPrefix(getQueuePath());
}
public boolean shouldFailAutoCreationWhenGuaranteedCapacityExceeded() {
return shouldFailAutoCreationWhenGuaranteedCapacityExceeded;
}
/**
   * Asynchronously called from the scheduler to validate and apply queue
   * management changes computed by the queue management policy.
   *
   * @param queueManagementChanges the changes to validate and apply
*/
public void validateAndApplyQueueManagementChanges(
List<QueueManagementChange> queueManagementChanges)
throws IOException, SchedulerDynamicEditException {
writeLock.lock();
try {
validateQueueManagementChanges(queueManagementChanges);
applyQueueManagementChanges(queueManagementChanges);
AutoCreatedQueueManagementPolicy policy =
getAutoCreatedQueueManagementPolicy();
//acquires write lock on policy
policy.commitQueueManagementChanges(queueManagementChanges);
} finally {
writeLock.unlock();
}
}
public void validateQueueManagementChanges(
List<QueueManagementChange> queueManagementChanges)
throws SchedulerDynamicEditException {
for (QueueManagementChange queueManagementChange : queueManagementChanges) {
CSQueue childQueue = queueManagementChange.getQueue();
if (!(childQueue instanceof AutoCreatedLeafQueue)) {
throw new SchedulerDynamicEditException(
"queue should be " + "AutoCreatedLeafQueue. Found " + childQueue
.getClass());
}
if (!(AbstractManagedParentQueue.class.
isAssignableFrom(childQueue.getParent().getClass()))) {
LOG.error("Queue " + getQueuePath()
+ " is not an instance of PlanQueue or ManagedParentQueue." + " "
+ "Ignoring update " + queueManagementChanges);
throw new SchedulerDynamicEditException(
"Queue " + getQueuePath() + " is not a AutoEnabledParentQueue."
+ " Ignoring update " + queueManagementChanges);
}
if (queueManagementChange.getQueueAction() ==
QueueManagementChange.QueueAction.UPDATE_QUEUE) {
AutoCreatedLeafQueueConfig template =
queueManagementChange.getUpdatedQueueTemplate();
((AutoCreatedLeafQueue) childQueue).validateConfigurations(template);
}
}
}
private void applyQueueManagementChanges(
List<QueueManagementChange> queueManagementChanges)
throws SchedulerDynamicEditException, IOException {
for (QueueManagementChange queueManagementChange : queueManagementChanges) {
if (queueManagementChange.getQueueAction() ==
QueueManagementChange.QueueAction.UPDATE_QUEUE) {
AutoCreatedLeafQueue childQueueToBeUpdated =
(AutoCreatedLeafQueue) queueManagementChange.getQueue();
//acquires write lock on leaf queue
childQueueToBeUpdated.reinitializeFromTemplate(
queueManagementChange.getUpdatedQueueTemplate());
}
}
}
public void setLeafQueueConfigs(String leafQueueName) {
CapacitySchedulerConfiguration templateConfig = leafQueueTemplate.getLeafQueueConfigs();
for (Map.Entry<String, String> confKeyValuePair : templateConfig) {
final String name = confKeyValuePair.getKey()
.replaceFirst(CapacitySchedulerConfiguration.AUTO_CREATED_LEAF_QUEUE_TEMPLATE_PREFIX,
leafQueueName);
queueContext.setConfigurationEntry(name, confKeyValuePair.getValue());
}
}
} | apache-2.0 |
spinnaker/deck | packages/google/src/loadBalancer/configure/http/hostRule/hostRule.component.html | 2086 | <hr class="host-rule" ng-if="$ctrl.index > 0" />
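<!-- Edits a single host rule: the host patterns it matches, the default
     backend service, and the nested path rules that refine routing. -->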
<div class="container-fluid form-horizontal">
<div class="form-group">
<div class="col-md-4 sm-label-right">
Host Patterns
<help-field key="gce.httpLoadBalancer.hostRule.hostPattern"> </help-field>
</div>
<div class="col-md-4">
<ui-select multiple tagging tagging-label="" ng-model="$ctrl.hostRule.hostPatterns" class="form-control input-sm">
<ui-select-match>{{ $item }}</ui-select-match>
<ui-select-choices repeat="hostPattern in []">
{{ hostPattern }}
</ui-select-choices>
</ui-select>
</div>
<div class="col-md-1">
<button class="btn btn-sm btn-default" ng-click="$ctrl.deleteHostRule()">
<span class="glyphicon glyphicon-trash visible-lg-inline"></span>
<span>Delete</span>
</button>
</div>
</div>
<div class="form-group">
<div class="col-md-4 sm-label-right">
<b>Default Service</b>
</div>
<div class="col-md-4">
<ui-select
ng-model="$ctrl.hostRule.pathMatcher.defaultService"
on-select="$ctrl.command.onBackendServiceSelected($item, $ctrl.command)"
class="form-control input-sm"
>
<ui-select-match placeholder="Select...">
{{ $select.selected }}
</ui-select-match>
<ui-select-choices
repeat="backendService in $ctrl.command.getAllBackendServices($ctrl.command) | filter: $select.search"
>
<div ng-bind-html="backendService | highlight: $select.search"></div>
</ui-select-choices>
</ui-select>
</div>
</div>
<gce-path-rule
path-rule="pathRule"
index="$index"
command="$ctrl.command"
delete-path-rule="$ctrl.deletePathRule($index)"
ng-repeat="pathRule in $ctrl.hostRule.pathMatcher.pathRules"
>
</gce-path-rule>
<div class="row">
<div class="col-md-12">
<button class="add-new btn btn-block" ng-click="$ctrl.addPathRule()">
<span class="glyphicon glyphicon-plus-sign"></span> Add path rule
</button>
</div>
</div>
</div>
| apache-2.0 |
dbmalkovsky/flowable-engine | modules/flowable5-engine/src/main/java/org/activiti/engine/impl/jobexecutor/TimerDeclarationImpl.java | 8975 | /* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.activiti.engine.impl.jobexecutor;
import java.io.Serializable;
import java.text.SimpleDateFormat;
import java.util.Date;
import org.activiti.engine.ActivitiException;
import org.activiti.engine.ActivitiIllegalArgumentException;
import org.activiti.engine.impl.context.Context;
import org.activiti.engine.impl.el.NoExecutionVariableScope;
import org.activiti.engine.impl.persistence.entity.ExecutionEntity;
import org.activiti.engine.impl.persistence.entity.TimerJobEntity;
import org.flowable.common.engine.api.delegate.Expression;
import org.flowable.common.engine.impl.calendar.BusinessCalendar;
import org.flowable.engine.impl.jobexecutor.TimerDeclarationType;
import org.flowable.variable.api.delegate.VariableScope;
import org.joda.time.DateTime;
/**
* @author Tom Baeyens
*/
public class TimerDeclarationImpl implements Serializable {
private static final long serialVersionUID = 1L;
protected Expression description;
protected TimerDeclarationType type;
protected Expression endDateExpression;
protected Expression calendarNameExpression;
protected String jobHandlerType;
protected String jobHandlerConfiguration;
protected String repeat;
protected boolean exclusive = TimerJobEntity.DEFAULT_EXCLUSIVE;
protected int retries = TimerJobEntity.DEFAULT_RETRIES;
protected boolean isInterruptingTimer; // For boundary timers
public TimerDeclarationImpl(Expression expression, TimerDeclarationType type, String jobHandlerType, Expression endDateExpression, Expression calendarNameExpression) {
this(expression, type, jobHandlerType);
this.endDateExpression = endDateExpression;
this.calendarNameExpression = calendarNameExpression;
}
public TimerDeclarationImpl(Expression expression, TimerDeclarationType type, String jobHandlerType) {
this.jobHandlerType = jobHandlerType;
this.description = expression;
this.type = type;
}
public Expression getDescription() {
return description;
}
public String getJobHandlerType() {
return jobHandlerType;
}
public String getJobHandlerConfiguration() {
return jobHandlerConfiguration;
}
public void setJobHandlerConfiguration(String jobHandlerConfiguration) {
this.jobHandlerConfiguration = jobHandlerConfiguration;
}
public String getRepeat() {
return repeat;
}
public void setRepeat(String repeat) {
this.repeat = repeat;
}
public boolean isExclusive() {
return exclusive;
}
public void setExclusive(boolean exclusive) {
this.exclusive = exclusive;
}
public int getRetries() {
return retries;
}
public void setRetries(int retries) {
this.retries = retries;
}
public void setJobHandlerType(String jobHandlerType) {
this.jobHandlerType = jobHandlerType;
}
public boolean isInterruptingTimer() {
return isInterruptingTimer;
}
public void setInterruptingTimer(boolean isInterruptingTimer) {
this.isInterruptingTimer = isInterruptingTimer;
}
public TimerJobEntity prepareTimerEntity(ExecutionEntity executionEntity) {
// ACT-1415: timer-declaration on start-event may contain expressions NOT
// evaluating variables but other context, evaluating should happen nevertheless
VariableScope scopeForExpression = executionEntity;
if (scopeForExpression == null) {
scopeForExpression = NoExecutionVariableScope.getSharedInstance();
}
String calendarNameValue = type.calendarName;
if (this.calendarNameExpression != null) {
calendarNameValue = (String) this.calendarNameExpression.getValue(scopeForExpression);
}
BusinessCalendar businessCalendar = Context
.getProcessEngineConfiguration()
.getBusinessCalendarManager()
.getBusinessCalendar(calendarNameValue);
if (description == null) {
// Prevent NPE from happening in the next line
throw new ActivitiIllegalArgumentException("Timer '" + executionEntity.getActivityId() + "' was not configured with a valid duration/time");
}
String endDateString = null;
String dueDateString = null;
Date duedate = null;
Date endDate = null;
if (endDateExpression != null && !(scopeForExpression instanceof NoExecutionVariableScope)) {
Object endDateValue = endDateExpression.getValue(scopeForExpression);
if (endDateValue instanceof String) {
endDateString = (String) endDateValue;
} else if (endDateValue instanceof Date) {
endDate = (Date) endDateValue;
} else if (endDateValue instanceof DateTime) {
// Joda DateTime support
                endDate = ((DateTime) endDateValue).toDate();
} else {
throw new ActivitiException("Timer '" + executionEntity.getActivityId() + "' was not configured with a valid duration/time, either hand in a java.util.Date or a String in format 'yyyy-MM-dd'T'hh:mm:ss'");
}
if (endDate == null) {
endDate = businessCalendar.resolveEndDate(endDateString);
}
}
Object dueDateValue = description.getValue(scopeForExpression);
if (dueDateValue instanceof String) {
dueDateString = (String) dueDateValue;
} else if (dueDateValue instanceof Date) {
duedate = (Date) dueDateValue;
} else if (dueDateValue instanceof DateTime) {
// Joda DateTime support
duedate = ((DateTime) dueDateValue).toDate();
} else if (dueDateValue != null) {
// dueDateValue==null is OK - but unexpected class type must throw an error.
throw new ActivitiException("Timer '" + executionEntity.getActivityId() + "' was not configured with a valid duration/time, either hand in a java.util.Date or a String in format 'yyyy-MM-dd'T'hh:mm:ss'");
}
if (duedate == null && dueDateString != null) {
duedate = businessCalendar.resolveDuedate(dueDateString);
}
TimerJobEntity timer = null;
// if dueDateValue is null -> this is OK - timer will be null and job not scheduled
if (duedate != null) {
timer = new TimerJobEntity(this);
timer.setDuedate(duedate);
timer.setEndDate(endDate);
if (executionEntity != null) {
timer.setExecution(executionEntity);
timer.setProcessDefinitionId(executionEntity.getProcessDefinitionId());
timer.setProcessInstanceId(executionEntity.getProcessInstanceId());
// Inherit tenant identifier (if applicable)
if (executionEntity.getTenantId() != null) {
timer.setTenantId(executionEntity.getTenantId());
}
}
if (type == TimerDeclarationType.CYCLE) {
// See ACT-1427: A boundary timer with a cancelActivity='true', doesn't need to repeat itself
boolean repeat = !isInterruptingTimer;
// ACT-1951: intermediate catching timer events shouldn't repeat according to spec
if (TimerCatchIntermediateEventJobHandler.TYPE.equals(jobHandlerType)) {
repeat = false;
if (endDate != null) {
long endDateMiliss = endDate.getTime();
long dueDateMiliss = duedate.getTime();
long dueDate = Math.min(endDateMiliss, dueDateMiliss);
timer.setDuedate(new Date(dueDate));
}
}
if (repeat) {
String prepared = prepareRepeat(dueDateString);
timer.setRepeat(prepared);
}
}
}
return timer;
}
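    /**
     * Inserts the current time as the start date of a repeating ISO-8601
     * expression that omits one, e.g. "R3/PT10M" becomes
     * "R3/2015-01-01T10:00:00/PT10M" when the clock reads that instant, so
     * the cycle business calendar can resolve subsequent due dates.
     */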
private String prepareRepeat(String dueDate) {
if (dueDate.startsWith("R") && dueDate.split("/").length == 2) {
SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss");
return dueDate.replace("/", "/" + sdf.format(Context.getProcessEngineConfiguration().getClock().getCurrentTime()) + "/");
}
return dueDate;
}
}
| apache-2.0 |
madhav123/gkmaster | appdomain/src/main/java/org/mifos/customers/checklist/business/CheckListBO.java | 5902 | /*
* Copyright (c) 2005-2011 Grameen Foundation USA
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*
* See also http://www.apache.org/licenses/LICENSE-2.0.html for an
* explanation of the license and how it is applied.
*/
package org.mifos.customers.checklist.business;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;
import org.apache.commons.lang.StringUtils;
import org.mifos.customers.checklist.exceptions.CheckListException;
import org.mifos.customers.checklist.persistence.CheckListPersistence;
import org.mifos.customers.checklist.util.helpers.CheckListConstants;
import org.mifos.customers.checklist.util.helpers.CheckListType;
import org.mifos.framework.business.AbstractBusinessObject;
import org.mifos.framework.exceptions.PersistenceException;
import org.mifos.framework.util.DateTimeService;
public abstract class CheckListBO extends AbstractBusinessObject {
private final Short checklistId;
private String checklistName;
private Short checklistStatus;
private Set<CheckListDetailEntity> checklistDetails;
private Short supportedLocales;
protected CheckListBO() {
this.checklistId = null;
checklistDetails = new LinkedHashSet<CheckListDetailEntity>();
}
protected CheckListBO(String checkListName, Short checkListStatus, List<String> details, Short localeId,
Short userId) throws CheckListException {
setCreateDetails(userId, new DateTimeService().getCurrentJavaDateTime());
this.checklistId = null;
if (details.size() > 0) {
setCheckListDetails(details, localeId);
} else {
throw new CheckListException(CheckListConstants.CHECKLIST_CREATION_EXCEPTION);
}
if (checkListName != null) {
this.checklistName = checkListName;
} else {
throw new CheckListException(CheckListConstants.CHECKLIST_CREATION_EXCEPTION);
}
this.checklistStatus = checkListStatus;
this.supportedLocales = localeId;
}
public Short getChecklistId() {
return checklistId;
}
public String getChecklistName() {
return this.checklistName;
}
@SuppressWarnings("unused")
// see .hbm.xml file
private void setChecklistName(String checklistName) {
this.checklistName = checklistName;
}
public Short getChecklistStatus() {
return this.checklistStatus;
}
@SuppressWarnings("unused")
// see .hbm.xml file
private void setChecklistStatus(Short checklistStatus) {
this.checklistStatus = checklistStatus;
}
public Set<CheckListDetailEntity> getChecklistDetails() {
return this.checklistDetails;
}
@SuppressWarnings("unused")
// see .hbm.xml file
private void setChecklistDetails(Set<CheckListDetailEntity> checklistDetailSet) {
this.checklistDetails = checklistDetailSet;
}
public Short getSupportedLocales() {
return this.supportedLocales;
}
@SuppressWarnings("unused")
// see .hbm.xml file
private void setSupportedLocales(Short supportedLocales) {
this.supportedLocales = supportedLocales;
}
public void addChecklistDetail(CheckListDetailEntity checkListDetailEntity) {
checklistDetails.add(checkListDetailEntity);
}
protected CheckListPersistence getCheckListPersistence() {
return new CheckListPersistence();
}
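    // Each detail line becomes a CheckListDetailEntity with status 1,
    // attached to this checklist in the given locale.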
private void setCheckListDetails(List<String> details, Short locale) {
checklistDetails = new HashSet<CheckListDetailEntity>();
for (String detail : details) {
CheckListDetailEntity checkListDetailEntity = new CheckListDetailEntity(detail, Short.valueOf("1"), this,
locale);
checklistDetails.add(checkListDetailEntity);
}
}
public abstract CheckListType getCheckListType();
protected void update(String checkListName, Short checkListStatus, List<String> details, Short localeId,
Short userId) throws CheckListException {
setUpdateDetails(userId);
if (details == null || details.size() <= 0) {
throw new CheckListException(CheckListConstants.CHECKLIST_CREATION_EXCEPTION);
}
if (StringUtils.isBlank(checkListName)) {
throw new CheckListException(CheckListConstants.CHECKLIST_CREATION_EXCEPTION);
}
this.checklistName = checkListName;
getChecklistDetails().clear();
for (String detail : details) {
            CheckListDetailEntity checkListDetailEntity = new CheckListDetailEntity(detail, Short.valueOf("1"), this, localeId);
getChecklistDetails().add(checkListDetailEntity);
}
this.checklistStatus = checkListStatus;
this.supportedLocales = localeId;
}
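    /**
     * Ensures that no checklist is already attached to the given state of
     * the given master type; a matching record would mean a second checklist
     * for the same state, which is not allowed.
     */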
protected void validateCheckListState(Short masterTypeId, Short stateId, boolean isCustomer)
throws CheckListException {
try {
Long records = getCheckListPersistence().isValidCheckListState(masterTypeId, stateId, isCustomer);
if (records.intValue() != 0) {
throw new CheckListException(CheckListConstants.EXCEPTION_STATE_ALREADY_EXIST);
}
} catch (PersistenceException pe) {
throw new CheckListException(pe);
}
}
}
| apache-2.0 |
adfernandes/mbed | targets/TARGET_Cypress/TARGET_PSOC6/mtb-hal-cat1/include/cyhal_interconnect.h | 4924 | /***************************************************************************//**
* \file cyhal_interconnect.h
*
* \brief
* Provides a high level interface for interacting with the internal digital
* routing on the chip. This interface abstracts out the chip specific details.
* If any chip specific functionality is necessary, or performance is critical
* the low level functions can be used directly.
*
********************************************************************************
* \copyright
* Copyright 2018-2021 Cypress Semiconductor Corporation
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
/**
* \addtogroup group_hal_interconnect Interconnect (Internal Digital Routing)
* \ingroup group_hal
* \{
* High level interface to the Cypress digital routing.
*
* \section subsection_interconnect_features Features
* Facilities for runtime manipulation of the on chip routing.
* The following types of connections are supported:
* * Connection from a peripheral to a pin. (A dedicated connection must exist
* between the pin and the peripheral; see the device datasheet for more details)
* * Connecting two peripherals in hardware using the on-chip trigger signaling
*
* \section subsection_interconnect_quickstart Quick Start
* \ref cyhal_connect_pin can be used to connect a pin to a peripheral. (A dedicated connection must exist
* between the pin and the peripheral; see the device datasheet for more details)
* * \ref cyhal_disconnect_pin can be used to disconnect a pin from a peripheral.
* The drive mode will be reset to High-Z after disconnecting
*
* \section section_interconnect_snippets Code Snippets
*
* \subsection subsection_interconnect_snippet1 Snippet 1: Connecting a pin to TCPWM block
* The following code snippet demonstrates connecting a GPIO pin to an active TCPWM block on a device
* using the \ref cyhal_connect_pin. It is assumed that the TCPWM is already configured and active.<br>
* \snippet hal_interconnect.c snippet_cyhal_interconnect_connect_pin
*
* \subsection subsection_interconnect_snippet2 Snippet 2: Connecting a Timer output signal to a DMA input signal
* The following code snippet demonstrates configuring and connecting a Timer
* which will overflow every 2 seconds and, in doing so, trigger a DMA channel
* start.
* \snippet hal_interconnect.c snippet_cyhal_interconnect_timer_to_dma
*/
#pragma once
#include <stdint.h>
#include <stdbool.h>
#include "cy_result.h"
#include "cyhal_hw_types.h"
#if defined(__cplusplus)
extern "C" {
#endif
/** \addtogroup group_hal_results_interconnect Interconnect HAL Results
* Interconnect specific return codes
* \ingroup group_hal_results
* \{ *//**
*/
/** The source and destination are already connected */
#define CYHAL_INTERCONNECT_RSLT_ALREADY_CONNECTED \
(CYHAL_RSLT_CREATE(CY_RSLT_TYPE_ERROR, CYHAL_RSLT_MODULE_INTERCONNECT, 0))
/** Connection is invalid */
#define CYHAL_INTERCONNECT_RSLT_INVALID_CONNECTION \
(CYHAL_RSLT_CREATE(CY_RSLT_TYPE_ERROR, CYHAL_RSLT_MODULE_INTERCONNECT, 1))
/** Cannot disconnect. Either no connection in the first place or a bad argument */
#define CYHAL_INTERCONNECT_RSLT_CANNOT_DISCONNECT \
(CYHAL_RSLT_CREATE(CY_RSLT_TYPE_ERROR, CYHAL_RSLT_MODULE_INTERCONNECT, 2))
/**
* \}
*/
/** Trigger type */
typedef enum
{
CYHAL_SIGNAL_TYPE_LEVEL = 0, //!< Level triggered
CYHAL_SIGNAL_TYPE_EDGE = 1, //!< Edge triggered
} cyhal_signal_type_t;
/** Connect a pin to a peripheral terminal. This will route a direct connection from the pin to the peripheral.
* Any previous direct connection from the pin will be overridden.<br>
* See \ref subsection_interconnect_snippet1
* @param[in] pin_connection The pin and target peripheral terminal to be connected
* @return The status of the connect request
*/
cy_rslt_t cyhal_connect_pin(const cyhal_resource_pin_mapping_t *pin_connection);
/** Disconnect a peripheral from a pin. This will also reset the pin's drive mode to High-Z.
* @param[in] pin The pin to be disconnected
* @return The status of the disconnect request
*/
cy_rslt_t cyhal_disconnect_pin(cyhal_gpio_t pin);
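/* Illustrative sketch (not part of the API above): a typical connect/use/
 * disconnect sequence. The names `pin_map` and `pin` are assumptions here;
 * `pin_map` would be looked up in the device's pin mapping tables, and `pin`
 * is the cyhal_gpio_t routed by that mapping.
 *
 *     cy_rslt_t rslt = cyhal_connect_pin(pin_map);
 *     if (CY_RSLT_SUCCESS == rslt)
 *     {
 *         // the peripheral is now routed to the pin
 *         rslt = cyhal_disconnect_pin(pin); // also resets the pin to High-Z
 *     }
 */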
#if defined(__cplusplus)
}
#endif
#ifdef CYHAL_INTERCONNECT_IMPL_HEADER
#include CYHAL_INTERCONNECT_IMPL_HEADER
#endif /* CYHAL_INTERCONNECT_IMPL_HEADER */
/** \} group_hal_interconnect */
| apache-2.0 |
pfalabella/rust | src/doc/README.md | 2417 | # Dependencies
[Pandoc](http://johnmacfarlane.net/pandoc/installing.html), a universal
document converter, is required to generate docs as HTML from Rust's
source code.
[po4a](http://po4a.alioth.debian.org/) is required for generating translated
docs from the master (English) docs.
[GNU gettext](http://www.gnu.org/software/gettext/) is required for managing
the translation data.
# Building
To generate all the docs, just run `make docs` from the root of the repository.
This will convert the distributed Markdown docs to HTML and generate HTML docs
for the 'std' and 'extra' libraries.
To generate HTML documentation from one source file/crate, do something like:
~~~~
rustdoc --output-dir html-doc/ --output-format html ../src/libstd/path.rs
~~~~
(This, of course, requires a working build of the `rustdoc` tool.)
# Additional notes
To generate an HTML version of a doc from Markdown manually, you can do
something like:
~~~~
pandoc --from=markdown --to=html5 --number-sections -o rust.html rust.md
~~~~
(rust.md being the Rust Reference Manual.)
The syntax for pandoc flavored markdown can be found at:
http://johnmacfarlane.net/pandoc/README.html#pandocs-markdown
A nice quick reference (for non-pandoc markdown) is at:
http://kramdown.rubyforge.org/quickref.html
# Notes for translators
Notice: The procedure described below is a work in progress. We are working on
the translation system, but the procedure still involves some manual steps for
now.
To start a translation for a new language, look at po4a.conf first.
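For reference, a minimal po4a.conf entry might look like the following sketch
(the language, paths, and options are illustrative, not the exact contents of
our po4a.conf):
~~~~
[po4a_langs] ja
[po4a_paths] src/doc/po/$master.pot $lang:src/doc/po/$lang/$master.po
[type: text] src/doc/rust.md $lang:src/doc/l10n/$lang/rust.md opt:"-o markdown"
~~~~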
To generate .pot and .po files, do something like:
~~~~
po4a --copyright-holder="The Rust Project Developers" \
--package-name="Rust" \
--package-version="0.11.0-pre" \
-M UTF-8 -L UTF-8 \
src/doc/po4a.conf
~~~~
(Change the version number if it is no longer 0.11.0-pre.)
Now you can translate documents with .po files, commonly used with gettext. If
you are not familiar with gettext-based translation, please read the online
manual linked from http://www.gnu.org/software/gettext/ . We use UTF-8 as the
file encoding of .po files.
When you want to make a commit, do the command below before staging your
change:
~~~~
for f in src/doc/po/**/*.po; do
msgattrib --translated $f -o $f.strip
if [ -e $f.strip ]; then
mv $f.strip $f
else
rm $f
fi
done
~~~~
This removes untranslated entries from .po files to save disk space.
| apache-2.0 |
alanorozco/amphtml | validator/cpp/htmlparser/iterators.h | 2557 | #ifndef CPP_HTMLPARSER_ITERATOR_H_
#define CPP_HTMLPARSER_ITERATOR_H_
#include <iterator>
#include <stack>
#include "cpp/htmlparser/node.h"
namespace htmlparser {
class Document;
// A forward iterator that facilitates iterating the DOM tree (from the root
// node) in depth-first order.
//
// Example usage:
// auto doc = parser.Parse(html);
// for (auto iter = doc.begin(); iter != doc.end(); ++iter) {
// ProcessNode(*iter);
// }
//
// The above dom without NodeIterator require a lot of boiler plate code like
// defining a stack class and data structure, knowledge of Node data structure.
//
// Clients should not access this class directly but get handle from Document
// object.
// auto iter = doc.begin();
// auto const_iter = doc.cbegin();
template <bool Const>
class NodeIterator {
public:
// Member typedefs required by std::iterator_traits
// Not the correct type, and not used anyway.
using difference_type = std::ptrdiff_t;
using value_type = Node;
using pointer = std::conditional_t<Const, const Node*, Node*>;
using reference = std::conditional_t<Const, const Node&, Node&>;
using iterator_category = std::forward_iterator_tag;
reference operator*() const { return *current_node_; }
pointer operator->() const { return current_node_; }
// Prefix increment.
auto& operator++() {
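// Depth-first step: if the current node has a child, descend into the first
// child, first saving the next sibling (if any) on the stack so traversal can
// resume there once this subtree is exhausted. Otherwise advance to the next
// sibling; when a branch runs out entirely, pop the most recently saved one.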
if (current_node_->FirstChild()) {
if (current_node_->NextSibling()) {
stack_.push(current_node_->NextSibling());
}
current_node_ = current_node_->FirstChild();
} else {
current_node_ = current_node_->NextSibling();
}
if (!current_node_) {
if (!stack_.empty()) {
current_node_ = stack_.top();
stack_.pop();
}
}
return *this;
}
// Postfix increment.
auto operator++(int) {
auto result = *this; ++*this; return result;
}
template<bool R>
bool operator==(const NodeIterator<R>& rhs) const {
return current_node_ == rhs.current_node_;
}
template<bool R>
bool operator!=(const NodeIterator<R>& rhs) const {
return current_node_ != rhs.current_node_;
}
operator NodeIterator<true>() const {
// Carry the saved-sibling stack over so a converted iterator resumes
// traversal at the same point instead of skipping pending subtrees.
NodeIterator<true> it{current_node_};
it.stack_ = stack_;
return it;
}
private:
explicit NodeIterator(Node* node) : current_node_(node) {}
friend class Document;
friend class NodeIterator<!Const>;
using node_pointer = std::conditional_t<Const, const Node*, Node*>;
node_pointer current_node_;
// Facilitates depth first traversal.
std::stack<Node*> stack_;
};
} // namespace htmlparser
#endif // CPP_HTMLPARSER_ITERATOR_H_
| apache-2.0 |
GoogleCloudPlatform/prometheus-engine | third_party/prometheus_ui/base/web/ui/react-app/node_modules/jsdom/lib/jsdom/living/nodes/HTMLTemplateElement-impl.js | 2038 | "use strict";
const HTMLElementImpl = require("./HTMLElement-impl").implementation;
const Document = require("../generated/Document");
const DocumentFragment = require("../generated/DocumentFragment");
const { cloningSteps, domSymbolTree } = require("../helpers/internal-constants");
const { clone } = require("../node");
class HTMLTemplateElementImpl extends HTMLElementImpl {
constructor(globalObject, args, privateData) {
super(globalObject, args, privateData);
const doc = this._appropriateTemplateContentsOwnerDocument(this._ownerDocument);
this._templateContents = DocumentFragment.createImpl(this._globalObject, [], {
ownerDocument: doc,
host: this
});
}
// https://html.spec.whatwg.org/multipage/scripting.html#appropriate-template-contents-owner-document
_appropriateTemplateContentsOwnerDocument(doc) {
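// Per the spec, each (non-inert) document lazily creates, at most once, a
// shared "inert" document that owns the contents of all of its templates;
// content parked there is not rendered and its scripts are not executed.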
if (!doc._isInertTemplateDocument) {
if (doc._associatedInertTemplateDocument === undefined) {
const newDoc = Document.createImpl(this._globalObject, [], {
options: {
parsingMode: doc._parsingMode,
encoding: doc._encoding
}
});
newDoc._isInertTemplateDocument = true;
doc._associatedInertTemplateDocument = newDoc;
}
doc = doc._associatedInertTemplateDocument;
}
return doc;
}
// https://html.spec.whatwg.org/multipage/scripting.html#template-adopting-steps
_adoptingSteps() {
const doc = this._appropriateTemplateContentsOwnerDocument(this._ownerDocument);
doc._adoptNode(this._templateContents);
}
get content() {
return this._templateContents;
}
[cloningSteps](copy, node, document, cloneChildren) {
if (!cloneChildren) {
return;
}
for (const child of domSymbolTree.childrenIterator(node._templateContents)) {
const childCopy = clone(child, copy._templateContents._ownerDocument, true);
copy._templateContents.appendChild(childCopy);
}
}
}
module.exports = {
implementation: HTMLTemplateElementImpl
};
| apache-2.0 |
atul-bhouraskar/closure-templates | java/src/com/google/template/soy/types/SoyObjectType.java | 3172 | /*
* Copyright 2013 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.template.soy.types;
import com.google.common.collect.ImmutableSet;
import com.google.template.soy.base.SoyBackendKind;
/**
* Type representing an object. Object types have a unique name,
* and can have zero or more member fields.
*
* <p>Object types are always referred to by their fully-qualified name; That
* is, there's no concept of packages or scopes in this type system (those
* concepts are already factored out before the type definition reaches this
* point.)
*
* <p> Important: Do not use outside of Soy code (treat as superpackage-private).
*
*/
public interface SoyObjectType extends SoyType {
/**
* Return the fully-qualified name of this object type.
*/
String getName();
/**
* Return the fully-qualified name of this type for a given output context.
*
* @param backend Which backend we're generating code for.
*/
String getNameForBackend(SoyBackendKind backend);
/**
* Return the data type of the field with the given name; if there is no such
* field, return {@code null}.
*
* @param fieldName The name of the field.
* @return The field type, or null.
*/
SoyType getFieldType(String fieldName);
/**
* Return all the possible field names that can be referenced from this ObjectType.
*/
ImmutableSet<String> getFieldNames();
/**
* Return the expression used to access the value of the field, for a given output context.
*
* @param fieldContainerExpr An expression that evaluates to the container of the named field.
* This expression may have any operator precedence that binds more tightly than unary
* operators.
* @param fieldName Name of the field.
* @param backend Which backend we're generating code for.
* @return Expression used to access the field data.
*/
String getFieldAccessExpr(String fieldContainerExpr, String fieldName, SoyBackendKind backend);
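// Illustration only (hypothetical values, not prescribed by this interface):
// for the JS backend an implementation might turn
// getFieldAccessExpr("opt_data", "fooBar", SoyBackendKind.JS_SRC)
// into "opt_data.fooBar", while a proto-backed field could instead map to a
// getter call such as "opt_data.getFooBar()".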
/**
* In some cases, {@link #getFieldAccessExpr accessing a field} requires importing
* symbols into the generated code (an example being protobuf extension fields,
* which require importing the extension type). If this field requires imports,
* then this method returns the strings representing the symbols that need to
* be imported. Otherwise, it returns the empty set.
*
* @param fieldName The name of the field being accessed.
* @param backend Which backend we're generating code for.
* @return The symbols, in the backend's output language.
*/
ImmutableSet<String> getFieldAccessImports(String fieldName, SoyBackendKind backend);
}
| apache-2.0 |
quarian/dataverse | src/main/java/edu/harvard/iq/dataverse/api/imports/ImportUtil.java | 352 | /*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package edu.harvard.iq.dataverse.api.imports;
/**
*
* @author ellenk
*/
public interface ImportUtil {
public enum ImportType { NEW, MIGRATION, HARVEST }
}
| apache-2.0 |
apache/incubator-asterixdb-hyracks | hyracks/hyracks-tests/hyracks-storage-am-btree-test/src/test/java/org/apache/hyracks/storage/am/btree/BTreeStatsTest.java | 8109 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.hyracks.storage.am.btree;
import java.io.DataOutput;
import java.util.Random;
import java.util.logging.Level;
import org.apache.hyracks.storage.am.common.api.*;
import org.junit.Test;
import org.apache.hyracks.api.comm.IFrame;
import org.apache.hyracks.api.comm.IFrameTupleAccessor;
import org.apache.hyracks.api.comm.VSizeFrame;
import org.apache.hyracks.api.context.IHyracksTaskContext;
import org.apache.hyracks.api.dataflow.value.IBinaryComparatorFactory;
import org.apache.hyracks.api.dataflow.value.ISerializerDeserializer;
import org.apache.hyracks.api.dataflow.value.ITypeTraits;
import org.apache.hyracks.api.dataflow.value.RecordDescriptor;
import org.apache.hyracks.data.std.accessors.PointableBinaryComparatorFactory;
import org.apache.hyracks.data.std.primitive.IntegerPointable;
import org.apache.hyracks.dataflow.common.comm.io.ArrayTupleBuilder;
import org.apache.hyracks.dataflow.common.comm.io.FrameTupleAccessor;
import org.apache.hyracks.dataflow.common.comm.io.FrameTupleAppender;
import org.apache.hyracks.dataflow.common.data.accessors.FrameTupleReference;
import org.apache.hyracks.dataflow.common.data.marshalling.IntegerSerializerDeserializer;
import org.apache.hyracks.storage.am.btree.api.IBTreeInteriorFrame;
import org.apache.hyracks.storage.am.btree.api.IBTreeLeafFrame;
import org.apache.hyracks.storage.am.btree.frames.BTreeNSMInteriorFrameFactory;
import org.apache.hyracks.storage.am.btree.frames.BTreeNSMLeafFrameFactory;
import org.apache.hyracks.storage.am.btree.impls.BTree;
import org.apache.hyracks.storage.am.btree.util.AbstractBTreeTest;
import org.apache.hyracks.storage.am.common.TestOperationCallback;
import org.apache.hyracks.storage.am.common.api.IMetaDataPageManager;
import org.apache.hyracks.storage.am.common.frames.LIFOMetaDataFrameFactory;
import org.apache.hyracks.storage.am.common.freepage.LinkedMetaDataPageManager;
import org.apache.hyracks.storage.am.common.tuples.TypeAwareTupleWriterFactory;
import org.apache.hyracks.storage.am.common.util.TreeIndexBufferCacheWarmup;
import org.apache.hyracks.storage.am.common.util.TreeIndexStats;
import org.apache.hyracks.storage.am.common.util.TreeIndexStatsGatherer;
import org.apache.hyracks.storage.common.buffercache.IBufferCache;
import org.apache.hyracks.storage.common.file.IFileMapProvider;
import org.apache.hyracks.test.support.TestStorageManagerComponentHolder;
import org.apache.hyracks.test.support.TestUtils;
@SuppressWarnings("rawtypes")
public class BTreeStatsTest extends AbstractBTreeTest {
private static final int PAGE_SIZE = 4096;
private static final int NUM_PAGES = 1000;
private static final int MAX_OPEN_FILES = 10;
private static final int HYRACKS_FRAME_SIZE = 128;
private final IHyracksTaskContext ctx = TestUtils.create(HYRACKS_FRAME_SIZE);
@Test
public void test01() throws Exception {
TestStorageManagerComponentHolder.init(PAGE_SIZE, NUM_PAGES, MAX_OPEN_FILES);
IBufferCache bufferCache = harness.getBufferCache();
IFileMapProvider fmp = harness.getFileMapProvider();
// declare fields
int fieldCount = 2;
ITypeTraits[] typeTraits = new ITypeTraits[fieldCount];
typeTraits[0] = IntegerPointable.TYPE_TRAITS;
typeTraits[1] = IntegerPointable.TYPE_TRAITS;
// declare keys
int keyFieldCount = 1;
IBinaryComparatorFactory[] cmpFactories = new IBinaryComparatorFactory[keyFieldCount];
cmpFactories[0] = PointableBinaryComparatorFactory.of(IntegerPointable.FACTORY);
TypeAwareTupleWriterFactory tupleWriterFactory = new TypeAwareTupleWriterFactory(typeTraits);
ITreeIndexFrameFactory leafFrameFactory = new BTreeNSMLeafFrameFactory(tupleWriterFactory);
ITreeIndexFrameFactory interiorFrameFactory = new BTreeNSMInteriorFrameFactory(tupleWriterFactory);
ITreeIndexMetaDataFrameFactory metaFrameFactory = new LIFOMetaDataFrameFactory();
IBTreeLeafFrame leafFrame = (IBTreeLeafFrame) leafFrameFactory.createFrame();
IBTreeInteriorFrame interiorFrame = (IBTreeInteriorFrame) interiorFrameFactory.createFrame();
ITreeIndexMetaDataFrame metaFrame = metaFrameFactory.createFrame();
IMetaDataPageManager freePageManager = new LinkedMetaDataPageManager(bufferCache, metaFrameFactory);
BTree btree = new BTree(bufferCache, fmp, freePageManager, interiorFrameFactory, leafFrameFactory,
cmpFactories, fieldCount, harness.getFileReference());
btree.create();
btree.activate();
Random rnd = new Random();
rnd.setSeed(50);
long start = System.currentTimeMillis();
if (LOGGER.isLoggable(Level.INFO)) {
LOGGER.info("INSERTING INTO TREE");
}
IFrame frame = new VSizeFrame(ctx);
FrameTupleAppender appender = new FrameTupleAppender();
ArrayTupleBuilder tb = new ArrayTupleBuilder(fieldCount);
DataOutput dos = tb.getDataOutput();
ISerializerDeserializer[] recDescSers = { IntegerSerializerDeserializer.INSTANCE,
IntegerSerializerDeserializer.INSTANCE };
RecordDescriptor recDesc = new RecordDescriptor(recDescSers);
IFrameTupleAccessor accessor = new FrameTupleAccessor(recDesc);
accessor.reset(frame.getBuffer());
FrameTupleReference tuple = new FrameTupleReference();
ITreeIndexAccessor indexAccessor = btree.createAccessor(TestOperationCallback.INSTANCE,
TestOperationCallback.INSTANCE);
// insert 100000 tuples with random keys (duplicates expected)
for (int i = 0; i < 100000; i++) {
int f0 = rnd.nextInt() % 100000;
int f1 = 5;
tb.reset();
IntegerSerializerDeserializer.INSTANCE.serialize(f0, dos);
tb.addFieldEndOffset();
IntegerSerializerDeserializer.INSTANCE.serialize(f1, dos);
tb.addFieldEndOffset();
appender.reset(frame, true);
appender.append(tb.getFieldEndOffsets(), tb.getByteArray(), 0, tb.getSize());
tuple.reset(accessor, 0);
if (LOGGER.isLoggable(Level.INFO)) {
if (i % 10000 == 0) {
long end = System.currentTimeMillis();
LOGGER.info("INSERTING " + i + " : " + f0 + " " + f1 + " " + (end - start));
}
}
try {
indexAccessor.insert(tuple);
} catch (TreeIndexException e) {
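// Expected for duplicate keys (keys are random values mod 100000); ignore.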
} catch (Exception e) {
e.printStackTrace();
}
}
int fileId = fmp.lookupFileId(harness.getFileReference());
TreeIndexStatsGatherer statsGatherer = new TreeIndexStatsGatherer(bufferCache, freePageManager, fileId,
btree.getRootPageId());
TreeIndexStats stats = statsGatherer.gatherStats(leafFrame, interiorFrame, metaFrame);
if (LOGGER.isLoggable(Level.INFO)) {
LOGGER.info("\n" + stats.toString());
}
TreeIndexBufferCacheWarmup bufferCacheWarmup = new TreeIndexBufferCacheWarmup(bufferCache, freePageManager,
fileId);
bufferCacheWarmup.warmup(leafFrame, metaFrame, new int[] { 1, 2 }, new int[] { 2, 5 });
btree.deactivate();
btree.destroy();
bufferCache.close();
}
}
| apache-2.0 |
schatt/lightwave | vmdir/server/vmkdc/globals.c | 842 | /*
* Copyright © 2012-2015 VMware, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the “License”); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an “AS IS” BASIS, without
* warranties or conditions of any kind, EITHER EXPRESS OR IMPLIED. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
/*
* Module Name: Kdc main
*
* Filename: globals.c
*
* Abstract:
*
* Globals
*
*/
#include "includes.h"
// All global variables are automatically initialized to 0 for free
VMKDC_GLOBALS gVmkdcGlobals;
| apache-2.0 |
andrewpsp/chef | spec/integration/recipes/recipe_dsl_spec.rb | 55483 | require 'support/shared/integration/integration_helper'
describe "Recipe DSL methods" do
include IntegrationSupport
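# Namer hands out a fresh index per example so tests can mint unique resource
# and DSL names without collisions between dynamically defined classes.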
module Namer
extend self
attr_accessor :current_index
end
before(:all) { Namer.current_index = 1 }
before { Namer.current_index += 1 }
context "with resource 'base_thingy' declared as BaseThingy" do
before(:context) {
class BaseThingy < Chef::Resource
resource_name 'base_thingy'
default_action :create
class << self
attr_accessor :created_name
attr_accessor :created_resource
attr_accessor :created_provider
end
def provider
Provider
end
class Provider < Chef::Provider
def load_current_resource
end
def action_create
BaseThingy.created_name = new_resource.name
BaseThingy.created_resource = new_resource.class
BaseThingy.created_provider = self.class
end
end
end
# Modules to put stuff in
module RecipeDSLSpecNamespace; end
module RecipeDSLSpecNamespace::Bar; end
}
before :each do
BaseThingy.created_resource = nil
BaseThingy.created_provider = nil
end
it "creates base_thingy when you call base_thingy in a recipe" do
recipe = converge {
base_thingy 'blah' do; end
}
expect(recipe.logged_warnings).to eq ''
expect(BaseThingy.created_name).to eq 'blah'
expect(BaseThingy.created_resource).to eq BaseThingy
end
it "errors out when you call base_thingy do ... end in a recipe" do
expect_converge {
base_thingy do; end
}.to raise_error(ArgumentError, 'You must supply a name when declaring a base_thingy resource')
end
it "emits a warning when you call base_thingy 'foo', 'bar' do ... end in a recipe" do
Chef::Config[:treat_deprecation_warnings_as_errors] = false
recipe = converge {
base_thingy 'foo', 'bar' do
end
}
expect(recipe.logged_warnings).to match(/Cannot create resource base_thingy with more than one argument. All arguments except the name \("foo"\) will be ignored. This will cause an error in Chef 13. Arguments: \["foo", "bar"\]/)
expect(BaseThingy.created_name).to eq 'foo'
expect(BaseThingy.created_resource).to eq BaseThingy
end
context "Deprecated automatic resource DSL" do
before do
Chef::Config[:treat_deprecation_warnings_as_errors] = false
end
context "with a resource 'backcompat_thingy' declared in Chef::Resource and Chef::Provider" do
before(:context) {
class Chef::Resource::BackcompatThingy < Chef::Resource
default_action :create
end
class Chef::Provider::BackcompatThingy < Chef::Provider
def load_current_resource
end
def action_create
BaseThingy.created_resource = new_resource.class
BaseThingy.created_provider = self.class
end
end
}
it "backcompat_thingy creates a Chef::Resource::BackcompatThingy" do
recipe = converge {
backcompat_thingy 'blah' do; end
}
expect(BaseThingy.created_resource).to eq Chef::Resource::BackcompatThingy
expect(BaseThingy.created_provider).to eq Chef::Provider::BackcompatThingy
end
context "and another resource 'backcompat_thingy' in BackcompatThingy with 'provides'" do
before(:context) {
class RecipeDSLSpecNamespace::BackcompatThingy < BaseThingy
provides :backcompat_thingy
resource_name :backcompat_thingy
end
}
it "backcompat_thingy creates a BackcompatThingy" do
recipe = converge {
backcompat_thingy 'blah' do; end
}
expect(recipe.logged_warnings).to match(/Class Chef::Provider::BackcompatThingy does not declare 'resource_name :backcompat_thingy'./)
expect(BaseThingy.created_resource).not_to be_nil
end
end
end
context "with a resource named RecipeDSLSpecNamespace::Bar::BarThingy" do
before(:context) {
class RecipeDSLSpecNamespace::Bar::BarThingy < BaseThingy
end
}
it "bar_thingy does not work" do
expect_converge {
bar_thingy 'blah' do; end
}.to raise_error(NoMethodError)
end
end
context "with a resource named Chef::Resource::NoNameThingy with resource_name nil" do
before(:context) {
class Chef::Resource::NoNameThingy < BaseThingy
resource_name nil
end
}
it "no_name_thingy does not work" do
expect_converge {
no_name_thingy 'blah' do; end
}.to raise_error(NoMethodError)
end
end
context "with a resource named AnotherNoNameThingy with resource_name :another_thingy_name" do
before(:context) {
class AnotherNoNameThingy < BaseThingy
resource_name :another_thingy_name
end
}
it "another_no_name_thingy does not work" do
expect_converge {
another_no_name_thingy 'blah' do; end
}.to raise_error(NoMethodError)
end
it "another_thingy_name works" do
recipe = converge {
another_thingy_name 'blah' do; end
}
expect(recipe.logged_warnings).to eq ''
expect(BaseThingy.created_resource).to eq(AnotherNoNameThingy)
end
end
context "with a resource named AnotherNoNameThingy2 with resource_name :another_thingy_name2; resource_name :another_thingy_name3" do
before(:context) {
class AnotherNoNameThingy2 < BaseThingy
resource_name :another_thingy_name2
resource_name :another_thingy_name3
end
}
it "another_no_name_thingy does not work" do
expect_converge {
another_no_name_thingy2 'blah' do; end
}.to raise_error(NoMethodError)
end
it "another_thingy_name2 does not work" do
expect_converge {
another_thingy_name2 'blah' do; end
}.to raise_error(NoMethodError)
end
it "yet_another_thingy_name3 works" do
recipe = converge {
another_thingy_name3 'blah' do; end
}
expect(recipe.logged_warnings).to eq ''
expect(BaseThingy.created_resource).to eq(AnotherNoNameThingy2)
end
end
context "provides overriding resource_name" do
context "with a resource named AnotherNoNameThingy3 with provides :another_no_name_thingy3, os: 'blarghle'" do
before(:context) {
class AnotherNoNameThingy3 < BaseThingy
resource_name :another_no_name_thingy_3
provides :another_no_name_thingy3, os: 'blarghle'
end
}
it "and os = linux, another_no_name_thingy3 does not work" do
expect_converge {
# TODO this is an ugly way to test, make Cheffish expose node attrs
run_context.node.automatic[:os] = 'linux'
another_no_name_thingy3 'blah' do; end
}.to raise_error(Chef::Exceptions::NoSuchResourceType)
end
it "and os = blarghle, another_no_name_thingy3 works" do
recipe = converge {
# TODO this is an ugly way to test, make Cheffish expose node attrs
run_context.node.automatic[:os] = 'blarghle'
another_no_name_thingy3 'blah' do; end
}
expect(recipe.logged_warnings).to eq ''
expect(BaseThingy.created_resource).to eq (AnotherNoNameThingy3)
end
end
context "with a resource named AnotherNoNameThingy4 with two provides" do
before(:context) {
class AnotherNoNameThingy4 < BaseThingy
resource_name :another_no_name_thingy_4
provides :another_no_name_thingy4, os: 'blarghle'
provides :another_no_name_thingy4, platform_family: 'foo'
end
}
it "and os = linux, another_no_name_thingy4 does not work" do
expect_converge {
# TODO this is an ugly way to test, make Cheffish expose node attrs
run_context.node.automatic[:os] = 'linux'
another_no_name_thingy4 'blah' do; end
}.to raise_error(Chef::Exceptions::NoSuchResourceType)
end
it "and os = blarghle, another_no_name_thingy4 works" do
recipe = converge {
# TODO this is an ugly way to test, make Cheffish expose node attrs
run_context.node.automatic[:os] = 'blarghle'
another_no_name_thingy4 'blah' do; end
}
expect(recipe.logged_warnings).to eq ''
expect(BaseThingy.created_resource).to eq (AnotherNoNameThingy4)
end
it "and platform_family = foo, another_no_name_thingy4 works" do
recipe = converge {
# TODO this is an ugly way to test, make Cheffish expose node attrs
run_context.node.automatic[:platform_family] = 'foo'
another_no_name_thingy4 'blah' do; end
}
expect(recipe.logged_warnings).to eq ''
expect(BaseThingy.created_resource).to eq (AnotherNoNameThingy4)
end
end
context "with a resource named AnotherNoNameThingy5, a different resource_name, and a provides with the original resource_name" do
before(:context) {
class AnotherNoNameThingy5 < BaseThingy
resource_name :another_thingy_name_for_another_no_name_thingy5
provides :another_no_name_thingy5, os: 'blarghle'
end
}
it "and os = linux, another_no_name_thingy5 does not work" do
expect_converge {
# this is an ugly way to test, make Cheffish expose node attrs
run_context.node.automatic[:os] = 'linux'
another_no_name_thingy5 'blah' do; end
}.to raise_error(Chef::Exceptions::NoSuchResourceType)
end
it "and os = blarghle, another_no_name_thingy5 works" do
recipe = converge {
# this is an ugly way to test, make Cheffish expose node attrs
run_context.node.automatic[:os] = 'blarghle'
another_no_name_thingy5 'blah' do; end
}
expect(recipe.logged_warnings).to eq ''
expect(BaseThingy.created_resource).to eq (AnotherNoNameThingy5)
end
it "the new resource name can be used in a recipe" do
recipe = converge {
another_thingy_name_for_another_no_name_thingy5 'blah' do; end
}
expect(recipe.logged_warnings).to eq ''
expect(BaseThingy.created_resource).to eq (AnotherNoNameThingy5)
end
end
context "with a resource named AnotherNoNameThingy6, a provides with the original resource name, and a different resource_name" do
before(:context) {
class AnotherNoNameThingy6 < BaseThingy
provides :another_no_name_thingy6, os: 'blarghle'
resource_name :another_thingy_name_for_another_no_name_thingy6
end
}
it "and os = linux, another_no_name_thingy6 does not work" do
expect_converge {
# this is an ugly way to test, make Cheffish expose node attrs
run_context.node.automatic[:os] = 'linux'
another_no_name_thingy6 'blah' do; end
}.to raise_error(Chef::Exceptions::NoSuchResourceType)
end
it "and os = blarghle, another_no_name_thingy6 works" do
recipe = converge {
# this is an ugly way to test, make Cheffish expose node attrs
run_context.node.automatic[:os] = 'blarghle'
another_no_name_thingy6 'blah' do; end
}
expect(recipe.logged_warnings).to eq ''
expect(BaseThingy.created_resource).to eq (AnotherNoNameThingy6)
end
it "the new resource name can be used in a recipe" do
recipe = converge {
another_thingy_name_for_another_no_name_thingy6 'blah' do; end
}
expect(recipe.logged_warnings).to eq ''
expect(BaseThingy.created_resource).to eq (AnotherNoNameThingy6)
end
end
context "with a resource named AnotherNoNameThingy7, a new resource_name, and provides with that new resource name" do
before(:context) {
class AnotherNoNameThingy7 < BaseThingy
resource_name :another_thingy_name_for_another_no_name_thingy7
provides :another_thingy_name_for_another_no_name_thingy7, os: 'blarghle'
end
}
it "and os = linux, another_thingy_name_for_another_no_name_thingy7 does not work" do
expect_converge {
# this is an ugly way to test, make Cheffish expose node attrs
run_context.node.automatic[:os] = 'linux'
another_thingy_name_for_another_no_name_thingy7 'blah' do; end
}.to raise_error(Chef::Exceptions::NoSuchResourceType)
end
it "and os = blarghle, another_thingy_name_for_another_no_name_thingy7 works" do
recipe = converge {
# this is an ugly way to test, make Cheffish expose node attrs
run_context.node.automatic[:os] = 'blarghle'
another_thingy_name_for_another_no_name_thingy7 'blah' do; end
}
expect(recipe.logged_warnings).to eq ''
expect(BaseThingy.created_resource).to eq (AnotherNoNameThingy7)
end
it "the old resource name does not work" do
expect_converge {
# this is an ugly way to test, make Cheffish expose node attrs
run_context.node.automatic[:os] = 'linux'
another_no_name_thingy_7 'blah' do; end
}.to raise_error(NoMethodError)
end
end
# opposite order from the previous test (provides, then resource_name)
context "with a resource named AnotherNoNameThingy8, a provides with a new resource name, and resource_name with that new resource name" do
before(:context) {
class AnotherNoNameThingy8 < BaseThingy
provides :another_thingy_name_for_another_no_name_thingy8, os: 'blarghle'
resource_name :another_thingy_name_for_another_no_name_thingy8
end
}
it "and os = linux, another_thingy_name_for_another_no_name_thingy8 does not work" do
expect_converge {
# this is an ugly way to test, make Cheffish expose node attrs
run_context.node.automatic[:os] = 'linux'
another_thingy_name_for_another_no_name_thingy8 'blah' do; end
}.to raise_error(Chef::Exceptions::NoSuchResourceType)
end
it "and os = blarghle, another_thingy_name_for_another_no_name_thingy8 works" do
recipe = converge {
# this is an ugly way to test, make Cheffish expose node attrs
run_context.node.automatic[:os] = 'blarghle'
another_thingy_name_for_another_no_name_thingy8 'blah' do; end
}
expect(recipe.logged_warnings).to eq ''
expect(BaseThingy.created_resource).to eq (AnotherNoNameThingy8)
end
it "the old resource name does not work" do
expect_converge {
# this is an ugly way to test, make Cheffish expose node attrs
run_context.node.automatic[:os] = 'linux'
another_thingy_name8 'blah' do; end
}.to raise_error(NoMethodError)
end
end
end
end
context "provides" do
context "when MySupplier provides :hemlock" do
before(:context) {
class RecipeDSLSpecNamespace::MySupplier < BaseThingy
resource_name :hemlock
end
}
it "my_supplier does not work in a recipe" do
expect_converge {
my_supplier 'blah' do; end
}.to raise_error(NoMethodError)
end
it "hemlock works in a recipe" do
expect_recipe {
hemlock 'blah' do; end
}.to emit_no_warnings_or_errors
expect(BaseThingy.created_resource).to eq RecipeDSLSpecNamespace::MySupplier
end
end
context "when Thingy3 has resource_name :thingy3" do
before(:context) {
class RecipeDSLSpecNamespace::Thingy3 < BaseThingy
resource_name :thingy3
end
}
it "thingy3 works in a recipe" do
expect_recipe {
thingy3 'blah' do; end
}.to emit_no_warnings_or_errors
expect(BaseThingy.created_resource).to eq RecipeDSLSpecNamespace::Thingy3
end
context "and Thingy4 has resource_name :thingy3" do
before(:context) {
class RecipeDSLSpecNamespace::Thingy4 < BaseThingy
resource_name :thingy3
end
}
it "thingy3 works in a recipe and yields Thingy3 (the alphabetical one)" do
recipe = converge {
thingy3 'blah' do; end
}
expect(BaseThingy.created_resource).to eq RecipeDSLSpecNamespace::Thingy3
end
it "thingy4 does not work in a recipe" do
expect_converge {
thingy4 'blah' do; end
}.to raise_error(NoMethodError)
end
it "resource_matching_short_name returns Thingy4" do
expect(Chef::Resource.resource_matching_short_name(:thingy3)).to eq RecipeDSLSpecNamespace::Thingy3
end
end
end
context "when Thingy5 has resource_name :thingy5 and provides :thingy5reverse, :thingy5_2 and :thingy5_2reverse" do
before(:context) {
class RecipeDSLSpecNamespace::Thingy5 < BaseThingy
resource_name :thingy5
provides :thingy5reverse
provides :thingy5_2
provides :thingy5_2reverse
end
}
it "thingy5 works in a recipe" do
expect_recipe {
thingy5 'blah' do; end
}.to emit_no_warnings_or_errors
expect(BaseThingy.created_resource).to eq RecipeDSLSpecNamespace::Thingy5
end
context "and Thingy6 provides :thingy5" do
before(:context) {
class RecipeDSLSpecNamespace::Thingy6 < BaseThingy
resource_name :thingy6
provides :thingy5
end
}
it "thingy6 works in a recipe and yields Thingy6" do
recipe = converge {
thingy6 'blah' do; end
}
expect(BaseThingy.created_resource).to eq RecipeDSLSpecNamespace::Thingy6
end
it "thingy5 works in a recipe and yields Foo::Thingy5 (the alphabetical one)" do
recipe = converge {
thingy5 'blah' do; end
}
expect(BaseThingy.created_resource).to eq RecipeDSLSpecNamespace::Thingy5
end
it "resource_matching_short_name returns Thingy5" do
expect(Chef::Resource.resource_matching_short_name(:thingy5)).to eq RecipeDSLSpecNamespace::Thingy5
end
context "and AThingy5 provides :thingy5reverse" do
before(:context) {
class RecipeDSLSpecNamespace::AThingy5 < BaseThingy
resource_name :thingy5reverse
end
}
it "thingy5reverse works in a recipe and yields AThingy5 (the alphabetical one)" do
recipe = converge {
thingy5reverse 'blah' do; end
}
expect(BaseThingy.created_resource).to eq RecipeDSLSpecNamespace::AThingy5
end
end
context "and ZRecipeDSLSpecNamespace::Thingy5 provides :thingy5_2" do
before(:context) {
module ZRecipeDSLSpecNamespace
class Thingy5 < BaseThingy
resource_name :thingy5_2
end
end
}
it "thingy5_2 works in a recipe and yields the RecipeDSLSpaceNamespace one (the alphabetical one)" do
recipe = converge {
thingy5_2 'blah' do; end
}
expect(BaseThingy.created_resource).to eq RecipeDSLSpecNamespace::Thingy5
end
end
context "and ARecipeDSLSpecNamespace::Thingy5 provides :thingy5_2" do
before(:context) {
module ARecipeDSLSpecNamespace
class Thingy5 < BaseThingy
resource_name :thingy5_2reverse
end
end
}
it "thingy5_2reverse works in a recipe and yields the ARecipeDSLSpaceNamespace one (the alphabetical one)" do
recipe = converge {
thingy5_2reverse 'blah' do; end
}
expect(BaseThingy.created_resource).to eq ARecipeDSLSpecNamespace::Thingy5
end
end
end
context "when Thingy3 has resource_name :thingy3" do
before(:context) {
class RecipeDSLSpecNamespace::Thingy3 < BaseThingy
resource_name :thingy3
end
}
it "thingy3 works in a recipe" do
expect_recipe {
thingy3 'blah' do; end
}.to emit_no_warnings_or_errors
expect(BaseThingy.created_resource).to eq RecipeDSLSpecNamespace::Thingy3
end
context "and Thingy4 has resource_name :thingy3" do
before(:context) {
class RecipeDSLSpecNamespace::Thingy4 < BaseThingy
resource_name :thingy3
end
}
it "thingy3 works in a recipe and yields Thingy3 (the alphabetical one)" do
recipe = converge {
thingy3 'blah' do; end
}
expect(BaseThingy.created_resource).to eq RecipeDSLSpecNamespace::Thingy3
end
it "thingy4 does not work in a recipe" do
expect_converge {
thingy4 'blah' do; end
}.to raise_error(NoMethodError)
end
it "resource_matching_short_name returns Thingy4" do
expect(Chef::Resource.resource_matching_short_name(:thingy3)).to eq RecipeDSLSpecNamespace::Thingy3
end
end
context "and Thingy4 has resource_name :thingy3" do
before(:context) {
class RecipeDSLSpecNamespace::Thingy4 < BaseThingy
resource_name :thingy3
end
}
it "thingy3 works in a recipe and yields Thingy3 (the alphabetical one)" do
recipe = converge {
thingy3 'blah' do; end
}
expect(BaseThingy.created_resource).to eq RecipeDSLSpecNamespace::Thingy3
end
it "thingy4 does not work in a recipe" do
expect_converge {
thingy4 'blah' do; end
}.to raise_error(NoMethodError)
end
it "resource_matching_short_name returns Thingy4" do
expect(Chef::Resource.resource_matching_short_name(:thingy3)).to eq RecipeDSLSpecNamespace::Thingy3
end
end
end
end
context "when Thingy7 provides :thingy8" do
before(:context) {
class RecipeDSLSpecNamespace::Thingy7 < BaseThingy
resource_name :thingy7
provides :thingy8
end
}
context "and Thingy8 has resource_name :thingy8" do
before(:context) {
class RecipeDSLSpecNamespace::Thingy8 < BaseThingy
resource_name :thingy8
end
}
it "thingy7 works in a recipe and yields Thingy7" do
recipe = converge {
thingy7 'blah' do; end
}
expect(BaseThingy.created_resource).to eq RecipeDSLSpecNamespace::Thingy7
end
it "thingy8 works in a recipe and yields Thingy7 (alphabetical)" do
recipe = converge {
thingy8 'blah' do; end
}
expect(BaseThingy.created_resource).to eq RecipeDSLSpecNamespace::Thingy7
end
it "resource_matching_short_name returns Thingy8" do
expect(Chef::Resource.resource_matching_short_name(:thingy8)).to eq RecipeDSLSpecNamespace::Thingy8
end
end
end
context "when Thingy12 provides :thingy12, :twizzle and :twizzle2" do
before(:context) {
class RecipeDSLSpecNamespace::Thingy12 < BaseThingy
resource_name :thingy12
provides :twizzle
provides :twizzle2
end
}
it "thingy12 works in a recipe and yields Thingy12" do
expect_recipe {
thingy12 'blah' do; end
}.to emit_no_warnings_or_errors
expect(BaseThingy.created_resource).to eq RecipeDSLSpecNamespace::Thingy12
end
it "twizzle works in a recipe and yields Thingy12" do
expect_recipe {
twizzle 'blah' do; end
}.to emit_no_warnings_or_errors
expect(BaseThingy.created_resource).to eq RecipeDSLSpecNamespace::Thingy12
end
it "twizzle2 works in a recipe and yields Thingy12" do
expect_recipe {
twizzle2 'blah' do; end
}.to emit_no_warnings_or_errors
expect(BaseThingy.created_resource).to eq RecipeDSLSpecNamespace::Thingy12
end
end
context "with platform-specific resources 'my_super_thingy_foo' and 'my_super_thingy_bar'" do
before(:context) {
class MySuperThingyFoo < BaseThingy
resource_name :my_super_thingy_foo
provides :my_super_thingy, platform: 'foo'
end
class MySuperThingyBar < BaseThingy
resource_name :my_super_thingy_bar
provides :my_super_thingy, platform: 'bar'
end
}
it "A run with platform 'foo' uses MySuperThingyFoo" do
r = Cheffish::ChefRun.new(chef_config)
r.client.run_context.node.automatic['platform'] = 'foo'
r.compile_recipe {
my_super_thingy 'blah' do; end
}
r.converge
expect(r).to emit_no_warnings_or_errors
expect(BaseThingy.created_resource).to eq MySuperThingyFoo
end
it "A run with platform 'bar' uses MySuperThingyBar" do
r = Cheffish::ChefRun.new(chef_config)
r.client.run_context.node.automatic['platform'] = 'bar'
r.compile_recipe {
my_super_thingy 'blah' do; end
}
r.converge
expect(r).to emit_no_warnings_or_errors
expect(BaseThingy.created_resource).to eq MySuperThingyBar
end
it "A run with platform 'x' reports that my_super_thingy is not supported" do
r = Cheffish::ChefRun.new(chef_config)
r.client.run_context.node.automatic['platform'] = 'x'
expect {
r.compile_recipe {
my_super_thingy 'blah' do; end
}
}.to raise_error(Chef::Exceptions::NoSuchResourceType)
end
end
context "when Thingy9 provides :thingy9" do
before(:context) {
class RecipeDSLSpecNamespace::Thingy9 < BaseThingy
resource_name :thingy9
end
}
it "declaring a resource providing the same :thingy9 produces a warning" do
expect(Chef::Log).to receive(:warn).with("You declared a new resource RecipeDSLSpecNamespace::Thingy9AlternateProvider for resource thingy9, but it comes alphabetically after RecipeDSLSpecNamespace::Thingy9 and has the same filters ({}), so it will not be used. Use override: true if you want to use it for thingy9.")
class RecipeDSLSpecNamespace::Thingy9AlternateProvider < BaseThingy
resource_name :thingy9
end
end
end
context "when Thingy10 provides :thingy10" do
before(:context) {
class RecipeDSLSpecNamespace::Thingy10 < BaseThingy
resource_name :thingy10
end
}
it "declaring a resource providing the same :thingy10 with override: true does not produce a warning" do
expect(Chef::Log).not_to receive(:warn)
class RecipeDSLSpecNamespace::Thingy10AlternateProvider < BaseThingy
provides :thingy10, override: true
end
end
end
context "when Thingy11 provides :thingy11" do
before(:context) {
class RecipeDSLSpecNamespace::Thingy11 < BaseThingy
resource_name :thingy11
end
}
it "declaring a resource providing the same :thingy11 with os: 'linux' does not produce a warning" do
expect(Chef::Log).not_to receive(:warn)
class RecipeDSLSpecNamespace::Thingy11AlternateProvider < BaseThingy
provides :thingy11, os: 'linux'
end
end
end
end
context "with a resource named 'B' with resource name :two_classes_one_dsl" do
let(:two_classes_one_dsl) { :"two_classes_one_dsl#{Namer.current_index}" }
let(:resource_class) {
result = Class.new(BaseThingy) do
def self.name
"B"
end
def self.to_s; name; end
def self.inspect; name.inspect; end
end
result.resource_name two_classes_one_dsl
result
}
before { resource_class } # pull on it so it gets defined before the recipe runs
context "and another resource named 'A' with resource_name :two_classes_one_dsl" do
let(:resource_class_a) {
result = Class.new(BaseThingy) do
def self.name
"A"
end
def self.to_s; name; end
def self.inspect; name.inspect; end
end
result.resource_name two_classes_one_dsl
result
}
before { resource_class_a } # pull on it so it gets defined before the recipe runs
it "two_classes_one_dsl resolves to A (alphabetically earliest)" do
two_classes_one_dsl = self.two_classes_one_dsl
recipe = converge {
instance_eval("#{two_classes_one_dsl} 'blah'")
}
expect(recipe.logged_warnings).to eq ''
expect(BaseThingy.created_resource).to eq resource_class_a
end
it "resource_matching_short_name returns B" do
expect(Chef::Resource.resource_matching_short_name(two_classes_one_dsl)).to eq resource_class_a
end
end
context "and another resource named 'Z' with resource_name :two_classes_one_dsl" do
let(:resource_class_z) {
result = Class.new(BaseThingy) do
def self.name
"Z"
end
def self.to_s; name; end
def self.inspect; name.inspect; end
end
result.resource_name two_classes_one_dsl
result
}
before { resource_class_z } # pull on it so it gets defined before the recipe runs
it "two_classes_one_dsl resolves to B (alphabetically earliest)" do
two_classes_one_dsl = self.two_classes_one_dsl
recipe = converge {
instance_eval("#{two_classes_one_dsl} 'blah'")
}
expect(recipe.logged_warnings).to eq ''
expect(BaseThingy.created_resource).to eq resource_class
end
it "resource_matching_short_name returns B" do
expect(Chef::Resource.resource_matching_short_name(two_classes_one_dsl)).to eq resource_class
end
context "and a priority array [ Z, B ]" do
before do
Chef.set_resource_priority_array(two_classes_one_dsl, [ resource_class_z, resource_class ])
end
it "two_classes_one_dsl resolves to Z (respects the priority array)" do
two_classes_one_dsl = self.two_classes_one_dsl
recipe = converge {
instance_eval("#{two_classes_one_dsl} 'blah'")
}
expect(recipe.logged_warnings).to eq ''
expect(BaseThingy.created_resource).to eq resource_class_z
end
it "resource_matching_short_name returns B" do
expect(Chef::Resource.resource_matching_short_name(two_classes_one_dsl)).to eq resource_class
end
context "when Z provides(:two_classes_one_dsl) { false }" do
before do
resource_class_z.provides(two_classes_one_dsl) { false }
end
it "two_classes_one_dsl resolves to B (picks the next thing in the priority array)" do
two_classes_one_dsl = self.two_classes_one_dsl
recipe = converge {
instance_eval("#{two_classes_one_dsl} 'blah'")
}
expect(recipe.logged_warnings).to eq ''
expect(BaseThingy.created_resource).to eq resource_class
end
it "resource_matching_short_name returns B" do
expect(Chef::Resource.resource_matching_short_name(two_classes_one_dsl)).to eq resource_class
end
end
end
context "and priority arrays [ B ] and [ Z ]" do
before do
Chef.set_resource_priority_array(two_classes_one_dsl, [ resource_class ])
Chef.set_resource_priority_array(two_classes_one_dsl, [ resource_class_z ])
end
it "two_classes_one_dsl resolves to Z (respects the most recent priority array)" do
two_classes_one_dsl = self.two_classes_one_dsl
recipe = converge {
instance_eval("#{two_classes_one_dsl} 'blah'")
}
expect(recipe.logged_warnings).to eq ''
expect(BaseThingy.created_resource).to eq resource_class_z
end
it "resource_matching_short_name returns B" do
expect(Chef::Resource.resource_matching_short_name(two_classes_one_dsl)).to eq resource_class
end
context "when Z provides(:two_classes_one_dsl) { false }" do
before do
resource_class_z.provides(two_classes_one_dsl) { false }
end
it "two_classes_one_dsl resolves to B (picks the first match from the other priority array)" do
two_classes_one_dsl = self.two_classes_one_dsl
recipe = converge {
instance_eval("#{two_classes_one_dsl} 'blah'")
}
expect(recipe.logged_warnings).to eq ''
expect(BaseThingy.created_resource).to eq resource_class
end
it "resource_matching_short_name returns B" do
expect(Chef::Resource.resource_matching_short_name(two_classes_one_dsl)).to eq resource_class
end
end
end
context "and a priority array [ Z ]" do
before do
Chef.set_resource_priority_array(two_classes_one_dsl, [ resource_class_z ])
end
context "when Z provides(:two_classes_one_dsl) { false }" do
before do
resource_class_z.provides(two_classes_one_dsl) { false }
end
it "two_classes_one_dsl resolves to B (picks the first match outside the priority array)" do
two_classes_one_dsl = self.two_classes_one_dsl
recipe = converge {
instance_eval("#{two_classes_one_dsl} 'blah'")
}
expect(recipe.logged_warnings).to eq ''
expect(BaseThingy.created_resource).to eq resource_class
end
it "resource_matching_short_name returns B" do
expect(Chef::Resource.resource_matching_short_name(two_classes_one_dsl)).to eq resource_class
end
end
end
end
context "and a provider named 'B' which provides :two_classes_one_dsl" do
before do
resource_class.send(:define_method, :provider) { nil }
end
let(:provider_class) {
result = Class.new(BaseThingy::Provider) do
def self.name
"B"
end
def self.to_s; name; end
def self.inspect; name.inspect; end
end
result.provides two_classes_one_dsl
result
}
before { provider_class } # pull on it so it gets defined before the recipe runs
context "and another provider named 'A'" do
let(:provider_class_a) {
result = Class.new(BaseThingy::Provider) do
def self.name
"A"
end
def self.to_s; name; end
def self.inspect; name.inspect; end
end
result
}
context "which provides :two_classes_one_dsl" do
before { provider_class_a.provides two_classes_one_dsl }
it "two_classes_one_dsl resolves to A (alphabetically earliest)" do
two_classes_one_dsl = self.two_classes_one_dsl
recipe = converge {
instance_eval("#{two_classes_one_dsl} 'blah'")
}
expect(recipe.logged_warnings).to eq ''
expect(BaseThingy.created_provider).to eq provider_class_a
end
end
context "which provides(:two_classes_one_dsl) { false }" do
before { provider_class_a.provides(two_classes_one_dsl) { false } }
it "two_classes_one_dsl resolves to B (since A declined)" do
two_classes_one_dsl = self.two_classes_one_dsl
recipe = converge {
instance_eval("#{two_classes_one_dsl} 'blah'")
}
expect(recipe.logged_warnings).to eq ''
expect(BaseThingy.created_provider).to eq provider_class
end
end
end
context "and another provider named 'Z'" do
let(:provider_class_z) {
result = Class.new(BaseThingy::Provider) do
def self.name
"Z"
end
def self.to_s; name; end
def self.inspect; name.inspect; end
end
result
}
before { provider_class_z } # pull on it so it gets defined before the recipe runs
context "which provides :two_classes_one_dsl" do
before { provider_class_z.provides two_classes_one_dsl }
it "two_classes_one_dsl resolves to B (alphabetically earliest)" do
two_classes_one_dsl = self.two_classes_one_dsl
recipe = converge {
instance_eval("#{two_classes_one_dsl} 'blah'")
}
expect(recipe.logged_warnings).to eq ''
expect(BaseThingy.created_provider).to eq provider_class
end
context "with a priority array [ Z, B ]" do
before { Chef.set_provider_priority_array two_classes_one_dsl, [ provider_class_z, provider_class ] }
it "two_classes_one_dsl resolves to Z (respects the priority map)" do
two_classes_one_dsl = self.two_classes_one_dsl
recipe = converge {
instance_eval("#{two_classes_one_dsl} 'blah'")
}
expect(recipe.logged_warnings).to eq ''
expect(BaseThingy.created_provider).to eq provider_class_z
end
end
end
context "which provides(:two_classes_one_dsl) { false }" do
before { provider_class_z.provides(two_classes_one_dsl) { false } }
context "with a priority array [ Z, B ]" do
before { Chef.set_provider_priority_array two_classes_one_dsl, [ provider_class_z, provider_class ] }
it "two_classes_one_dsl resolves to B (the next one in the priority map)" do
two_classes_one_dsl = self.two_classes_one_dsl
recipe = converge {
instance_eval("#{two_classes_one_dsl} 'blah'")
}
expect(recipe.logged_warnings).to eq ''
expect(BaseThingy.created_provider).to eq provider_class
end
end
context "with priority arrays [ B ] and [ Z ]" do
before { Chef.set_provider_priority_array two_classes_one_dsl, [ provider_class_z ] }
before { Chef.set_provider_priority_array two_classes_one_dsl, [ provider_class ] }
it "two_classes_one_dsl resolves to B (the one in the next priority map)" do
two_classes_one_dsl = self.two_classes_one_dsl
recipe = converge {
instance_eval("#{two_classes_one_dsl} 'blah'")
}
expect(recipe.logged_warnings).to eq ''
expect(BaseThingy.created_provider).to eq provider_class
end
end
end
end
end
context "and another resource Blarghle with provides :two_classes_one_dsl, os: 'blarghle'" do
let(:resource_class_blarghle) {
result = Class.new(BaseThingy) do
def self.name
"Blarghle"
end
def self.to_s; name; end
def self.inspect; name.inspect; end
end
result.resource_name two_classes_one_dsl
result.provides two_classes_one_dsl, os: 'blarghle'
result
}
before { resource_class_blarghle } # pull on it so it gets defined before the recipe runs
it "on os = blarghle, two_classes_one_dsl resolves to Blarghle" do
two_classes_one_dsl = self.two_classes_one_dsl
recipe = converge {
# this is an ugly way to test, make Cheffish expose node attrs
run_context.node.automatic[:os] = 'blarghle'
instance_eval("#{two_classes_one_dsl} 'blah' do; end")
}
expect(recipe.logged_warnings).to eq ''
expect(BaseThingy.created_resource).to eq resource_class_blarghle
end
it "on os = linux, two_classes_one_dsl resolves to B" do
two_classes_one_dsl = self.two_classes_one_dsl
recipe = converge {
# this is an ugly way to test, make Cheffish expose node attrs
run_context.node.automatic[:os] = 'linux'
instance_eval("#{two_classes_one_dsl} 'blah' do; end")
}
expect(recipe.logged_warnings).to eq ''
expect(BaseThingy.created_resource).to eq resource_class
end
end
end
context "with a resource MyResource" do
let(:resource_class) { Class.new(BaseThingy) do
def self.called_provides
@called_provides
end
def to_s
"MyResource"
end
end }
let(:my_resource) { :"my_resource#{Namer.current_index}" }
let(:blarghle_blarghle_little_star) { :"blarghle_blarghle_little_star#{Namer.current_index}" }
context "with resource_name :my_resource" do
before {
resource_class.resource_name my_resource
}
context "with provides? returning true to my_resource" do
before {
my_resource = self.my_resource
resource_class.define_singleton_method(:provides?) do |node, resource_name|
@called_provides = true
resource_name == my_resource
end
}
it "my_resource returns the resource and calls provides?, but does not emit a warning" do
dsl_name = self.my_resource
recipe = converge {
instance_eval("#{dsl_name} 'foo'")
}
expect(recipe.logged_warnings).to eq ''
expect(BaseThingy.created_resource).to eq resource_class
expect(resource_class.called_provides).to be_truthy
end
end
context "with provides? returning true to blarghle_blarghle_little_star and not resource_name" do
before do
blarghle_blarghle_little_star = self.blarghle_blarghle_little_star
resource_class.define_singleton_method(:provides?) do |node, resource_name|
@called_provides = true
resource_name == blarghle_blarghle_little_star
end
end
it "my_resource does not return the resource" do
dsl_name = self.my_resource
expect_converge {
instance_eval("#{dsl_name} 'foo'")
}.to raise_error(Chef::Exceptions::NoSuchResourceType)
expect(resource_class.called_provides).to be_truthy
end
it "blarghle_blarghle_little_star 'foo' returns the resource and emits a warning" do
Chef::Config[:treat_deprecation_warnings_as_errors] = false
dsl_name = self.blarghle_blarghle_little_star
recipe = converge {
instance_eval("#{dsl_name} 'foo'")
}
expect(recipe.logged_warnings).to include "WARN: #{resource_class}.provides? returned true when asked if it provides DSL #{dsl_name}, but provides :#{dsl_name} was never called!"
expect(BaseThingy.created_resource).to eq resource_class
expect(resource_class.called_provides).to be_truthy
end
end
context "and a provider" do
let(:provider_class) do
Class.new(BaseThingy::Provider) do
def self.name
"MyProvider"
end
def self.to_s; name; end
def self.inspect; name.inspect; end
def self.called_provides
@called_provides
end
end
end
before do
resource_class.send(:define_method, :provider) { nil }
end
context "that provides :my_resource" do
before do
provider_class.provides my_resource
end
context "with supports? returning true" do
before do
provider_class.define_singleton_method(:supports?) { |resource,action| true }
end
it "my_resource runs the provider and does not emit a warning" do
my_resource = self.my_resource
recipe = converge {
instance_eval("#{my_resource} 'foo'")
}
expect(recipe.logged_warnings).to eq ''
expect(BaseThingy.created_provider).to eq provider_class
end
context "and another provider supporting :my_resource with supports? false" do
let(:provider_class2) do
my_resource = self.my_resource
Class.new(BaseThingy::Provider) do
def self.name
"MyProvider2"
end
def self.to_s; name; end
def self.inspect; name.inspect; end
def self.called_provides
@called_provides
end
provides my_resource
def self.supports?(resource, action)
false
end
end
end
before { provider_class2 } # make sure the provider class shows up
it "my_resource runs the first provider" do
my_resource = self.my_resource
recipe = converge {
instance_eval("#{my_resource} 'foo'")
}
expect(recipe.logged_warnings).to eq ''
expect(BaseThingy.created_provider).to eq provider_class
end
end
end
context "with supports? returning false" do
before do
provider_class.define_singleton_method(:supports?) { |resource,action| false }
end
# TODO no warning? ick
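# (Behavior under test: a provider whose supports? returns false is still
# selected when it is the only provider that provides the resource; contrast
# with the context below, where a second provider with supports? == true wins.)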
it "my_resource runs the provider anyway" do
my_resource = self.my_resource
recipe = converge {
instance_eval("#{my_resource} 'foo'")
}
expect(recipe.logged_warnings).to eq ''
expect(BaseThingy.created_provider).to eq provider_class
end
context "and another provider supporting :my_resource with supports? true" do
let(:provider_class2) do
my_resource = self.my_resource
Class.new(BaseThingy::Provider) do
def self.name
"MyProvider2"
end
def self.to_s; name; end
def self.inspect; name.inspect; end
def self.called_provides
@called_provides
end
provides my_resource
def self.supports?(resource, action)
true
end
end
end
before { provider_class2 } # make sure the provider class shows up
it "my_resource runs the other provider" do
my_resource = self.my_resource
recipe = converge {
instance_eval("#{my_resource} 'foo'")
}
expect(recipe.logged_warnings).to eq ''
expect(BaseThingy.created_provider).to eq provider_class2
end
end
end
end
context "with provides? returning true" do
before {
my_resource = self.my_resource
provider_class.define_singleton_method(:provides?) do |node, resource|
@called_provides = true
resource.declared_type == my_resource
end
}
context "that provides :my_resource" do
before {
provider_class.provides my_resource
}
it "my_resource calls the provider (and calls provides?), but does not emit a warning" do
my_resource = self.my_resource
recipe = converge {
instance_eval("#{my_resource} 'foo'")
}
expect(recipe.logged_warnings).to eq ''
expect(BaseThingy.created_provider).to eq provider_class
expect(provider_class.called_provides).to be_truthy
end
end
context "that does not call provides :my_resource" do
it "my_resource calls the provider (and calls provides?), and emits a warning" do
Chef::Config[:treat_deprecation_warnings_as_errors] = false
my_resource = self.my_resource
recipe = converge {
instance_eval("#{my_resource} 'foo'")
}
expect(recipe.logged_warnings).to include("WARN: #{provider_class}.provides? returned true when asked if it provides DSL #{my_resource}, but provides :#{my_resource} was never called!")
expect(BaseThingy.created_provider).to eq provider_class
expect(provider_class.called_provides).to be_truthy
end
end
end
context "with provides? returning false to my_resource" do
before {
my_resource = self.my_resource
provider_class.define_singleton_method(:provides?) do |node, resource|
@called_provides = true
false
end
}
context "that provides :my_resource" do
before {
provider_class.provides my_resource
}
it "my_resource fails to find a provider (and calls provides)" do
my_resource = self.my_resource
expect_converge {
instance_eval("#{my_resource} 'foo'")
}.to raise_error(Chef::Exceptions::ProviderNotFound)
expect(provider_class.called_provides).to be_truthy
end
end
context "that does not provide :my_resource" do
it "my_resource fails to find a provider (and calls provides)" do
my_resource = self.my_resource
expect_converge {
instance_eval("#{my_resource} 'foo'")
}.to raise_error(Chef::Exceptions::ProviderNotFound)
expect(provider_class.called_provides).to be_truthy
end
end
end
end
end
end
end
before(:all) { Namer.current_index = 0 }
before { Namer.current_index += 1 }
context "with an LWRP that declares actions" do
let(:resource_class) {
Class.new(Chef::Resource::LWRPBase) do
provides :"recipe_dsl_spec#{Namer.current_index}"
actions :create
end
}
let(:resource) {
resource_class.new("blah", run_context)
}
it "The actions are part of actions along with :nothing" do
expect(resource_class.actions).to eq [ :nothing, :create ]
end
it "The actions are part of allowed_actions along with :nothing" do
expect(resource.allowed_actions).to eq [ :nothing, :create ]
end
context "and a subclass that declares more actions" do
let(:subresource_class) {
Class.new(Chef::Resource::LWRPBase) do
provides :"recipe_dsl_spec_sub#{Namer.current_index}"
actions :delete
end
}
let(:subresource) {
subresource_class.new("subblah", run_context)
}
it "The parent class actions are not part of actions" do
expect(subresource_class.actions).to eq [ :nothing, :delete ]
end
it "The parent class actions are not part of allowed_actions" do
expect(subresource.allowed_actions).to eq [ :nothing, :delete ]
end
it "The parent class actions do not change" do
expect(resource_class.actions).to eq [ :nothing, :create ]
expect(resource.allowed_actions).to eq [ :nothing, :create ]
end
end
end
context "with a dynamically defined resource and regular provider" do
before(:context) do
Class.new(Chef::Resource) do
resource_name :lw_resource_with_hw_provider_test_case
default_action :create
attr_accessor :created_provider
end
class Chef::Provider::LwResourceWithHwProviderTestCase < Chef::Provider
def load_current_resource
end
def action_create
new_resource.created_provider = self.class
end
end
end
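# A sketch of the name mapping exercised below (inferred from the assertion,
# not from Chef internals): the DSL name :lw_resource_with_hw_provider_test_case
# is camel-cased to look up Chef::Provider::LwResourceWithHwProviderTestCase.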
it "looks up the provider in Chef::Provider converting the resource name from snake case to camel case" do
resource = nil
recipe = converge {
resource = lw_resource_with_hw_provider_test_case 'blah' do; end
}
expect(resource.created_provider).to eq(Chef::Provider::LwResourceWithHwProviderTestCase)
end
end
end
| apache-2.0 |
luna1x/chef-server | vendor/ruby/1.9.1/gems/chef-11.6.2/spec/integration/knife/upload_spec.rb | 41092 | #
# Author:: John Keiser (<[email protected]>)
# Copyright:: Copyright (c) 2013 Opscode, Inc.
# License:: Apache License, Version 2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
require 'support/shared/integration/integration_helper'
require 'chef/knife/upload'
require 'chef/knife/diff'
require 'chef/knife/raw'
describe 'knife upload' do
extend IntegrationSupport
include KnifeSupport
context 'without versioned cookbooks' do
when_the_chef_server "has one of each thing" do
client 'x', {}
cookbook 'x', '1.0.0', { 'metadata.rb' => 'version "1.0.0"' }
data_bag 'x', { 'y' => {} }
environment 'x', {}
node 'x', {}
role 'x', {}
user 'x', {}
when_the_repository 'has only top-level directories' do
directory 'clients'
directory 'cookbooks'
directory 'data_bags'
directory 'environments'
directory 'nodes'
directory 'roles'
directory 'users'
it 'knife upload does nothing' do
knife('upload /').should_succeed ''
knife('diff --name-status /').should_succeed <<EOM
D\t/clients/chef-validator.json
D\t/clients/chef-webui.json
D\t/clients/x.json
D\t/cookbooks/x
D\t/data_bags/x
D\t/environments/_default.json
D\t/environments/x.json
D\t/nodes/x.json
D\t/roles/x.json
D\t/users/admin.json
D\t/users/x.json
EOM
end
it 'knife upload --purge deletes everything' do
knife('upload --purge /').should_succeed(<<EOM, :stderr => "WARNING: /environments/_default.json cannot be deleted (default environment cannot be modified).\n")
Deleted extra entry /clients/chef-validator.json (purge is on)
Deleted extra entry /clients/chef-webui.json (purge is on)
Deleted extra entry /clients/x.json (purge is on)
Deleted extra entry /cookbooks/x (purge is on)
Deleted extra entry /data_bags/x (purge is on)
Deleted extra entry /environments/x.json (purge is on)
Deleted extra entry /nodes/x.json (purge is on)
Deleted extra entry /roles/x.json (purge is on)
Deleted extra entry /users/admin.json (purge is on)
Deleted extra entry /users/x.json (purge is on)
EOM
knife('diff --name-status /').should_succeed <<EOM
D\t/environments/_default.json
EOM
end
end
when_the_repository 'has an identical copy of each thing' do
file 'clients/chef-validator.json', { 'validator' => true, 'public_key' => ChefZero::PUBLIC_KEY }
file 'clients/chef-webui.json', { 'admin' => true, 'public_key' => ChefZero::PUBLIC_KEY }
file 'clients/x.json', { 'public_key' => ChefZero::PUBLIC_KEY }
file 'cookbooks/x/metadata.rb', 'version "1.0.0"'
file 'data_bags/x/y.json', {}
file 'environments/_default.json', { "description" => "The default Chef environment" }
file 'environments/x.json', {}
file 'nodes/x.json', {}
file 'roles/x.json', {}
file 'users/admin.json', { 'admin' => true, 'public_key' => ChefZero::PUBLIC_KEY }
file 'users/x.json', { 'public_key' => ChefZero::PUBLIC_KEY }
it 'knife upload makes no changes' do
knife('upload /cookbooks/x').should_succeed ''
knife('diff --name-status /').should_succeed ''
end
it 'knife upload --purge makes no changes' do
knife('upload --purge /').should_succeed ''
knife('diff --name-status /').should_succeed ''
end
context 'except the role file' do
file 'roles/x.json', { 'description' => 'blarghle' }
it 'knife upload changes the role' do
knife('upload /').should_succeed "Updated /roles/x.json\n"
knife('diff --name-status /').should_succeed ''
end
it 'knife upload --no-diff does not change the role' do
knife('upload --no-diff /').should_succeed ''
knife('diff --name-status /').should_succeed "M\t/roles/x.json\n"
end
end
context 'except the role file is textually different, but not ACTUALLY different' do
file 'roles/x.json', <<EOM
{
"chef_type": "role",
"default_attributes": {
},
"env_run_lists": {
},
"json_class": "Chef::Role",
"name": "x",
"description": "",
"override_attributes": {
},
"run_list": [
]
}
EOM
it 'knife upload / does not change anything' do
knife('upload /').should_succeed ''
knife('diff --name-status /').should_succeed ''
end
end
context 'as well as one extra copy of each thing' do
file 'clients/y.json', { 'public_key' => ChefZero::PUBLIC_KEY }
file 'cookbooks/x/blah.rb', ''
file 'cookbooks/y/metadata.rb', 'version "1.0.0"'
file 'data_bags/x/z.json', {}
file 'data_bags/y/zz.json', {}
file 'environments/y.json', {}
file 'nodes/y.json', {}
file 'roles/y.json', {}
file 'users/y.json', { 'public_key' => ChefZero::PUBLIC_KEY }
it 'knife upload adds the new files' do
knife('upload /').should_succeed <<EOM
Created /clients/y.json
Updated /cookbooks/x
Created /cookbooks/y
Created /data_bags/x/z.json
Created /data_bags/y
Created /data_bags/y/zz.json
Created /environments/y.json
Created /nodes/y.json
Created /roles/y.json
Created /users/y.json
EOM
knife('diff --name-status /').should_succeed ''
end
it 'knife upload --no-diff adds the new files' do
knife('upload --no-diff /').should_succeed <<EOM
Created /clients/y.json
Updated /cookbooks/x
Created /cookbooks/y
Created /data_bags/x/z.json
Created /data_bags/y
Created /data_bags/y/zz.json
Created /environments/y.json
Created /nodes/y.json
Created /roles/y.json
Created /users/y.json
EOM
knife('diff --name-status /').should_succeed ''
end
end
end
when_the_repository 'is empty' do
it 'knife upload does nothing' do
knife('upload /').should_succeed ''
knife('diff --name-status /').should_succeed <<EOM
D\t/clients
D\t/cookbooks
D\t/data_bags
D\t/environments
D\t/nodes
D\t/roles
D\t/users
EOM
end
it 'knife upload --purge deletes nothing' do
knife('upload --purge /').should_fail <<EOM
ERROR: /clients cannot be deleted.
ERROR: /cookbooks cannot be deleted.
ERROR: /data_bags cannot be deleted.
ERROR: /environments cannot be deleted.
ERROR: /nodes cannot be deleted.
ERROR: /roles cannot be deleted.
ERROR: /users cannot be deleted.
EOM
knife('diff --name-status /').should_succeed <<EOM
D\t/clients
D\t/cookbooks
D\t/data_bags
D\t/environments
D\t/nodes
D\t/roles
D\t/users
EOM
end
context 'when current directory is top level' do
cwd '.'
it 'knife upload with no parameters reports an error' do
knife('upload').should_fail "FATAL: Must specify at least one argument. If you want to upload everything in this directory, type \"knife upload .\"\n", :stdout => /USAGE/
end
end
end
end
when_the_chef_server 'is empty' do
when_the_repository 'has a data bag item' do
file 'data_bags/x/y.json', { 'foo' => 'bar' }
it 'knife upload of the data bag uploads only the values in the data bag item and no other' do
knife('upload /data_bags/x/y.json').should_succeed <<EOM
Created /data_bags/x
Created /data_bags/x/y.json
EOM
knife('diff --name-status /data_bags').should_succeed <<EOM
EOM
JSON.parse(knife('raw /data/x/y').stdout, :create_additions => false).keys.sort.should == [ 'foo', 'id' ]
end
end
when_the_repository 'has a data bag item with keys chef_type and data_bag' do
file 'data_bags/x/y.json', { 'chef_type' => 'aaa', 'data_bag' => 'bbb' }
it 'upload preserves chef_type and data_bag' do
knife('upload /data_bags/x/y.json').should_succeed <<EOM
Created /data_bags/x
Created /data_bags/x/y.json
EOM
knife('diff --name-status /data_bags').should_succeed ''
result = JSON.parse(knife('raw /data/x/y').stdout, :create_additions => false)
result.keys.sort.should == [ 'chef_type', 'data_bag', 'id' ]
result['chef_type'].should == 'aaa'
result['data_bag'].should == 'bbb'
end
end
# Test upload of an item when the other end doesn't even have the container
when_the_repository 'has two data bag items' do
file 'data_bags/x/y.json', {}
file 'data_bags/x/z.json', {}
it 'knife upload of one data bag item itself succeeds' do
knife('upload /data_bags/x/y.json').should_succeed <<EOM
Created /data_bags/x
Created /data_bags/x/y.json
EOM
knife('diff --name-status /data_bags').should_succeed <<EOM
A\t/data_bags/x/z.json
EOM
end
end
end
when_the_chef_server 'has three data bag items' do
data_bag 'x', { 'deleted' => {}, 'modified' => {}, 'unmodified' => {} }
when_the_repository 'has a modified, unmodified, added and deleted data bag item' do
file 'data_bags/x/added.json', {}
file 'data_bags/x/modified.json', { 'foo' => 'bar' }
file 'data_bags/x/unmodified.json', {}
it 'knife upload of the modified file succeeds' do
knife('upload /data_bags/x/modified.json').should_succeed <<EOM
Updated /data_bags/x/modified.json
EOM
knife('diff --name-status /data_bags').should_succeed <<EOM
D\t/data_bags/x/deleted.json
A\t/data_bags/x/added.json
EOM
end
it 'knife upload of the unmodified file does nothing' do
knife('upload /data_bags/x/unmodified.json').should_succeed ''
knife('diff --name-status /data_bags').should_succeed <<EOM
D\t/data_bags/x/deleted.json
M\t/data_bags/x/modified.json
A\t/data_bags/x/added.json
EOM
end
it 'knife upload of the added file succeeds' do
knife('upload /data_bags/x/added.json').should_succeed <<EOM
Created /data_bags/x/added.json
EOM
knife('diff --name-status /data_bags').should_succeed <<EOM
D\t/data_bags/x/deleted.json
M\t/data_bags/x/modified.json
EOM
end
it 'knife upload of the deleted file does nothing' do
knife('upload /data_bags/x/deleted.json').should_succeed ''
knife('diff --name-status /data_bags').should_succeed <<EOM
D\t/data_bags/x/deleted.json
M\t/data_bags/x/modified.json
A\t/data_bags/x/added.json
EOM
end
it 'knife upload --purge of the deleted file deletes it' do
knife('upload --purge /data_bags/x/deleted.json').should_succeed <<EOM
Deleted extra entry /data_bags/x/deleted.json (purge is on)
EOM
knife('diff --name-status /data_bags').should_succeed <<EOM
M\t/data_bags/x/modified.json
A\t/data_bags/x/added.json
EOM
end
it 'knife upload of the entire data bag uploads everything' do
knife('upload /data_bags/x').should_succeed <<EOM
Created /data_bags/x/added.json
Updated /data_bags/x/modified.json
EOM
knife('diff --name-status /data_bags').should_succeed <<EOM
D\t/data_bags/x/deleted.json
EOM
end
it 'knife upload --purge of the entire data bag uploads everything' do
knife('upload --purge /data_bags/x').should_succeed <<EOM
Created /data_bags/x/added.json
Updated /data_bags/x/modified.json
Deleted extra entry /data_bags/x/deleted.json (purge is on)
EOM
knife('diff --name-status /data_bags').should_succeed ''
end
context 'when cwd is the /data_bags directory' do
cwd 'data_bags'
it 'knife upload fails' do
knife('upload').should_fail "FATAL: Must specify at least one argument. If you want to upload everything in this directory, type \"knife upload .\"\n", :stdout => /USAGE/
end
it 'knife upload --purge . uploads everything' do
knife('upload --purge .').should_succeed <<EOM
Created x/added.json
Updated x/modified.json
Deleted extra entry x/deleted.json (purge is on)
EOM
knife('diff --name-status /data_bags').should_succeed ''
end
it 'knife upload --purge * uploads everything' do
knife('upload --purge *').should_succeed <<EOM
Created x/added.json
Updated x/modified.json
Deleted extra entry x/deleted.json (purge is on)
EOM
knife('diff --name-status /data_bags').should_succeed ''
end
end
end
end
# Cookbook upload is a funny thing ... direct cookbook upload works, but
# upload of a file is designed not to work at present. Make sure that is the
# case.
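# A sketch of the asymmetry (illustrative only; the specs below are the
# authoritative cases):
#   knife('upload /cookbooks/x')              # whole cookbook: succeeds
#   knife('upload /cookbooks/x/metadata.rb')  # single file inside it: fails
#                                             # with "cannot be updated"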
when_the_chef_server 'has a cookbook' do
cookbook 'x', '1.0.0', { 'metadata.rb' => 'version "1.0.0"', 'z.rb' => '' }
when_the_repository 'has a modified, extra and missing file for the cookbook' do
file 'cookbooks/x/metadata.rb', 'version "1.0.0"'
file 'cookbooks/x/y.rb', 'hi'
it 'knife upload of any individual file fails' do
knife('upload /cookbooks/x/metadata.rb').should_fail "ERROR: /cookbooks/x/metadata.rb cannot be updated.\n"
knife('upload /cookbooks/x/y.rb').should_fail "ERROR: /cookbooks/x cannot have a child created under it.\n"
knife('upload --purge /cookbooks/x/z.rb').should_fail "ERROR: /cookbooks/x/z.rb cannot be deleted.\n"
end
# TODO this is a bit of an inconsistency: if we didn't specify --purge,
# technically we shouldn't have deleted missing files. But ... cookbooks
# are a special case.
it 'knife upload of the cookbook itself succeeds' do
knife('upload /cookbooks/x').should_succeed <<EOM
Updated /cookbooks/x
EOM
knife('diff --name-status /cookbooks').should_succeed ''
end
it 'knife upload --purge of the cookbook itself succeeds' do
knife('upload /cookbooks/x').should_succeed <<EOM
Updated /cookbooks/x
EOM
knife('diff --name-status /cookbooks').should_succeed ''
end
end
when_the_repository 'has a missing file for the cookbook' do
file 'cookbooks/x/metadata.rb', 'version "1.0.0"'
it 'knife upload of the cookbook succeeds' do
knife('upload /cookbooks/x').should_succeed <<EOM
Updated /cookbooks/x
EOM
knife('diff --name-status /cookbooks').should_succeed ''
end
end
when_the_repository 'has an extra file for the cookbook' do
file 'cookbooks/x/metadata.rb', 'version "1.0.0"'
file 'cookbooks/x/z.rb', ''
file 'cookbooks/x/blah.rb', ''
it 'knife upload of the cookbook succeeds' do
knife('upload /cookbooks/x').should_succeed <<EOM
Updated /cookbooks/x
EOM
knife('diff --name-status /cookbooks').should_succeed ''
end
end
when_the_repository 'has a different file in the cookbook' do
file 'cookbooks/x/metadata.rb', 'version "1.0.0"'
it 'knife upload --freeze freezes the cookbook' do
knife('upload --freeze /cookbooks/x').should_succeed <<EOM
Updated /cookbooks/x
EOM
# Modify a file and attempt to upload
file 'cookbooks/x/metadata.rb', 'version "1.0.0" # This is different'
knife('upload /cookbooks/x').should_fail "ERROR: /cookbooks failed to write: Cookbook x is frozen\n"
end
end
end
when_the_chef_server 'has a frozen cookbook' do
cookbook 'frozencook', '1.0.0', {
'metadata.rb' => 'version "1.0.0"'
}, :frozen => true
when_the_repository 'has an update to said cookbook' do
file 'cookbooks/frozencook/metadata.rb', 'version "1.0.0" # This is different'
it 'knife upload fails to upload the frozen cookbook' do
knife('upload /cookbooks/frozencook').should_fail "ERROR: /cookbooks failed to write: Cookbook frozencook is frozen\n"
end
it 'knife upload --force uploads the frozen cookbook' do
knife('upload --force /cookbooks/frozencook').should_succeed <<EOM
Updated /cookbooks/frozencook
EOM
end
end
end
when_the_repository 'has a cookbook' do
file 'cookbooks/x/metadata.rb', 'version "1.0.0"'
file 'cookbooks/x/onlyin1.0.0.rb', 'old_text'
when_the_chef_server 'has a later version for the cookbook' do
cookbook 'x', '1.0.0', { 'metadata.rb' => 'version "1.0.0"', 'onlyin1.0.0.rb' => '' }
cookbook 'x', '1.0.1', { 'metadata.rb' => 'version "1.0.1"', 'onlyin1.0.1.rb' => 'hi' }
it 'knife upload /cookbooks/x uploads the local version' do
knife('diff --name-status /cookbooks').should_succeed <<EOM
M\t/cookbooks/x/metadata.rb
D\t/cookbooks/x/onlyin1.0.1.rb
A\t/cookbooks/x/onlyin1.0.0.rb
EOM
knife('upload --purge /cookbooks/x').should_succeed <<EOM
Updated /cookbooks/x
EOM
knife('diff --name-status /cookbooks').should_succeed <<EOM
M\t/cookbooks/x/metadata.rb
D\t/cookbooks/x/onlyin1.0.1.rb
A\t/cookbooks/x/onlyin1.0.0.rb
EOM
end
end
when_the_chef_server 'has an earlier version for the cookbook' do
cookbook 'x', '1.0.0', { 'metadata.rb' => 'version "1.0.0"', 'onlyin1.0.0.rb' => ''}
cookbook 'x', '0.9.9', { 'metadata.rb' => 'version "0.9.9"', 'onlyin0.9.9.rb' => 'hi' }
it 'knife upload /cookbooks/x uploads the local version' do
knife('upload --purge /cookbooks/x').should_succeed <<EOM
Updated /cookbooks/x
EOM
knife('diff --name-status /cookbooks').should_succeed ''
end
end
when_the_chef_server 'has a later version for the cookbook, and no current version' do
cookbook 'x', '1.0.1', { 'metadata.rb' => 'version "1.0.1"', 'onlyin1.0.1.rb' => 'hi' }
it 'knife upload /cookbooks/x uploads the local version' do
knife('diff --name-status /cookbooks').should_succeed <<EOM
M\t/cookbooks/x/metadata.rb
D\t/cookbooks/x/onlyin1.0.1.rb
A\t/cookbooks/x/onlyin1.0.0.rb
EOM
knife('upload --purge /cookbooks/x').should_succeed <<EOM
Updated /cookbooks/x
EOM
knife('diff --name-status /cookbooks').should_succeed <<EOM
M\t/cookbooks/x/metadata.rb
D\t/cookbooks/x/onlyin1.0.1.rb
A\t/cookbooks/x/onlyin1.0.0.rb
EOM
end
end
when_the_chef_server 'has an earlier version for the cookbook, and no current version' do
cookbook 'x', '0.9.9', { 'metadata.rb' => 'version "0.9.9"', 'onlyin0.9.9.rb' => 'hi' }
it 'knife upload /cookbooks/x uploads the new version' do
knife('upload --purge /cookbooks/x').should_succeed <<EOM
Updated /cookbooks/x
EOM
knife('diff --name-status /cookbooks').should_succeed ''
end
end
end
when_the_chef_server 'has an environment' do
environment 'x', {}
when_the_repository 'has an environment with bad JSON' do
file 'environments/x.json', '{'
it 'knife upload tries and fails' do
knife('upload /environments/x.json').should_fail "WARN: Parse error reading #{path_to('environments/x.json')} as JSON: A JSON text must at least contain two octets!\nERROR: /environments/x.json failed to write: Parse error reading JSON: A JSON text must at least contain two octets!\n"
knife('diff --name-status /environments/x.json').should_succeed "M\t/environments/x.json\n", :stderr => "WARN: Parse error reading #{path_to('environments/x.json')} as JSON: A JSON text must at least contain two octets!\n"
end
end
when_the_repository 'has the same environment with the wrong name in the file' do
file 'environments/x.json', { 'name' => 'y' }
it 'knife upload fails' do
knife('upload /environments/x.json').should_fail "ERROR: /environments/x.json failed to write: Name must be 'x' (is 'y')\n"
knife('diff --name-status /environments/x.json').should_succeed "M\t/environments/x.json\n"
end
end
when_the_repository 'has the same environment with no name in the file' do
file 'environments/x.json', { 'description' => 'hi' }
it 'knife upload succeeds' do
knife('upload /environments/x.json').should_succeed "Updated /environments/x.json\n"
knife('diff --name-status /environments/x.json').should_succeed ''
end
end
end
when_the_chef_server 'is empty' do
when_the_repository 'has an environment with bad JSON' do
file 'environments/x.json', '{'
it 'knife upload tries and fails' do
knife('upload /environments/x.json').should_fail "ERROR: /environments failed to create_child: Parse error reading JSON creating child 'x.json': A JSON text must at least contain two octets!\n"
knife('diff --name-status /environments/x.json').should_succeed "A\t/environments/x.json\n"
end
end
when_the_repository 'has an environment with the wrong name in the file' do
file 'environments/x.json', { 'name' => 'y' }
it 'knife upload fails' do
knife('upload /environments/x.json').should_fail "ERROR: /environments failed to create_child: Error creating 'x.json': Name must be 'x' (is 'y')\n"
knife('diff --name-status /environments/x.json').should_succeed "A\t/environments/x.json\n"
end
end
when_the_repository 'has an environment with no name in the file' do
file 'environments/x.json', { 'description' => 'hi' }
it 'knife upload succeeds' do
knife('upload /environments/x.json').should_succeed "Created /environments/x.json\n"
knife('diff --name-status /environments/x.json').should_succeed ''
end
end
when_the_repository 'has a data bag with no id in the file' do
file 'data_bags/bag/x.json', { 'foo' => 'bar' }
it 'knife upload succeeds' do
knife('upload /data_bags/bag/x.json').should_succeed "Created /data_bags/bag\nCreated /data_bags/bag/x.json\n"
knife('diff --name-status /data_bags/bag/x.json').should_succeed ''
end
end
end
end # without versioned cookbooks
with_versioned_cookbooks do
when_the_chef_server "has one of each thing" do
client 'x', {}
cookbook 'x', '1.0.0', { 'metadata.rb' => 'version "1.0.0"' }
data_bag 'x', { 'y' => {} }
environment 'x', {}
node 'x', {}
role 'x', {}
user 'x', {}
when_the_repository 'has only top-level directories' do
directory 'clients'
directory 'cookbooks'
directory 'data_bags'
directory 'environments'
directory 'nodes'
directory 'roles'
directory 'users'
it 'knife upload does nothing' do
knife('upload /').should_succeed ''
knife('diff --name-status /').should_succeed <<EOM
D\t/clients/chef-validator.json
D\t/clients/chef-webui.json
D\t/clients/x.json
D\t/cookbooks/x-1.0.0
D\t/data_bags/x
D\t/environments/_default.json
D\t/environments/x.json
D\t/nodes/x.json
D\t/roles/x.json
D\t/users/admin.json
D\t/users/x.json
EOM
end
it 'knife upload --purge deletes everything' do
knife('upload --purge /').should_succeed(<<EOM, :stderr => "WARNING: /environments/_default.json cannot be deleted (default environment cannot be modified).\n")
Deleted extra entry /clients/chef-validator.json (purge is on)
Deleted extra entry /clients/chef-webui.json (purge is on)
Deleted extra entry /clients/x.json (purge is on)
Deleted extra entry /cookbooks/x-1.0.0 (purge is on)
Deleted extra entry /data_bags/x (purge is on)
Deleted extra entry /environments/x.json (purge is on)
Deleted extra entry /nodes/x.json (purge is on)
Deleted extra entry /roles/x.json (purge is on)
Deleted extra entry /users/admin.json (purge is on)
Deleted extra entry /users/x.json (purge is on)
EOM
knife('diff --name-status /').should_succeed <<EOM
D\t/environments/_default.json
EOM
end
end
when_the_repository 'has an identical copy of each thing' do
file 'clients/chef-validator.json', { 'validator' => true, 'public_key' => ChefZero::PUBLIC_KEY }
file 'clients/chef-webui.json', { 'admin' => true, 'public_key' => ChefZero::PUBLIC_KEY }
file 'clients/x.json', { 'public_key' => ChefZero::PUBLIC_KEY }
file 'cookbooks/x-1.0.0/metadata.rb', 'version "1.0.0"'
file 'data_bags/x/y.json', {}
file 'environments/_default.json', { 'description' => 'The default Chef environment' }
file 'environments/x.json', {}
file 'nodes/x.json', {}
file 'roles/x.json', {}
file 'users/admin.json', { 'admin' => true, 'public_key' => ChefZero::PUBLIC_KEY }
file 'users/x.json', { 'public_key' => ChefZero::PUBLIC_KEY }
it 'knife upload makes no changes' do
knife('upload /cookbooks/x-1.0.0').should_succeed ''
knife('diff --name-status /').should_succeed ''
end
it 'knife upload --purge makes no changes' do
knife('upload --purge /').should_succeed ''
knife('diff --name-status /').should_succeed ''
end
context 'except the role file' do
file 'roles/x.json', { 'description' => 'blarghle' }
it 'knife upload changes the role' do
knife('upload /').should_succeed "Updated /roles/x.json\n"
knife('diff --name-status /').should_succeed ''
end
end
context 'except the role file is textually different, but not ACTUALLY different' do
file 'roles/x.json', <<EOM
{
"chef_type": "role",
"default_attributes": {
},
"env_run_lists": {
},
"json_class": "Chef::Role",
"name": "x",
"description": "",
"override_attributes": {
},
"run_list": [
]
}
EOM
it 'knife upload / does not change anything' do
knife('upload /').should_succeed ''
knife('diff --name-status /').should_succeed ''
end
end
context 'as well as one extra copy of each thing' do
file 'clients/y.json', { 'public_key' => ChefZero::PUBLIC_KEY }
file 'cookbooks/x-1.0.0/blah.rb', ''
file 'cookbooks/x-2.0.0/metadata.rb', 'version "2.0.0"'
file 'cookbooks/y-1.0.0/metadata.rb', 'version "1.0.0"'
file 'data_bags/x/z.json', {}
file 'data_bags/y/zz.json', {}
file 'environments/y.json', {}
file 'nodes/y.json', {}
file 'roles/y.json', {}
file 'users/y.json', { 'public_key' => ChefZero::PUBLIC_KEY }
it 'knife upload adds the new files' do
knife('upload /').should_succeed <<EOM
Created /clients/y.json
Updated /cookbooks/x-1.0.0
Created /cookbooks/x-2.0.0
Created /cookbooks/y-1.0.0
Created /data_bags/x/z.json
Created /data_bags/y
Created /data_bags/y/zz.json
Created /environments/y.json
Created /nodes/y.json
Created /roles/y.json
Created /users/y.json
EOM
knife('diff --name-status /').should_succeed ''
end
end
end
when_the_repository 'is empty' do
it 'knife upload does nothing' do
knife('upload /').should_succeed ''
knife('diff --name-status /').should_succeed <<EOM
D\t/clients
D\t/cookbooks
D\t/data_bags
D\t/environments
D\t/nodes
D\t/roles
D\t/users
EOM
end
it 'knife upload --purge deletes nothing' do
knife('upload --purge /').should_fail <<EOM
ERROR: /clients cannot be deleted.
ERROR: /cookbooks cannot be deleted.
ERROR: /data_bags cannot be deleted.
ERROR: /environments cannot be deleted.
ERROR: /nodes cannot be deleted.
ERROR: /roles cannot be deleted.
ERROR: /users cannot be deleted.
EOM
knife('diff --name-status /').should_succeed <<EOM
D\t/clients
D\t/cookbooks
D\t/data_bags
D\t/environments
D\t/nodes
D\t/roles
D\t/users
EOM
end
context 'when current directory is top level' do
cwd '.'
it 'knife upload with no parameters reports an error' do
knife('upload').should_fail "FATAL: Must specify at least one argument. If you want to upload everything in this directory, type \"knife upload .\"\n", :stdout => /USAGE/
end
end
end
end
# Test upload of an item when the other end doesn't even have the container
when_the_chef_server 'is empty' do
when_the_repository 'has two data bag items' do
file 'data_bags/x/y.json', {}
file 'data_bags/x/z.json', {}
it 'knife upload of one data bag item itself succeeds' do
knife('upload /data_bags/x/y.json').should_succeed <<EOM
Created /data_bags/x
Created /data_bags/x/y.json
EOM
knife('diff --name-status /data_bags').should_succeed <<EOM
A\t/data_bags/x/z.json
EOM
end
end
end
when_the_chef_server 'has three data bag items' do
data_bag 'x', { 'deleted' => {}, 'modified' => {}, 'unmodified' => {} }
when_the_repository 'has a modified, unmodified, added and deleted data bag item' do
file 'data_bags/x/added.json', {}
file 'data_bags/x/modified.json', { 'foo' => 'bar' }
file 'data_bags/x/unmodified.json', {}
it 'knife upload of the modified file succeeds' do
knife('upload /data_bags/x/modified.json').should_succeed <<EOM
Updated /data_bags/x/modified.json
EOM
knife('diff --name-status /data_bags').should_succeed <<EOM
D\t/data_bags/x/deleted.json
A\t/data_bags/x/added.json
EOM
end
it 'knife upload of the unmodified file does nothing' do
knife('upload /data_bags/x/unmodified.json').should_succeed ''
knife('diff --name-status /data_bags').should_succeed <<EOM
D\t/data_bags/x/deleted.json
M\t/data_bags/x/modified.json
A\t/data_bags/x/added.json
EOM
end
it 'knife upload of the added file succeeds' do
knife('upload /data_bags/x/added.json').should_succeed <<EOM
Created /data_bags/x/added.json
EOM
knife('diff --name-status /data_bags').should_succeed <<EOM
D\t/data_bags/x/deleted.json
M\t/data_bags/x/modified.json
EOM
end
it 'knife upload of the deleted file does nothing' do
knife('upload /data_bags/x/deleted.json').should_succeed ''
knife('diff --name-status /data_bags').should_succeed <<EOM
D\t/data_bags/x/deleted.json
M\t/data_bags/x/modified.json
A\t/data_bags/x/added.json
EOM
end
it 'knife upload --purge of the deleted file deletes it' do
knife('upload --purge /data_bags/x/deleted.json').should_succeed <<EOM
Deleted extra entry /data_bags/x/deleted.json (purge is on)
EOM
knife('diff --name-status /data_bags').should_succeed <<EOM
M\t/data_bags/x/modified.json
A\t/data_bags/x/added.json
EOM
end
it 'knife upload of the entire data bag uploads everything' do
knife('upload /data_bags/x').should_succeed <<EOM
Created /data_bags/x/added.json
Updated /data_bags/x/modified.json
EOM
knife('diff --name-status /data_bags').should_succeed <<EOM
D\t/data_bags/x/deleted.json
EOM
end
it 'knife upload --purge of the entire data bag uploads everything' do
knife('upload --purge /data_bags/x').should_succeed <<EOM
Created /data_bags/x/added.json
Updated /data_bags/x/modified.json
Deleted extra entry /data_bags/x/deleted.json (purge is on)
EOM
knife('diff --name-status /data_bags').should_succeed ''
end
context 'when cwd is the /data_bags directory' do
cwd 'data_bags'
it 'knife upload fails' do
knife('upload').should_fail "FATAL: Must specify at least one argument. If you want to upload everything in this directory, type \"knife upload .\"\n", :stdout => /USAGE/
end
it 'knife upload --purge . uploads everything' do
knife('upload --purge .').should_succeed <<EOM
Created x/added.json
Updated x/modified.json
Deleted extra entry x/deleted.json (purge is on)
EOM
knife('diff --name-status /data_bags').should_succeed ''
end
it 'knife upload --purge * uploads everything' do
knife('upload --purge *').should_succeed <<EOM
Created x/added.json
Updated x/modified.json
Deleted extra entry x/deleted.json (purge is on)
EOM
knife('diff --name-status /data_bags').should_succeed ''
end
end
end
end
# Cookbook upload is a funny thing ... direct cookbook upload works, but
# upload of a file is designed not to work at present. Make sure that is the
# case.
when_the_chef_server 'has a cookbook' do
cookbook 'x', '1.0.0', { 'metadata.rb' => 'version "1.0.0"', 'z.rb' => '' }
when_the_repository 'has a modified, extra and missing file for the cookbook' do
file 'cookbooks/x-1.0.0/metadata.rb', 'version "1.0.0"'
file 'cookbooks/x-1.0.0/y.rb', 'hi'
it 'knife upload of any individual file fails' do
knife('upload /cookbooks/x-1.0.0/metadata.rb').should_fail "ERROR: /cookbooks/x-1.0.0/metadata.rb cannot be updated.\n"
knife('upload /cookbooks/x-1.0.0/y.rb').should_fail "ERROR: /cookbooks/x-1.0.0 cannot have a child created under it.\n"
knife('upload --purge /cookbooks/x-1.0.0/z.rb').should_fail "ERROR: /cookbooks/x-1.0.0/z.rb cannot be deleted.\n"
end
# TODO this is a bit of an inconsistency: if we didn't specify --purge,
# technically we shouldn't have deleted missing files. But ... cookbooks
# are a special case.
it 'knife upload of the cookbook itself succeeds' do
knife('upload /cookbooks/x-1.0.0').should_succeed <<EOM
Updated /cookbooks/x-1.0.0
EOM
knife('diff --name-status /cookbooks').should_succeed ''
end
it 'knife upload --purge of the cookbook itself succeeds' do
knife('upload /cookbooks/x-1.0.0').should_succeed <<EOM
Updated /cookbooks/x-1.0.0
EOM
knife('diff --name-status /cookbooks').should_succeed ''
end
end
when_the_repository 'has a missing file for the cookbook' do
file 'cookbooks/x-1.0.0/metadata.rb', 'version "1.0.0"'
it 'knife upload of the cookbook succeeds' do
knife('upload /cookbooks/x-1.0.0').should_succeed <<EOM
Updated /cookbooks/x-1.0.0
EOM
knife('diff --name-status /cookbooks').should_succeed ''
end
end
when_the_repository 'has an extra file for the cookbook' do
file 'cookbooks/x-1.0.0/metadata.rb', 'version "1.0.0"'
file 'cookbooks/x-1.0.0/z.rb', ''
file 'cookbooks/x-1.0.0/blah.rb', ''
it 'knife upload of the cookbook succeeds' do
knife('upload /cookbooks/x-1.0.0').should_succeed <<EOM
Updated /cookbooks/x-1.0.0
EOM
knife('diff --name-status /cookbooks').should_succeed ''
end
end
end
when_the_repository 'has a cookbook' do
file 'cookbooks/x-1.0.0/metadata.rb', 'version "1.0.0"'
file 'cookbooks/x-1.0.0/onlyin1.0.0.rb', 'old_text'
when_the_chef_server 'has a later version for the cookbook' do
cookbook 'x', '1.0.0', { 'metadata.rb' => 'version "1.0.0"', 'onlyin1.0.0.rb' => '' }
cookbook 'x', '1.0.1', { 'metadata.rb' => 'version "1.0.1"', 'onlyin1.0.1.rb' => 'hi' }
it 'knife upload /cookbooks uploads the local version' do
knife('diff --name-status /cookbooks').should_succeed <<EOM
M\t/cookbooks/x-1.0.0/onlyin1.0.0.rb
D\t/cookbooks/x-1.0.1
EOM
knife('upload --purge /cookbooks').should_succeed <<EOM
Updated /cookbooks/x-1.0.0
Deleted extra entry /cookbooks/x-1.0.1 (purge is on)
EOM
knife('diff --name-status /cookbooks').should_succeed ''
end
end
when_the_chef_server 'has an earlier version for the cookbook' do
cookbook 'x', '1.0.0', { 'metadata.rb' => 'version "1.0.0"', 'onlyin1.0.0.rb' => ''}
cookbook 'x', '0.9.9', { 'metadata.rb' => 'version "0.9.9"', 'onlyin0.9.9.rb' => 'hi' }
it 'knife upload /cookbooks uploads the local version' do
knife('upload --purge /cookbooks').should_succeed <<EOM
Updated /cookbooks/x-1.0.0
Deleted extra entry /cookbooks/x-0.9.9 (purge is on)
EOM
knife('diff --name-status /cookbooks').should_succeed ''
end
end
when_the_chef_server 'has a later version for the cookbook, and no current version' do
cookbook 'x', '1.0.1', { 'metadata.rb' => 'version "1.0.1"', 'onlyin1.0.1.rb' => 'hi' }
it 'knife upload /cookbooks uploads the local version' do
knife('diff --name-status /cookbooks').should_succeed <<EOM
D\t/cookbooks/x-1.0.1
A\t/cookbooks/x-1.0.0
EOM
knife('upload --purge /cookbooks').should_succeed <<EOM
Created /cookbooks/x-1.0.0
Deleted extra entry /cookbooks/x-1.0.1 (purge is on)
EOM
knife('diff --name-status /cookbooks').should_succeed ''
end
end
when_the_chef_server 'has an earlier version for the cookbook, and no current version' do
cookbook 'x', '0.9.9', { 'metadata.rb' => 'version "0.9.9"', 'onlyin0.9.9.rb' => 'hi' }
it 'knife upload /cookbooks uploads the new version' do
knife('upload --purge /cookbooks').should_succeed <<EOM
Created /cookbooks/x-1.0.0
Deleted extra entry /cookbooks/x-0.9.9 (purge is on)
EOM
knife('diff --name-status /cookbooks').should_succeed ''
end
end
end
when_the_chef_server 'has an environment' do
environment 'x', {}
when_the_repository 'has an environment with bad JSON' do
file 'environments/x.json', '{'
it 'knife upload tries and fails' do
knife('upload /environments/x.json').should_fail "WARN: Parse error reading #{path_to('environments/x.json')} as JSON: A JSON text must at least contain two octets!\nERROR: /environments/x.json failed to write: Parse error reading JSON: A JSON text must at least contain two octets!\n"
knife('diff --name-status /environments/x.json').should_succeed "M\t/environments/x.json\n", :stderr => "WARN: Parse error reading #{path_to('environments/x.json')} as JSON: A JSON text must at least contain two octets!\n"
end
end
when_the_repository 'has the same environment with the wrong name in the file' do
file 'environments/x.json', { 'name' => 'y' }
it 'knife upload fails' do
knife('upload /environments/x.json').should_fail "ERROR: /environments/x.json failed to write: Name must be 'x' (is 'y')\n"
knife('diff --name-status /environments/x.json').should_succeed "M\t/environments/x.json\n"
end
end
when_the_repository 'has the same environment with no name in the file' do
file 'environments/x.json', { 'description' => 'hi' }
it 'knife upload succeeds' do
knife('upload /environments/x.json').should_succeed "Updated /environments/x.json\n"
knife('diff --name-status /environments/x.json').should_succeed ''
end
end
end
when_the_chef_server 'is empty' do
when_the_repository 'has an environment with bad JSON' do
file 'environments/x.json', '{'
it 'knife upload tries and fails' do
knife('upload /environments/x.json').should_fail "ERROR: /environments failed to create_child: Parse error reading JSON creating child 'x.json': A JSON text must at least contain two octets!\n"
knife('diff --name-status /environments/x.json').should_succeed "A\t/environments/x.json\n"
end
end
when_the_repository 'has an environment with the wrong name in the file' do
file 'environments/x.json', { 'name' => 'y' }
it 'knife upload fails' do
knife('upload /environments/x.json').should_fail "ERROR: /environments failed to create_child: Error creating 'x.json': Name must be 'x' (is 'y')\n"
knife('diff --name-status /environments/x.json').should_succeed "A\t/environments/x.json\n"
end
end
when_the_repository 'has an environment with no name in the file' do
file 'environments/x.json', { 'description' => 'hi' }
it 'knife upload succeeds' do
knife('upload /environments/x.json').should_succeed "Created /environments/x.json\n"
knife('diff --name-status /environments/x.json').should_succeed ''
end
end
when_the_repository 'has a data bag with no id in the file' do
file 'data_bags/bag/x.json', { 'foo' => 'bar' }
it 'knife upload succeeds' do
knife('upload /data_bags/bag/x.json').should_succeed "Created /data_bags/bag\nCreated /data_bags/bag/x.json\n"
knife('diff --name-status /data_bags/bag/x.json').should_succeed ''
end
end
end
end # with versioned cookbooks
end
| apache-2.0 |
gearpump/gearpump.github.io | releases/latest/api/scala/io/gearpump/cluster/AppJar.html | 26118 | <!DOCTYPE html >
<html>
<head>
<title>AppJar - io.gearpump.cluster.AppJar</title>
<meta name="description" content="AppJar - io.gearpump.cluster.AppJar" />
<meta name="keywords" content="AppJar io.gearpump.cluster.AppJar" />
<meta http-equiv="content-type" content="text/html; charset=UTF-8" />
<link href="../../../lib/template.css" media="screen" type="text/css" rel="stylesheet" />
<link href="../../../lib/diagrams.css" media="screen" type="text/css" rel="stylesheet" id="diagrams-css" />
<script type="text/javascript" src="../../../lib/jquery.js" id="jquery-js"></script>
<script type="text/javascript" src="../../../lib/jquery-ui.js"></script>
<script type="text/javascript" src="../../../lib/template.js"></script>
<script type="text/javascript" src="../../../lib/tools.tooltip.js"></script>
<script type="text/javascript">
if(top === self) {
var url = '../../../index.html';
var hash = 'io.gearpump.cluster.AppJar';
var anchor = window.location.hash;
var anchor_opt = '';
if (anchor.length >= 1)
anchor_opt = '@' + anchor.substring(1);
window.location.href = url + '#' + hash + anchor_opt;
}
</script>
</head>
<body class="type">
<div id="definition">
<img alt="Class" src="../../../lib/class_big.png" />
<p id="owner"><a href="../../package.html" class="extype" name="io">io</a>.<a href="../package.html" class="extype" name="io.gearpump">gearpump</a>.<a href="package.html" class="extype" name="io.gearpump.cluster">cluster</a></p>
<h1>AppJar</h1><h3><span class="morelinks"><div>Related Doc:
<a href="package.html" class="extype" name="io.gearpump.cluster">package cluster</a>
</div></span></h3><span class="permalink">
<a href="../../../index.html#io.gearpump.cluster.AppJar" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
</div>
<h4 id="signature" class="signature">
<span class="modifier_kind">
<span class="modifier"></span>
<span class="kind">case class</span>
</span>
<span class="symbol">
<span class="name">AppJar</span><span class="params">(<span name="name">name: <span class="extype" name="scala.Predef.String">String</span></span>, <span name="filePath">filePath: <a href="../jarstore/FilePath.html" class="extype" name="io.gearpump.jarstore.FilePath">FilePath</a></span>)</span><span class="result"> extends <span class="extype" name="scala.Product">Product</span> with <span class="extype" name="scala.Serializable">Serializable</span></span>
</span>
</h4>
<div id="comment" class="fullcommenttop"><div class="comment cmt"><p>Jar file container in the cluster
</p></div><div class="toggleContainer block">
<span class="toggle">Linear Supertypes</span>
<div class="superTypes hiddenContent"><span class="extype" name="scala.Serializable">Serializable</span>, <span class="extype" name="java.io.Serializable">Serializable</span>, <span class="extype" name="scala.Product">Product</span>, <span class="extype" name="scala.Equals">Equals</span>, <span class="extype" name="scala.AnyRef">AnyRef</span>, <span class="extype" name="scala.Any">Any</span></div>
</div></div>
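<!-- Hypothetical usage sketch (editorial addition, not generated by Scaladoc;
     assumes io.gearpump.jarstore.FilePath takes a single path argument):
       val jar = AppJar("wordcount", FilePath("/jarstore/wordcount.jar"))
-->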
<div id="mbrsel">
<div id="textfilter"><span class="pre"></span><span class="input"><input id="mbrsel-input" type="text" accesskey="/" /></span><span class="post"></span></div>
<div id="order">
<span class="filtertype">Ordering</span>
<ol>
<li class="alpha in"><span>Alphabetic</span></li>
<li class="inherit out"><span>By Inheritance</span></li>
</ol>
</div>
<div id="ancestors">
<span class="filtertype">Inherited<br />
</span>
<ol id="linearization">
<li class="in" name="io.gearpump.cluster.AppJar"><span>AppJar</span></li><li class="in" name="scala.Serializable"><span>Serializable</span></li><li class="in" name="java.io.Serializable"><span>Serializable</span></li><li class="in" name="scala.Product"><span>Product</span></li><li class="in" name="scala.Equals"><span>Equals</span></li><li class="in" name="scala.AnyRef"><span>AnyRef</span></li><li class="in" name="scala.Any"><span>Any</span></li>
</ol>
</div><div id="ancestors">
<span class="filtertype"></span>
<ol>
<li class="hideall out"><span>Hide All</span></li>
<li class="showall in"><span>Show All</span></li>
</ol>
</div>
<div id="visbl">
<span class="filtertype">Visibility</span>
<ol><li class="public in"><span>Public</span></li><li class="all out"><span>All</span></li></ol>
</div>
</div>
<div id="template">
<div id="allMembers">
<div id="constructors" class="members">
<h3>Instance Constructors</h3>
<ol><li name="io.gearpump.cluster.AppJar#<init>" visbl="pub" data-isabs="false" fullComment="no" group="Ungrouped">
<a id="<init>(name:String,filePath:io.gearpump.jarstore.FilePath):io.gearpump.cluster.AppJar"></a>
<a id="<init>:AppJar"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier"></span>
<span class="kind">new</span>
</span>
<span class="symbol">
<span class="name">AppJar</span><span class="params">(<span name="name">name: <span class="extype" name="scala.Predef.String">String</span></span>, <span name="filePath">filePath: <a href="../jarstore/FilePath.html" class="extype" name="io.gearpump.jarstore.FilePath">FilePath</a></span>)</span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#io.gearpump.cluster.AppJar@<init>(name:String,filePath:io.gearpump.jarstore.FilePath):io.gearpump.cluster.AppJar" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
<p class="shortcomment cmt"></p>
</li></ol>
</div>
<div id="values" class="values members">
<h3>Value Members</h3>
<ol><li name="scala.AnyRef#!=" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="!=(x$1:Any):Boolean"></a>
<a id="!=(Any):Boolean"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span title="gt4s: $bang$eq" class="name">!=</span><span class="params">(<span name="arg0">arg0: <span class="extype" name="scala.Any">Any</span></span>)</span><span class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#io.gearpump.cluster.AppJar@!=(x$1:Any):Boolean" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef → Any</dd></dl></div>
</li><li name="scala.AnyRef###" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="##():Int"></a>
<a id="##():Int"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span title="gt4s: $hash$hash" class="name">##</span><span class="params">()</span><span class="result">: <span class="extype" name="scala.Int">Int</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#io.gearpump.cluster.AppJar@##():Int" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef → Any</dd></dl></div>
</li><li name="scala.AnyRef#==" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="==(x$1:Any):Boolean"></a>
<a id="==(Any):Boolean"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span title="gt4s: $eq$eq" class="name">==</span><span class="params">(<span name="arg0">arg0: <span class="extype" name="scala.Any">Any</span></span>)</span><span class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#io.gearpump.cluster.AppJar@==(x$1:Any):Boolean" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef → Any</dd></dl></div>
</li><li name="scala.Any#asInstanceOf" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="asInstanceOf[T0]:T0"></a>
<a id="asInstanceOf[T0]:T0"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">asInstanceOf</span><span class="tparams">[<span name="T0">T0</span>]</span><span class="result">: <span class="extype" name="scala.Any.asInstanceOf.T0">T0</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#io.gearpump.cluster.AppJar@asInstanceOf[T0]:T0" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>Any</dd></dl></div>
</li><li name="scala.AnyRef#clone" visbl="prt" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="clone():Object"></a>
<a id="clone():AnyRef"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier"></span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">clone</span><span class="params">()</span><span class="result">: <span class="extype" name="scala.AnyRef">AnyRef</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#io.gearpump.cluster.AppJar@clone():Object" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Attributes</dt><dd>protected[<a href="../../../java$lang.html" class="extype" name="java.lang">java.lang</a>] </dd><dt>Definition Classes</dt><dd>AnyRef</dd><dt>Annotations</dt><dd>
<span class="name">@throws</span><span class="args">(<span>
<span class="defval" name="classOf[java.lang.CloneNotSupportedException]">...</span>
</span>)</span>
</dd></dl></div>
</li><li name="scala.AnyRef#eq" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="eq(x$1:AnyRef):Boolean"></a>
<a id="eq(AnyRef):Boolean"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">eq</span><span class="params">(<span name="arg0">arg0: <span class="extype" name="scala.AnyRef">AnyRef</span></span>)</span><span class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#io.gearpump.cluster.AppJar@eq(x$1:AnyRef):Boolean" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd></dl></div>
</li><li name="io.gearpump.cluster.AppJar#filePath" visbl="pub" data-isabs="false" fullComment="no" group="Ungrouped">
<a id="filePath:io.gearpump.jarstore.FilePath"></a>
<a id="filePath:FilePath"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier"></span>
<span class="kind">val</span>
</span>
<span class="symbol">
<span class="name">filePath</span><span class="result">: <a href="../jarstore/FilePath.html" class="extype" name="io.gearpump.jarstore.FilePath">FilePath</a></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#io.gearpump.cluster.AppJar@filePath:io.gearpump.jarstore.FilePath" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
</li><li name="scala.AnyRef#finalize" visbl="prt" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="finalize():Unit"></a>
<a id="finalize():Unit"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier"></span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">finalize</span><span class="params">()</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#io.gearpump.cluster.AppJar@finalize():Unit" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Attributes</dt><dd>protected[<a href="../../../java$lang.html" class="extype" name="java.lang">java.lang</a>] </dd><dt>Definition Classes</dt><dd>AnyRef</dd><dt>Annotations</dt><dd>
<span class="name">@throws</span><span class="args">(<span>
<span class="symbol">classOf[java.lang.Throwable]</span>
</span>)</span>
</dd></dl></div>
</li><li name="scala.AnyRef#getClass" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="getClass():Class[_]"></a>
<a id="getClass():Class[_]"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">getClass</span><span class="params">()</span><span class="result">: <span class="extype" name="java.lang.Class">Class</span>[_]</span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#io.gearpump.cluster.AppJar@getClass():Class[_]" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef → Any</dd></dl></div>
</li><li name="scala.Any#isInstanceOf" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="isInstanceOf[T0]:Boolean"></a>
<a id="isInstanceOf[T0]:Boolean"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">isInstanceOf</span><span class="tparams">[<span name="T0">T0</span>]</span><span class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#io.gearpump.cluster.AppJar@isInstanceOf[T0]:Boolean" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>Any</dd></dl></div>
</li><li name="io.gearpump.cluster.AppJar#name" visbl="pub" data-isabs="false" fullComment="no" group="Ungrouped">
<a id="name:String"></a>
<a id="name:String"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier"></span>
<span class="kind">val</span>
</span>
<span class="symbol">
<span class="name">name</span><span class="result">: <span class="extype" name="scala.Predef.String">String</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#io.gearpump.cluster.AppJar@name:String" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
</li><li name="scala.AnyRef#ne" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="ne(x$1:AnyRef):Boolean"></a>
<a id="ne(AnyRef):Boolean"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">ne</span><span class="params">(<span name="arg0">arg0: <span class="extype" name="scala.AnyRef">AnyRef</span></span>)</span><span class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#io.gearpump.cluster.AppJar@ne(x$1:AnyRef):Boolean" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd></dl></div>
</li><li name="scala.AnyRef#notify" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="notify():Unit"></a>
<a id="notify():Unit"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">notify</span><span class="params">()</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#io.gearpump.cluster.AppJar@notify():Unit" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd></dl></div>
</li><li name="scala.AnyRef#notifyAll" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="notifyAll():Unit"></a>
<a id="notifyAll():Unit"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">notifyAll</span><span class="params">()</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#io.gearpump.cluster.AppJar@notifyAll():Unit" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd></dl></div>
</li><li name="scala.AnyRef#synchronized" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="synchronized[T0](x$1:=>T0):T0"></a>
<a id="synchronized[T0](⇒T0):T0"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">synchronized</span><span class="tparams">[<span name="T0">T0</span>]</span><span class="params">(<span name="arg0">arg0: ⇒ <span class="extype" name="java.lang.AnyRef.synchronized.T0">T0</span></span>)</span><span class="result">: <span class="extype" name="java.lang.AnyRef.synchronized.T0">T0</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#io.gearpump.cluster.AppJar@synchronized[T0](x$1:=>T0):T0" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd></dl></div>
</li><li name="scala.AnyRef#wait" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="wait():Unit"></a>
<a id="wait():Unit"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">wait</span><span class="params">()</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#io.gearpump.cluster.AppJar@wait():Unit" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd><dt>Annotations</dt><dd>
<span class="name">@throws</span><span class="args">(<span>
<span class="defval" name="classOf[java.lang.InterruptedException]">...</span>
</span>)</span>
</dd></dl></div>
</li><li name="scala.AnyRef#wait" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="wait(x$1:Long,x$2:Int):Unit"></a>
<a id="wait(Long,Int):Unit"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">wait</span><span class="params">(<span name="arg0">arg0: <span class="extype" name="scala.Long">Long</span></span>, <span name="arg1">arg1: <span class="extype" name="scala.Int">Int</span></span>)</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#io.gearpump.cluster.AppJar@wait(x$1:Long,x$2:Int):Unit" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd><dt>Annotations</dt><dd>
<span class="name">@throws</span><span class="args">(<span>
<span class="defval" name="classOf[java.lang.InterruptedException]">...</span>
</span>)</span>
</dd></dl></div>
</li><li name="scala.AnyRef#wait" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="wait(x$1:Long):Unit"></a>
<a id="wait(Long):Unit"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">wait</span><span class="params">(<span name="arg0">arg0: <span class="extype" name="scala.Long">Long</span></span>)</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#io.gearpump.cluster.AppJar@wait(x$1:Long):Unit" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd><dt>Annotations</dt><dd>
<span class="name">@throws</span><span class="args">(<span>
<span class="defval" name="classOf[java.lang.InterruptedException]">...</span>
</span>)</span>
</dd></dl></div>
</li></ol>
</div>
</div>
<div id="inheritedMembers">
<div class="parent" name="scala.Serializable">
<h3>Inherited from <span class="extype" name="scala.Serializable">Serializable</span></h3>
</div><div class="parent" name="java.io.Serializable">
<h3>Inherited from <span class="extype" name="java.io.Serializable">Serializable</span></h3>
</div><div class="parent" name="scala.Product">
<h3>Inherited from <span class="extype" name="scala.Product">Product</span></h3>
</div><div class="parent" name="scala.Equals">
<h3>Inherited from <span class="extype" name="scala.Equals">Equals</span></h3>
</div><div class="parent" name="scala.AnyRef">
<h3>Inherited from <span class="extype" name="scala.AnyRef">AnyRef</span></h3>
</div><div class="parent" name="scala.Any">
<h3>Inherited from <span class="extype" name="scala.Any">Any</span></h3>
</div>
</div>
<div id="groupedMembers">
<div class="group" name="Ungrouped">
<h3>Ungrouped</h3>
</div>
</div>
</div>
<div id="tooltip"></div>
<div id="footer"> </div>
</body>
</html>
| apache-2.0 |
wswenyue/metadata-extractor | Source/com/drew/metadata/exif/makernotes/PentaxMakernoteDescriptor.java | 4966 | /*
* Copyright 2002-2015 Drew Noakes
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* More information about this project is available at:
*
* https://drewnoakes.com/code/exif/
* https://github.com/drewnoakes/metadata-extractor
*/
package com.drew.metadata.exif.makernotes;
import com.drew.lang.annotations.NotNull;
import com.drew.lang.annotations.Nullable;
import com.drew.metadata.TagDescriptor;
import static com.drew.metadata.exif.makernotes.PentaxMakernoteDirectory.*;
/**
* Provides human-readable string representations of tag values stored in a {@link PentaxMakernoteDirectory}.
* <p>
* Some information about this makernote taken from here:
* http://www.ozhiker.com/electronics/pjmt/jpeg_info/pentax_mn.html
*
* @author Drew Noakes https://drewnoakes.com
*/
public class PentaxMakernoteDescriptor extends TagDescriptor<PentaxMakernoteDirectory>
{
public PentaxMakernoteDescriptor(@NotNull PentaxMakernoteDirectory directory)
{
super(directory);
}
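    // Illustrative usage (added commentary, not part of the original source); a
    // sketch assuming the metadata-extractor reading API (ImageMetadataReader,
    // Metadata#getFirstDirectoryOfType) available elsewhere in this library:
    //
    //   Metadata metadata = ImageMetadataReader.readMetadata(new File("photo.jpg"));
    //   PentaxMakernoteDirectory directory =
    //       metadata.getFirstDirectoryOfType(PentaxMakernoteDirectory.class);
    //   if (directory != null) {
    //       String captureMode = new PentaxMakernoteDescriptor(directory)
    //           .getDescription(PentaxMakernoteDirectory.TAG_CAPTURE_MODE);
    //   }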
@Override
@Nullable
public String getDescription(int tagType)
{
switch (tagType) {
case TAG_CAPTURE_MODE:
return getCaptureModeDescription();
case TAG_QUALITY_LEVEL:
return getQualityLevelDescription();
case TAG_FOCUS_MODE:
return getFocusModeDescription();
case TAG_FLASH_MODE:
return getFlashModeDescription();
case TAG_WHITE_BALANCE:
return getWhiteBalanceDescription();
case TAG_DIGITAL_ZOOM:
return getDigitalZoomDescription();
case TAG_SHARPNESS:
return getSharpnessDescription();
case TAG_CONTRAST:
return getContrastDescription();
case TAG_SATURATION:
return getSaturationDescription();
case TAG_ISO_SPEED:
return getIsoSpeedDescription();
case TAG_COLOUR:
return getColourDescription();
default:
return super.getDescription(tagType);
}
}
@Nullable
public String getColourDescription()
{
return getIndexedDescription(TAG_COLOUR, 1, "Normal", "Black & White", "Sepia");
}
@Nullable
public String getIsoSpeedDescription()
{
Integer value = _directory.getInteger(TAG_ISO_SPEED);
if (value == null)
return null;
switch (value) {
// TODO there must be other values which aren't catered for here
case 10: return "ISO 100";
case 16: return "ISO 200";
case 100: return "ISO 100";
case 200: return "ISO 200";
default: return "Unknown (" + value + ")";
}
}
@Nullable
public String getSaturationDescription()
{
return getIndexedDescription(TAG_SATURATION, "Normal", "Low", "High");
}
@Nullable
public String getContrastDescription()
{
return getIndexedDescription(TAG_CONTRAST, "Normal", "Low", "High");
}
@Nullable
public String getSharpnessDescription()
{
return getIndexedDescription(TAG_SHARPNESS, "Normal", "Soft", "Hard");
}
@Nullable
public String getDigitalZoomDescription()
{
Float value = _directory.getFloatObject(TAG_DIGITAL_ZOOM);
if (value == null)
return null;
if (value == 0)
return "Off";
return Float.toString(value);
}
@Nullable
public String getWhiteBalanceDescription()
{
return getIndexedDescription(TAG_WHITE_BALANCE,
"Auto", "Daylight", "Shade", "Tungsten", "Fluorescent", "Manual");
}
@Nullable
public String getFlashModeDescription()
{
return getIndexedDescription(TAG_FLASH_MODE,
1, "Auto", "Flash On", null, "Flash Off", null, "Red-eye Reduction");
}
@Nullable
public String getFocusModeDescription()
{
return getIndexedDescription(TAG_FOCUS_MODE, 2, "Custom", "Auto");
}
@Nullable
public String getQualityLevelDescription()
{
return getIndexedDescription(TAG_QUALITY_LEVEL, "Good", "Better", "Best");
}
@Nullable
public String getCaptureModeDescription()
{
return getIndexedDescription(TAG_CAPTURE_MODE,
"Auto", "Night-scene", "Manual", null, "Multiple");
}
}
| apache-2.0 |
tkremenek/swift | include/swift/AST/ModuleLoader.h | 10653 | //===--- ModuleLoader.h - Module Loader Interface ---------------*- C++ -*-===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
//
// This file implements an abstract interface for loading modules.
//
//===----------------------------------------------------------------------===//
#ifndef SWIFT_AST_MODULE_LOADER_H
#define SWIFT_AST_MODULE_LOADER_H
#include "swift/AST/Identifier.h"
#include "swift/AST/Import.h"
#include "swift/AST/ModuleDependencies.h"
#include "swift/Basic/ArrayRefView.h"
#include "swift/Basic/Fingerprint.h"
#include "swift/Basic/LLVM.h"
#include "swift/Basic/Located.h"
#include "swift/Basic/SourceLoc.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Support/VersionTuple.h"
#include <system_error>
namespace llvm {
class FileCollectorBase;
}
namespace clang {
class DependencyCollector;
}
namespace swift {
class AbstractFunctionDecl;
struct AutoDiffConfig;
class ClangImporterOptions;
class ClassDecl;
class FileUnit;
class ModuleDecl;
class ModuleDependencies;
class ModuleDependenciesCache;
class NominalTypeDecl;
class SourceFile;
class TypeDecl;
class CompilerInstance;
enum class KnownProtocolKind : uint8_t;
enum class Bridgeability : unsigned {
/// This context does not permit bridging at all. For example, the
/// target of a C pointer.
None,
/// This context permits all kinds of bridging. For example, the
/// imported result of a method declaration.
Full
};
/// Specifies which dependencies the intermodule dependency tracker records.
enum class IntermoduleDepTrackingMode {
/// Records both system and non-system dependencies.
IncludeSystem,
/// Records only non-system dependencies.
ExcludeSystem,
};
/// Records dependencies on files outside of the current module;
/// implemented in terms of a wrapped clang::DependencyCollector.
class DependencyTracker {
public:
/// A representation of a first-class incremental dependency known to the
/// Swift compiler.
struct IncrementalDependency {
std::string path;
Fingerprint fingerprint;
IncrementalDependency(std::string Path, Fingerprint FP)
: path(std::move(Path)), fingerprint(std::move(FP)) {}
};
inline static std::string getPath(const IncrementalDependency &id) {
return id.path;
}
typedef ArrayRefView<IncrementalDependency, std::string, getPath>
DependencyPathArrayRef;
std::shared_ptr<clang::DependencyCollector> clangCollector;
SmallVector<IncrementalDependency, 8> incrementalDeps;
llvm::StringSet<> incrementalDepsUniquer;
public:
explicit DependencyTracker(
IntermoduleDepTrackingMode Mode,
std::shared_ptr<llvm::FileCollectorBase> FileCollector = {});
/// Adds a file as a dependency.
///
/// The contents of \p File are taken literally, and should be appropriate
/// for appearing in a list of dependencies suitable for tooling like Make.
/// No path canonicalization is done.
void addDependency(StringRef File, bool IsSystem);
/// Adds a file as an incremental dependency.
///
/// No additional canonicalization or adulteration of the file path in
/// \p File is performed.
void addIncrementalDependency(StringRef File, Fingerprint FP);
/// Fetches the list of dependencies.
ArrayRef<std::string> getDependencies() const;
/// Fetches the list of dependencies that are known to have incremental swift
/// dependency information embedded inside of them.
ArrayRef<IncrementalDependency> getIncrementalDependencies() const;
/// A view of the paths of the dependencies known to have incremental swift
/// dependency information embedded inside of them.
DependencyPathArrayRef getIncrementalDependencyPaths() const {
return DependencyPathArrayRef(getIncrementalDependencies());
}
/// Return the underlying clang::DependencyCollector that this
/// class wraps.
std::shared_ptr<clang::DependencyCollector> getClangCollector();
};
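// Illustrative usage (added commentary, not part of the original header); a
// minimal sketch of the DependencyTracker interface declared above:
//
//   DependencyTracker tracker(IntermoduleDepTrackingMode::ExcludeSystem);
//   tracker.addDependency("/path/to/Module.swiftinterface", /*IsSystem=*/false);
//   for (const std::string &path : tracker.getDependencies()) {
//     // emit the path into a Make-style dependency list
//   }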
struct SubCompilerInstanceInfo {
StringRef CompilerVersion;
CompilerInstance* Instance;
StringRef Hash;
ArrayRef<StringRef> BuildArguments;
ArrayRef<StringRef> ExtraPCMArgs;
};
/// Abstract interface for a checker of module interfaces and prebuilt modules.
class ModuleInterfaceChecker {
public:
virtual std::vector<std::string>
getCompiledModuleCandidatesForInterface(StringRef moduleName,
StringRef interfacePath) = 0;
/// Given a list of potential ready-to-use compiled modules for \p interfacePath,
/// check if any one of them is up-to-date. If so, emit a forwarding module
/// to the candidate binary module to \p outPath.
virtual bool tryEmitForwardingModule(StringRef moduleName,
StringRef interfacePath,
ArrayRef<std::string> candidates,
StringRef outPath) = 0;
virtual ~ModuleInterfaceChecker() = default;
};
/// Abstract interface to run an action in a sub ASTContext.
struct InterfaceSubContextDelegate {
virtual std::error_code runInSubContext(StringRef moduleName,
StringRef interfacePath,
StringRef outputPath,
SourceLoc diagLoc,
llvm::function_ref<std::error_code(ASTContext&, ModuleDecl*,
ArrayRef<StringRef>,
ArrayRef<StringRef>, StringRef)> action) = 0;
virtual std::error_code runInSubCompilerInstance(StringRef moduleName,
StringRef interfacePath,
StringRef outputPath,
SourceLoc diagLoc,
llvm::function_ref<std::error_code(SubCompilerInstanceInfo&)> action) = 0;
virtual ~InterfaceSubContextDelegate() = default;
};
/// Abstract interface that loads named modules into the AST.
class ModuleLoader {
virtual void anchor();
protected:
DependencyTracker * const dependencyTracker;
ModuleLoader(DependencyTracker *tracker) : dependencyTracker(tracker) {}
public:
virtual ~ModuleLoader() = default;
/// Collect visible module names.
///
/// Append visible module names to \p names. Note that names are possibly
/// duplicated, and not guaranteed to be ordered in any way.
virtual void collectVisibleTopLevelModuleNames(
SmallVectorImpl<Identifier> &names) const = 0;
/// Check whether the module with a given name can be imported without
/// importing it.
///
/// Note that even if this check succeeds, errors may still occur if the
/// module is loaded in full.
virtual bool canImportModule(ImportPath::Element named,
llvm::VersionTuple version,
bool underlyingVersion) = 0;
/// Import a module with the given module path.
///
/// \param importLoc The location of the 'import' keyword.
///
/// \param path A sequence of (identifier, location) pairs that denote
/// the dotted module name to load, e.g., AppKit.NSWindow.
///
/// \returns the module referenced, if it could be loaded. Otherwise,
/// emits a diagnostic and returns NULL.
virtual
ModuleDecl *loadModule(SourceLoc importLoc, ImportPath::Module path) = 0;
/// Load extensions to the given nominal type.
///
/// \param nominal The nominal type whose extensions should be loaded.
///
/// \param previousGeneration The previous generation number. The AST already
/// contains extensions loaded from any generation up to and including this
/// one.
virtual void loadExtensions(NominalTypeDecl *nominal,
unsigned previousGeneration) { }
/// Load the methods within the given class that produce
/// Objective-C class or instance methods with the given selector.
///
/// \param classDecl The class in which we are searching for @objc methods.
/// The search only considers this class and its extensions; not any
/// superclasses.
///
/// \param selector The selector to search for.
///
/// \param isInstanceMethod Whether we are looking for an instance method
/// (vs. a class method).
///
/// \param previousGeneration The previous generation with which this
/// callback was invoked. The list of methods will already contain all of
/// the results from generations up and including \c previousGeneration.
///
/// \param methods The list of @objc methods in this class that have this
/// selector and are instance/class methods as requested. This list will be
/// extended with any methods found in subsequent generations.
virtual void loadObjCMethods(
ClassDecl *classDecl,
ObjCSelector selector,
bool isInstanceMethod,
unsigned previousGeneration,
llvm::TinyPtrVector<AbstractFunctionDecl *> &methods) = 0;
/// Load derivative function configurations for the given
/// AbstractFunctionDecl.
///
/// \param originalAFD The declaration whose derivative function
/// configurations should be loaded.
///
/// \param previousGeneration The previous generation number. The AST already
/// contains derivative function configurations loaded from any generation up
/// to and including this one.
///
/// \param results The result list of derivative function configurations.
/// This list will be extended with any methods found in subsequent
/// generations.
virtual void loadDerivativeFunctionConfigurations(
AbstractFunctionDecl *originalAFD, unsigned previousGeneration,
llvm::SetVector<AutoDiffConfig> &results) {};
/// Verify all modules loaded by this loader.
virtual void verifyAllModules() { }
  /// Discover overlays declared alongside this file and add information about
/// them to it.
void findOverlayFiles(SourceLoc diagLoc, ModuleDecl *module, FileUnit *file);
/// Retrieve the dependencies for the given, named module, or \c None
/// if no such module exists.
virtual Optional<ModuleDependencies> getModuleDependencies(
StringRef moduleName,
ModuleDependenciesCache &cache,
InterfaceSubContextDelegate &delegate) = 0;
};
} // namespace swift
#endif
| apache-2.0 |
vaughnick/fco | wp-content/plugins/so-widgets-bundle/compat/visual-composer/styles.css | 435 | .siteorigin-widget-form .siteorigin-widget-field input {
margin: 1px;
padding: 2px 5px;
}
.siteorigin-widget-form .siteorigin-widget-field input[type="radio"] {
width: inherit;
}
.siteorigin-widget-form .siteorigin-widget-field select {
padding: 2px;
height: 28px;
width: inherit;
margin: 1px;
}
.vc_element-icon.so-widget-icon {
background-size: cover;
background-image: url("../../base/css/img/bundle-icon.png");
}
| apache-2.0 |
mxrenkin/hibernate-validator | engine/src/test/java/org/hibernate/validator/test/internal/engine/methodvalidation/service/CustomerRepository.java | 2001 | /*
* Hibernate Validator, declare and validate application constraints
*
* License: Apache License, Version 2.0
* See the license.txt file in the root directory or <http://www.apache.org/licenses/LICENSE-2.0>.
*/
package org.hibernate.validator.test.internal.engine.methodvalidation.service;
import java.util.List;
import java.util.Map;
import javax.validation.Valid;
import javax.validation.constraints.Min;
import javax.validation.constraints.NotNull;
import org.joda.time.DateMidnight;
import org.hibernate.validator.constraints.NotEmpty;
import org.hibernate.validator.test.internal.engine.methodvalidation.model.Customer;
/**
* @author Gunnar Morling
*/
public interface CustomerRepository extends RepositoryBase<Customer> {
@Valid
Customer findCustomerByName(@NotNull String name);
void persistCustomer(@NotNull @Valid Customer customer);
void cascadingMapParameter(@Valid Map<String, Customer> customer);
void cascadingIterableParameter(@Valid List<Customer> customer);
void cascadingArrayParameter(@Valid Customer... customer);
void findCustomerByAgeAndName(@Min(5) Integer age, @NotNull String name);
void cascadingParameter(@NotNull @Valid Customer param1, @NotNull @Valid Customer param2);
@Override
void foo(Long id);
@Override
void bar(Customer customer);
void boz();
@Min(10)
int baz();
@Valid
Customer cascadingReturnValue();
@Valid
List<Customer> cascadingIterableReturnValue();
@Valid
Map<String, Customer> cascadingMapReturnValue();
@Valid
Customer[] cascadingArrayReturnValue();
@Override
Customer overriddenMethodWithCascadingReturnValue();
void parameterConstraintInGroup(@NotNull(groups = { ValidationGroup.class }) String name);
@Override
@Min(10)
int overriddenMethodWithReturnValueConstraint();
int getFoo(int i);
int getFoo(@NotEmpty String s);
@ConsistentDateParameters
void methodWithCrossParameterConstraint(@NotNull DateMidnight start, @NotNull DateMidnight end);
public interface ValidationGroup {
}
}
| apache-2.0 |
winger007/zstack | plugin/loadBalancer/src/main/java/org/zstack/network/service/lb/LoadBalancerBase.java | 44011 | package org.zstack.network.service.lb;
import org.springframework.beans.factory.annotation.Autowire;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Configurable;
import org.springframework.transaction.annotation.Transactional;
import org.zstack.core.Platform;
import org.zstack.core.cloudbus.CloudBus;
import org.zstack.core.db.DatabaseFacade;
import org.zstack.core.db.SimpleQuery;
import org.zstack.core.db.SimpleQuery.Op;
import org.zstack.core.db.UpdateQuery;
import org.zstack.core.errorcode.ErrorFacade;
import org.zstack.core.thread.ChainTask;
import org.zstack.core.thread.SyncTaskChain;
import org.zstack.core.thread.ThreadFacade;
import org.zstack.core.workflow.FlowChainBuilder;
import org.zstack.core.workflow.ShareFlow;
import org.zstack.header.core.Completion;
import org.zstack.header.core.NoErrorCompletion;
import org.zstack.header.core.workflow.*;
import org.zstack.header.errorcode.ErrorCode;
import org.zstack.header.errorcode.OperationFailureException;
import org.zstack.header.exception.CloudRuntimeException;
import org.zstack.header.message.APIMessage;
import org.zstack.header.message.Message;
import org.zstack.header.network.l3.L3NetworkVO;
import org.zstack.header.network.service.NetworkServiceL3NetworkRefVO;
import org.zstack.header.vm.*;
import org.zstack.identity.AccountManager;
import org.zstack.network.service.vip.*;
import org.zstack.tag.TagManager;
import org.zstack.utils.CollectionUtils;
import org.zstack.utils.DebugUtils;
import org.zstack.utils.Utils;
import org.zstack.utils.function.Function;
import org.zstack.utils.logging.CLogger;
import static org.zstack.core.Platform.operr;
import javax.persistence.TypedQuery;
import java.util.*;
import java.util.stream.Collectors;
import static java.util.Arrays.asList;
/**
* Created by frank on 8/8/2015.
*/
@Configurable(preConstruction = true, autowire = Autowire.BY_TYPE)
public class LoadBalancerBase {
private static final CLogger logger = Utils.getLogger(LoadBalancerBase.class);
@Autowired
private CloudBus bus;
@Autowired
private DatabaseFacade dbf;
@Autowired
private LoadBalancerManager lbMgr;
@Autowired
private ThreadFacade thdf;
@Autowired
private ErrorFacade errf;
@Autowired
private AccountManager acntMgr;
@Autowired
private TagManager tagMgr;
private LoadBalancerVO self;
private String getSyncId() {
return String.format("operate-lb-%s", self.getUuid());
}
protected LoadBalancerInventory getInventory() {
return LoadBalancerInventory.valueOf(self);
}
private LoadBalancerInventory reloadAndGetInventory() {
self = dbf.reload(self);
return getInventory();
}
public LoadBalancerBase(LoadBalancerVO self) {
this.self = self;
}
void handleMessage(Message msg) {
if (msg instanceof APIMessage) {
handleApiMessage((APIMessage) msg);
} else {
handleLocalMessage(msg);
}
}
private void handleLocalMessage(Message msg) {
if (msg instanceof LoadBalancerActiveVmNicMsg) {
handle((LoadBalancerActiveVmNicMsg) msg);
} else if (msg instanceof LoadBalancerDeactiveVmNicMsg) {
handle((LoadBalancerDeactiveVmNicMsg) msg);
} else if (msg instanceof LoadBalancerRemoveVmNicMsg) {
handle((LoadBalancerRemoveVmNicMsg) msg);
} else if (msg instanceof RefreshLoadBalancerMsg) {
handle((RefreshLoadBalancerMsg) msg);
} else if (msg instanceof DeleteLoadBalancerMsg) {
handle((DeleteLoadBalancerMsg) msg);
} else if (msg instanceof DeleteLoadBalancerOnlyMsg) {
handle((DeleteLoadBalancerOnlyMsg) msg);
} else {
bus.dealWithUnknownMessage(msg);
}
}
private void handle(DeleteLoadBalancerOnlyMsg msg) {
DeleteLoadBalancerOnlyReply reply = new DeleteLoadBalancerOnlyReply();
thdf.chainSubmit(new ChainTask(msg) {
@Override
public String getSyncSignature() {
return getSyncId();
}
@Override
public void run(SyncTaskChain chain) {
if (self.getProviderType() == null) {
// not initialized yet
dbf.remove(self);
bus.reply(msg, reply);
chain.next();
return;
}
LoadBalancerBackend bkd = getBackend();
bkd.destroyLoadBalancer(makeStruct(), new Completion(msg, chain) {
@Override
public void success() {
dbf.remove(self);
bus.reply(msg, reply);
chain.next();
}
@Override
public void fail(ErrorCode errorCode) {
reply.setError(errorCode);
bus.reply(msg, reply);
chain.next();
}
});
}
@Override
public String getName() {
return "delete-load-balancer-only";
}
});
}
private void handle(final DeleteLoadBalancerMsg msg) {
final DeleteLoadBalancerReply reply = new DeleteLoadBalancerReply();
thdf.chainSubmit(new ChainTask(msg) {
@Override
public String getSyncSignature() {
return getSyncId();
}
@Override
public void run(final SyncTaskChain chain) {
delete(new Completion(msg, chain) {
@Override
public void success() {
                        bus.reply(msg, reply);
                        chain.next();
                    }
                    @Override
                    public void fail(ErrorCode errorCode) {
                        reply.setError(errorCode);
                        bus.reply(msg, reply);
chain.next();
}
});
}
@Override
public String getName() {
return "delete-lb";
}
});
}
private void handle(final RefreshLoadBalancerMsg msg) {
final RefreshLoadBalancerReply reply = new RefreshLoadBalancerReply();
thdf.chainSubmit(new ChainTask(msg) {
@Override
public String getSyncSignature() {
return getSyncId();
}
@Override
public void run(final SyncTaskChain chain) {
refresh(new Completion(msg, chain) {
@Override
public void success() {
reply.setInventory(getInventory());
bus.reply(msg, reply);
chain.next();
}
@Override
public void fail(ErrorCode errorCode) {
reply.setError(errorCode);
bus.reply(msg, reply);
chain.next();
}
});
}
@Override
public String getName() {
return "refresh-lb";
}
});
}
private void refresh(final Completion completion) {
LoadBalancerBackend bkd = getBackend();
bkd.refresh(makeStruct(), completion);
}
private void handle(final LoadBalancerRemoveVmNicMsg msg) {
thdf.chainSubmit(new ChainTask(msg) {
@Override
public String getSyncSignature() {
return getSyncId();
}
@Override
public void run(final SyncTaskChain chain) {
final LoadBalancerRemoveVmNicReply reply = new LoadBalancerRemoveVmNicReply();
removeNics(msg.getListenerUuid(), msg.getVmNicUuids(), new Completion(msg, chain) {
@Override
public void success() {
bus.reply(msg, reply);
chain.next();
}
@Override
public void fail(ErrorCode errorCode) {
reply.setError(errorCode);
bus.reply(msg, reply);
chain.next();
}
});
}
@Override
public String getName() {
return "remove-nic-from-lb";
}
});
}
private void checkIfNicIsAdded(List<String> nicUuids) {
List<String> allNicUuids = new ArrayList<String>();
for (LoadBalancerListenerVO l : self.getListeners()) {
allNicUuids.addAll(CollectionUtils.transformToList(l.getVmNicRefs(), new Function<String, LoadBalancerListenerVmNicRefVO>() {
@Override
public String call(LoadBalancerListenerVmNicRefVO arg) {
return arg.getVmNicUuid();
}
}));
}
for (String nicUuid : nicUuids) {
if (!allNicUuids.contains(nicUuid)) {
throw new CloudRuntimeException(String.format("the load balancer[uuid: %s] doesn't have a vm nic[uuid: %s] added", self.getUuid(), nicUuid));
}
}
}
private void handle(final LoadBalancerDeactiveVmNicMsg msg) {
checkIfNicIsAdded(msg.getVmNicUuids());
LoadBalancerListenerVO l = CollectionUtils.find(self.getListeners(), new Function<LoadBalancerListenerVO, LoadBalancerListenerVO>() {
@Override
public LoadBalancerListenerVO call(LoadBalancerListenerVO arg) {
return arg.getUuid().equals(msg.getListenerUuid()) ? arg : null;
}
});
final List<LoadBalancerListenerVmNicRefVO> refs = CollectionUtils.transformToList(l.getVmNicRefs(), new Function<LoadBalancerListenerVmNicRefVO, LoadBalancerListenerVmNicRefVO>() {
@Override
public LoadBalancerListenerVmNicRefVO call(LoadBalancerListenerVmNicRefVO arg) {
return msg.getVmNicUuids().contains(arg.getVmNicUuid()) ? arg : null;
}
});
final LoadBalancerDeactiveVmNicReply reply = new LoadBalancerDeactiveVmNicReply();
FlowChain chain = FlowChainBuilder.newShareFlowChain();
chain.setName(String.format("deactive-vm-nics-on-lb-%s", self.getUuid()));
chain.then(new ShareFlow() {
@Override
public void setup() {
flow(new Flow() {
String __name__ = "set-nics-to-inactive-in-db";
@Override
public void run(FlowTrigger trigger, Map data) {
for (LoadBalancerListenerVmNicRefVO ref : refs) {
ref.setStatus(LoadBalancerVmNicStatus.Inactive);
dbf.update(ref);
}
trigger.next();
}
@Override
public void rollback(FlowRollback trigger, Map data) {
for (LoadBalancerListenerVmNicRefVO ref : refs) {
ref.setStatus(LoadBalancerVmNicStatus.Active);
dbf.update(ref);
}
trigger.rollback();
}
});
flow(new NoRollbackFlow() {
String __name__ = "deactive-nics-on-backend";
@Override
public void run(final FlowTrigger trigger, Map data) {
SimpleQuery<VmNicVO> q = dbf.createQuery(VmNicVO.class);
q.add(VmNicVO_.uuid, Op.IN, CollectionUtils.transformToList(refs, new Function<String, LoadBalancerListenerVmNicRefVO>() {
@Override
public String call(LoadBalancerListenerVmNicRefVO arg) {
return arg.getVmNicUuid();
}
}));
List<VmNicVO> nicvos = q.list();
LoadBalancerBackend bkd = getBackend();
bkd.removeVmNics(makeStruct(), VmNicInventory.valueOf(nicvos), new Completion(trigger) {
@Override
public void success() {
trigger.next();
}
@Override
public void fail(ErrorCode errorCode) {
trigger.fail(errorCode);
}
});
}
});
done(new FlowDoneHandler(msg) {
@Override
public void handle(Map data) {
bus.reply(msg, reply);
}
});
error(new FlowErrorHandler(msg) {
@Override
public void handle(ErrorCode errCode, Map data) {
reply.setError(errCode);
bus.reply(msg, reply);
}
});
}
}).start();
}
private void activeVmNic(final LoadBalancerActiveVmNicMsg msg, final NoErrorCompletion completion) {
checkIfNicIsAdded(msg.getVmNicUuids());
LoadBalancerListenerVO l = CollectionUtils.find(self.getListeners(), new Function<LoadBalancerListenerVO, LoadBalancerListenerVO>() {
@Override
public LoadBalancerListenerVO call(LoadBalancerListenerVO arg) {
return arg.getUuid().equals(msg.getListenerUuid()) ? arg : null;
}
});
final List<LoadBalancerListenerVmNicRefVO> refs = CollectionUtils.transformToList(l.getVmNicRefs(), new Function<LoadBalancerListenerVmNicRefVO, LoadBalancerListenerVmNicRefVO>() {
@Override
public LoadBalancerListenerVmNicRefVO call(LoadBalancerListenerVmNicRefVO arg) {
return msg.getVmNicUuids().contains(arg.getVmNicUuid()) ? arg : null;
}
});
final LoadBalancerActiveVmNicReply reply = new LoadBalancerActiveVmNicReply();
FlowChain chain = FlowChainBuilder.newShareFlowChain();
chain.setName(String.format("active-vm-nics-on-lb-%s", self.getUuid()));
chain.then(new ShareFlow() {
@Override
public void setup() {
flow(new Flow() {
String __name__ = "set-nics-to-active-in-db";
@Override
public void run(FlowTrigger trigger, Map data) {
for (LoadBalancerListenerVmNicRefVO ref : refs) {
ref.setStatus(LoadBalancerVmNicStatus.Active);
dbf.update(ref);
}
trigger.next();
}
@Override
public void rollback(FlowRollback trigger, Map data) {
for (LoadBalancerListenerVmNicRefVO ref : refs) {
ref.setStatus(LoadBalancerVmNicStatus.Inactive);
dbf.update(ref);
}
trigger.rollback();
}
});
flow(new NoRollbackFlow() {
String __name__ = "active-nics-on-backend";
@Override
public void run(final FlowTrigger trigger, Map data) {
SimpleQuery<VmNicVO> q = dbf.createQuery(VmNicVO.class);
q.add(VmNicVO_.uuid, Op.IN, CollectionUtils.transformToList(refs, new Function<String, LoadBalancerListenerVmNicRefVO>() {
@Override
public String call(LoadBalancerListenerVmNicRefVO arg) {
return arg.getVmNicUuid();
}
}));
List<VmNicVO> nicvos = q.list();
LoadBalancerBackend bkd = getBackend();
bkd.addVmNics(makeStruct(), VmNicInventory.valueOf(nicvos), new Completion(trigger) {
@Override
public void success() {
trigger.next();
}
@Override
public void fail(ErrorCode errorCode) {
trigger.fail(errorCode);
}
});
}
});
done(new FlowDoneHandler(msg) {
@Override
public void handle(Map data) {
bus.reply(msg, reply);
completion.done();
}
});
error(new FlowErrorHandler(msg) {
@Override
public void handle(ErrorCode errCode, Map data) {
reply.setError(errCode);
bus.reply(msg, reply);
completion.done();
}
});
}
}).start();
}
private void handle(final LoadBalancerActiveVmNicMsg msg) {
thdf.chainSubmit(new ChainTask(msg) {
@Override
public String getSyncSignature() {
return getSyncId();
}
@Override
public void run(final SyncTaskChain chain) {
activeVmNic(msg, new NoErrorCompletion(msg, chain) {
@Override
public void done() {
chain.next();
}
});
}
@Override
public String getName() {
return "deactive-nic";
}
});
}
private void handleApiMessage(APIMessage msg) {
if (msg instanceof APICreateLoadBalancerListenerMsg) {
handle((APICreateLoadBalancerListenerMsg) msg);
} else if (msg instanceof APIAddVmNicToLoadBalancerMsg) {
handle((APIAddVmNicToLoadBalancerMsg) msg);
} else if (msg instanceof APIRemoveVmNicFromLoadBalancerMsg) {
handle((APIRemoveVmNicFromLoadBalancerMsg) msg);
} else if (msg instanceof APIDeleteLoadBalancerListenerMsg) {
handle((APIDeleteLoadBalancerListenerMsg) msg);
} else if (msg instanceof APIDeleteLoadBalancerMsg) {
handle((APIDeleteLoadBalancerMsg) msg);
} else if (msg instanceof APIRefreshLoadBalancerMsg) {
handle((APIRefreshLoadBalancerMsg) msg);
} else if (msg instanceof APIGetCandidateVmNicsForLoadBalancerMsg) {
handle((APIGetCandidateVmNicsForLoadBalancerMsg) msg);
} else {
bus.dealWithUnknownMessage(msg);
}
}
@Transactional(readOnly = true)
private void handle(APIGetCandidateVmNicsForLoadBalancerMsg msg) {
APIGetCandidateVmNicsForLoadBalancerReply reply = new APIGetCandidateVmNicsForLoadBalancerReply();
String sql = "select vip.peerL3NetworkUuid from VipVO vip where vip.uuid = :uuid";
TypedQuery<String> q = dbf.getEntityManager().createQuery(sql, String.class);
q.setParameter("uuid", self.getVipUuid());
List<String> ret = q.getResultList();
String peerL3Uuid = ret.isEmpty() ? null : ret.get(0);
if (peerL3Uuid != null) {
// the load balancer has been bound to a private L3 network
sql = "select nic from VmNicVO nic, VmInstanceVO vm where nic.l3NetworkUuid = :l3Uuid and nic.uuid not in (select ref.vmNicUuid from LoadBalancerListenerVmNicRefVO ref" +
" where ref.listenerUuid = :luuid) and nic.vmInstanceUuid = vm.uuid and vm.type = :vmType and vm.state in (:vmStates)";
TypedQuery<VmNicVO> pq = dbf.getEntityManager().createQuery(sql, VmNicVO.class);
pq.setParameter("l3Uuid", peerL3Uuid);
pq.setParameter("luuid", msg.getListenerUuid());
pq.setParameter("vmType", VmInstanceConstant.USER_VM_TYPE);
pq.setParameter("vmStates", asList(VmInstanceState.Running, VmInstanceState.Stopped));
List<VmNicVO> nics = pq.getResultList();
reply.setInventories(VmNicInventory.valueOf(nics));
bus.reply(msg, reply);
return;
}
// the load balancer has not been bound to any private L3 network
sql = "select l3.uuid from L3NetworkVO l3, NetworkServiceL3NetworkRefVO ref where l3.uuid = ref.l3NetworkUuid" +
" and ref.networkServiceType = :type";
q = dbf.getEntityManager().createQuery(sql, String.class);
q.setParameter("type", LoadBalancerConstants.LB_NETWORK_SERVICE_TYPE_STRING);
List<String> l3Uuids = q.getResultList();
if (l3Uuids.isEmpty()) {
reply.setInventories(new ArrayList<>());
bus.reply(msg, reply);
return;
}
sql = "select nic from VmNicVO nic, VmInstanceVO vm where nic.l3NetworkUuid in (select l3.uuid from L3NetworkVO l3, NetworkServiceL3NetworkRefVO ref where l3.uuid = ref.l3NetworkUuid" +
" and ref.networkServiceType = :type) and nic.vmInstanceUuid = vm.uuid and vm.type = :vmType and vm.state in (:vmStates)";
TypedQuery<VmNicVO> nq = dbf.getEntityManager().createQuery(sql, VmNicVO.class);
nq.setParameter("type", LoadBalancerConstants.LB_NETWORK_SERVICE_TYPE_STRING);
nq.setParameter("vmType", VmInstanceConstant.USER_VM_TYPE);
nq.setParameter("vmStates", asList(VmInstanceState.Running, VmInstanceState.Stopped));
List<VmNicVO> nics = nq.getResultList();
reply.setInventories(VmNicInventory.valueOf(nics));
bus.reply(msg, reply);
}
private void handle(final APIRefreshLoadBalancerMsg msg) {
final APIRefreshLoadBalancerEvent evt = new APIRefreshLoadBalancerEvent(msg.getId());
thdf.chainSubmit(new ChainTask(msg) {
@Override
public String getSyncSignature() {
return getSyncId();
}
@Override
public void run(final SyncTaskChain chain) {
refresh(new Completion(msg, chain) {
@Override
public void success() {
evt.setInventory(getInventory());
bus.publish(evt);
chain.next();
}
@Override
public void fail(ErrorCode errorCode) {
evt.setError(errorCode);
bus.publish(evt);
chain.next();
}
});
}
@Override
public String getName() {
return "refresh-lb";
}
});
}
private void handle(final APIDeleteLoadBalancerMsg msg) {
final APIDeleteLoadBalancerEvent evt = new APIDeleteLoadBalancerEvent(msg.getId());
thdf.chainSubmit(new ChainTask(msg) {
@Override
public String getSyncSignature() {
return getSyncId();
}
@Override
public void run(final SyncTaskChain chain) {
delete(new Completion(msg, chain) {
@Override
public void success() {
bus.publish(evt);
chain.next();
}
@Override
public void fail(ErrorCode errorCode) {
evt.setError(errorCode);
bus.publish(evt);
chain.next();
}
});
}
@Override
public String getName() {
return "delete-lb";
}
});
}
private void delete(final Completion completion) {
FlowChain chain = FlowChainBuilder.newShareFlowChain();
chain.setName(String.format("delete-lb-%s", self.getUuid()));
chain.then(new ShareFlow() {
@Override
public void setup() {
flow(new NoRollbackFlow() {
String __name__ = "delete-lb";
@Override
public void run(final FlowTrigger trigger, Map data) {
                        if (self.getProviderType() == null) {
                            // not initialized yet
                            trigger.next();
                            return;
                        }
LoadBalancerBackend bkd = getBackend();
bkd.destroyLoadBalancer(makeStruct(), new Completion(trigger) {
@Override
public void success() {
trigger.next();
}
@Override
public void fail(ErrorCode errorCode) {
trigger.fail(errorCode);
}
});
}
});
flow(new NoRollbackFlow() {
String __name__ = "release-vip";
@Override
public void run(FlowTrigger trigger, Map data) {
new Vip(self.getVipUuid()).release(new Completion(trigger) {
@Override
public void success() {
trigger.next();
}
@Override
public void fail(ErrorCode errorCode) {
trigger.fail(errorCode);
}
});
}
});
done(new FlowDoneHandler(completion) {
@Override
public void handle(Map data) {
dbf.remove(self);
completion.success();
}
});
error(new FlowErrorHandler(completion) {
@Override
public void handle(ErrorCode errCode, Map data) {
completion.fail(errCode);
}
});
}
}).start();
}
private void handle(final APIDeleteLoadBalancerListenerMsg msg) {
thdf.chainSubmit(new ChainTask(msg) {
@Override
public String getSyncSignature() {
return getSyncId();
}
@Override
public void run(final SyncTaskChain chain) {
deleteListener(msg, new NoErrorCompletion(msg, chain) {
@Override
public void done() {
chain.next();
}
});
}
@Override
public String getName() {
return "delete-listener";
}
});
}
private LoadBalancerStruct removeListenerStruct(LoadBalancerListenerInventory listener) {
LoadBalancerStruct s = makeStruct();
for (LoadBalancerListenerInventory l : s.getListeners()) {
if (l.getUuid().equals(listener.getUuid())) {
l.setVmNicRefs(new ArrayList<>());
}
}
return s;
}
private void deleteListener(APIDeleteLoadBalancerListenerMsg msg, final NoErrorCompletion completion) {
final APIDeleteLoadBalancerListenerEvent evt = new APIDeleteLoadBalancerListenerEvent(msg.getId());
final LoadBalancerListenerVO vo = dbf.findByUuid(msg.getUuid(), LoadBalancerListenerVO.class);
if (vo == null) {
evt.setInventory(getInventory());
bus.publish(evt);
completion.done();
return;
}
if (!needAction()) {
dbf.remove(vo);
evt.setInventory(reloadAndGetInventory());
bus.publish(evt);
completion.done();
return;
}
LoadBalancerListenerInventory listener = LoadBalancerListenerInventory.valueOf(vo);
LoadBalancerBackend bkd = getBackend();
bkd.removeListener(removeListenerStruct(listener), listener, new Completion(msg, completion) {
@Override
public void success() {
dbf.remove(vo);
evt.setInventory(reloadAndGetInventory());
bus.publish(evt);
completion.done();
}
@Override
public void fail(ErrorCode errorCode) {
evt.setError(errorCode);
bus.publish(evt);
completion.done();
}
});
}
private void handle(final APIRemoveVmNicFromLoadBalancerMsg msg) {
thdf.chainSubmit(new ChainTask(msg) {
@Override
public String getSyncSignature() {
return getSyncId();
}
@Override
public void run(final SyncTaskChain chain) {
removeNic(msg, new NoErrorCompletion(msg, chain) {
@Override
public void done() {
chain.next();
}
});
}
@Override
public String getName() {
return "remove-nic";
}
});
}
private LoadBalancerStruct removeNicStruct(String listenerUuid, List<String> nicUuids) {
LoadBalancerStruct s = makeStruct();
Optional<LoadBalancerListenerInventory> opt = s.getListeners().stream().filter(it -> it.getUuid().equals(listenerUuid)).findAny();
DebugUtils.Assert(opt.isPresent(), String.format("cannot find listener[uuid:%s]", listenerUuid));
LoadBalancerListenerInventory l = opt.get();
l.getVmNicRefs().removeIf(loadBalancerListenerVmNicRefInventory -> nicUuids.contains(loadBalancerListenerVmNicRefInventory.getVmNicUuid()));
return s;
}
private void removeNics(String listenerUuid, final List<String> vmNicUuids, final Completion completion) {
SimpleQuery<VmNicVO> q = dbf.createQuery(VmNicVO.class);
q.add(VmNicVO_.uuid, Op.IN, vmNicUuids);
List<VmNicVO> vos = q.list();
List<VmNicInventory> nics = VmNicInventory.valueOf(vos);
LoadBalancerBackend bkd = getBackend();
bkd.removeVmNics(removeNicStruct(listenerUuid, vmNicUuids), nics, new Completion(completion) {
@Override
public void success() {
UpdateQuery.New(LoadBalancerListenerVmNicRefVO.class)
.condAnd(LoadBalancerListenerVmNicRefVO_.vmNicUuid, Op.IN, vmNicUuids)
.condAnd(LoadBalancerListenerVmNicRefVO_.listenerUuid, Op.EQ, listenerUuid)
.delete();
completion.success();
}
@Override
public void fail(ErrorCode errorCode) {
completion.fail(errorCode);
}
});
}
private void removeNic(APIRemoveVmNicFromLoadBalancerMsg msg, final NoErrorCompletion completion) {
final APIRemoveVmNicFromLoadBalancerEvent evt = new APIRemoveVmNicFromLoadBalancerEvent(msg.getId());
removeNics(msg.getListenerUuid(), msg.getVmNicUuids(), new Completion(msg, completion) {
@Override
public void success() {
evt.setInventory(reloadAndGetInventory());
bus.publish(evt);
completion.done();
}
@Override
public void fail(ErrorCode errorCode) {
evt.setError(errorCode);
bus.publish(evt);
completion.done();
}
});
}
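    // Added commentary: resolves which network service provider implements the
    // LoadBalancer network service on the L3 network of the given vm nic, or
    // returns null if that L3 network has no LoadBalancer service enabled.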
@Transactional(readOnly = true)
private String findProviderTypeByVmNicUuid(String nicUuid) {
String sql = "select l3 from L3NetworkVO l3, VmNicVO nic where nic.l3NetworkUuid = l3.uuid and nic.uuid = :uuid";
TypedQuery<L3NetworkVO> q = dbf.getEntityManager().createQuery(sql, L3NetworkVO.class);
q.setParameter("uuid", nicUuid);
L3NetworkVO l3 = q.getSingleResult();
for (NetworkServiceL3NetworkRefVO ref : l3.getNetworkServices()) {
if (LoadBalancerConstants.LB_NETWORK_SERVICE_TYPE_STRING.equals(ref.getNetworkServiceType())) {
sql = "select p.type from NetworkServiceProviderVO p where p.uuid = :uuid";
TypedQuery<String> nq = dbf.getEntityManager().createQuery(sql, String.class);
nq.setParameter("uuid", ref.getNetworkServiceProviderUuid());
return nq.getSingleResult();
}
}
return null;
}
private void handle(final APIAddVmNicToLoadBalancerMsg msg) {
thdf.chainSubmit(new ChainTask(msg) {
@Override
public String getSyncSignature() {
return getSyncId();
}
@Override
public void run(final SyncTaskChain chain) {
addVmNicToListener(msg, new NoErrorCompletion(chain) {
@Override
public void done() {
chain.next();
}
});
}
@Override
public String getName() {
return getSyncSignature();
}
});
}
private void addVmNicToListener(final APIAddVmNicToLoadBalancerMsg msg, final NoErrorCompletion completion) {
final APIAddVmNicToLoadBalancerEvent evt = new APIAddVmNicToLoadBalancerEvent(msg.getId());
final String providerType = findProviderTypeByVmNicUuid(msg.getVmNicUuids().get(0));
if (providerType == null) {
throw new OperationFailureException(operr("the L3 network of vm nic[uuid:%s] doesn't have load balancer service enabled", msg.getVmNicUuids().get(0)));
}
SimpleQuery<VmNicVO> q = dbf.createQuery(VmNicVO.class);
q.add(VmNicVO_.uuid, Op.IN, msg.getVmNicUuids());
List<VmNicVO> nicVOs = q.list();
final List<VmNicInventory> nics = VmNicInventory.valueOf(nicVOs);
FlowChain chain = FlowChainBuilder.newShareFlowChain();
chain.setName(String.format("add-vm-nic-to-lb-listener-%s", msg.getListenerUuid()));
chain.then(new ShareFlow() {
List<LoadBalancerListenerVmNicRefVO> refs = new ArrayList<LoadBalancerListenerVmNicRefVO>();
boolean init = false;
@Override
public void setup() {
flow(new Flow() {
String __name__ = "check-provider-type";
@Override
public void run(FlowTrigger trigger, Map data) {
if (self.getProviderType() == null) {
self.setProviderType(providerType);
self = dbf.updateAndRefresh(self);
init = true;
} else {
if (!providerType.equals(self.getProviderType())) {
throw new OperationFailureException(operr("service provider type mismatching. The load balancer[uuid:%s] is provided by the service provider[type:%s]," +
" but the L3 network of vm nic[uuid:%s] is enabled with the service provider[type: %s]", self.getUuid(), self.getProviderType(),
msg.getVmNicUuids().get(0), providerType));
}
}
trigger.next();
}
@Override
public void rollback(FlowRollback trigger, Map data) {
if (init) {
self = dbf.reload(self);
self.setProviderType(null);
dbf.update(self);
}
trigger.rollback();
}
});
flow(new Flow() {
String __name__ = "write-nic-to-db";
boolean s = false;
@Override
public void run(FlowTrigger trigger, Map data) {
for (String nicUuid : msg.getVmNicUuids()) {
LoadBalancerListenerVmNicRefVO ref = new LoadBalancerListenerVmNicRefVO();
ref.setListenerUuid(msg.getListenerUuid());
ref.setVmNicUuid(nicUuid);
ref.setStatus(LoadBalancerVmNicStatus.Pending);
refs.add(ref);
}
dbf.persistCollection(refs);
s = true;
trigger.next();
}
@Override
public void rollback(FlowRollback trigger, Map data) {
if (s) {
dbf.removeCollection(refs, LoadBalancerListenerVmNicRefVO.class);
}
trigger.rollback();
}
});
flow(new NoRollbackFlow() {
String __name__ = "add-nic-to-lb";
@Override
public void run(final FlowTrigger trigger, Map data) {
LoadBalancerBackend bkd = getBackend();
LoadBalancerStruct s = makeStruct();
s.setInit(init);
bkd.addVmNics(s, nics, new Completion(trigger) {
@Override
public void success() {
trigger.next();
}
@Override
public void fail(ErrorCode errorCode) {
trigger.fail(errorCode);
}
});
}
});
done(new FlowDoneHandler(msg, completion) {
@Override
public void handle(Map data) {
for (LoadBalancerListenerVmNicRefVO ref : refs) {
ref.setStatus(LoadBalancerVmNicStatus.Active);
}
dbf.updateCollection(refs);
evt.setInventory(LoadBalancerListenerInventory.valueOf(dbf.findByUuid(msg.getListenerUuid(), LoadBalancerListenerVO.class)));
bus.publish(evt);
completion.done();
}
});
error(new FlowErrorHandler(msg, completion) {
@Override
public void handle(ErrorCode errCode, Map data) {
evt.setError(errCode);
bus.publish(evt);
completion.done();
}
});
}
}).start();
}
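    // Added commentary: a backend call is needed only after a provider type has
    // been chosen and while at least one listener still references a vm nic in
    // Active or Pending status; otherwise operations are database-only.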
private boolean needAction() {
if (self.getProviderType() == null) {
return false;
}
LoadBalancerListenerVmNicRefVO activeNic = CollectionUtils.find(self.getListeners(), new Function<LoadBalancerListenerVmNicRefVO, LoadBalancerListenerVO>() {
@Override
public LoadBalancerListenerVmNicRefVO call(LoadBalancerListenerVO arg) {
for (LoadBalancerListenerVmNicRefVO ref : arg.getVmNicRefs()) {
if (ref.getStatus() == LoadBalancerVmNicStatus.Active || ref.getStatus() == LoadBalancerVmNicStatus.Pending) {
return ref;
}
}
return null;
}
});
        return activeNic != null;
}
private LoadBalancerBackend getBackend() {
DebugUtils.Assert(self.getProviderType() != null, "providerType cannot be null");
return lbMgr.getBackend(self.getProviderType());
}
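    // Added commentary: snapshots this load balancer into a LoadBalancerStruct
    // for the backend, reloading the inventory and collecting only the vm nics
    // whose listener refs are in Active or Pending status.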
private LoadBalancerStruct makeStruct() {
LoadBalancerStruct struct = new LoadBalancerStruct();
struct.setLb(reloadAndGetInventory());
List<String> activeNicUuids = new ArrayList<String>();
for (LoadBalancerListenerVO l : self.getListeners()) {
activeNicUuids.addAll(CollectionUtils.transformToList(l.getVmNicRefs(), new Function<String, LoadBalancerListenerVmNicRefVO>() {
@Override
public String call(LoadBalancerListenerVmNicRefVO arg) {
return arg.getStatus() == LoadBalancerVmNicStatus.Active || arg.getStatus() == LoadBalancerVmNicStatus.Pending ? arg.getVmNicUuid() : null;
}
}));
}
if (activeNicUuids.isEmpty()) {
struct.setVmNics(new HashMap<String, VmNicInventory>());
} else {
SimpleQuery<VmNicVO> nq = dbf.createQuery(VmNicVO.class);
nq.add(VmNicVO_.uuid, Op.IN, activeNicUuids);
List<VmNicVO> nicvos = nq.list();
Map<String, VmNicInventory> m = new HashMap<String, VmNicInventory>();
for (VmNicVO n : nicvos) {
m.put(n.getUuid(), VmNicInventory.valueOf(n));
}
struct.setVmNics(m);
}
struct.setListeners(LoadBalancerListenerInventory.valueOf(self.getListeners()));
return struct;
}
private void handle(final APICreateLoadBalancerListenerMsg msg) {
thdf.chainSubmit(new ChainTask(msg) {
@Override
public String getSyncSignature() {
return getSyncId();
}
@Override
public void run(final SyncTaskChain chain) {
createListener(msg, new NoErrorCompletion(chain) {
@Override
public void done() {
chain.next();
}
});
}
@Override
public String getName() {
return "create-listener";
}
});
}
private void createListener(final APICreateLoadBalancerListenerMsg msg, final NoErrorCompletion completion) {
final APICreateLoadBalancerListenerEvent evt = new APICreateLoadBalancerListenerEvent(msg.getId());
LoadBalancerListenerVO vo = new LoadBalancerListenerVO();
vo.setLoadBalancerUuid(self.getUuid());
vo.setUuid(msg.getResourceUuid() == null ? Platform.getUuid() : msg.getResourceUuid());
        vo.setDescription(msg.getDescription());
vo.setName(msg.getName());
vo.setInstancePort(msg.getInstancePort());
vo.setLoadBalancerPort(msg.getLoadBalancerPort());
vo.setProtocol(msg.getProtocol());
vo = dbf.persistAndRefresh(vo);
acntMgr.createAccountResourceRef(msg.getSession().getAccountUuid(), vo.getUuid(), LoadBalancerListenerVO.class);
tagMgr.createNonInherentSystemTags(msg.getSystemTags(), vo.getUuid(), LoadBalancerListenerVO.class.getSimpleName());
evt.setInventory(LoadBalancerListenerInventory.valueOf(vo));
bus.publish(evt);
completion.done();
}
}
| apache-2.0 |
tkunicki/geomesa | geomesa-filter/src/main/scala/org/locationtech/geomesa/filter/function/DateToLong.scala | 1224 | /***********************************************************************
* Copyright (c) 2013-2016 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.filter.function
import java.{lang => jl}
import org.geotools.filter.FunctionExpressionImpl
import org.geotools.filter.capability.FunctionNameImpl
import org.geotools.filter.capability.FunctionNameImpl._
import org.opengis.feature.simple.SimpleFeature
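// Illustrative usage (added commentary, not part of the original source); a sketch
// assuming GeoTools' CommonFactoryFinder is on the classpath and the evaluated
// feature has a "dtg" Date attribute:
//
//   val ff = org.geotools.factory.CommonFactoryFinder.getFilterFactory2()
//   val expr = ff.function("dateToLong", ff.property("dtg"))
//   val millis = expr.evaluate(feature).asInstanceOf[java.lang.Long]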
class DateToLong
extends FunctionExpressionImpl(
new FunctionNameImpl(
"dateToLong",
classOf[java.lang.Long],
parameter("dtg", classOf[java.util.Date])
)
) {
def evaluate(feature: SimpleFeature): jl.Object =
jl.Long.valueOf(getExpression(0).evaluate(feature).asInstanceOf[java.util.Date].getTime)
override def evaluate(o: jl.Object): jl.Object =
jl.Long.valueOf(getExpression(0).evaluate(o).asInstanceOf[java.util.Date].getTime)
}
| apache-2.0 |
nvoron23/chorus | app/controllers/workspace_search_controller.rb | 255 | class WorkspaceSearchController < ApplicationController
before_filter :require_full_search
def show
workspace = Workspace.find(params[:workspace_id])
authorize! :show, workspace
present WorkspaceSearch.new(current_user, params)
end
end
| apache-2.0 |
mvp/presto | presto-iceberg/src/main/java/com/facebook/presto/iceberg/PartitionFields.java | 4926 | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.presto.iceberg;
import org.apache.iceberg.PartitionField;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.Schema;
import java.util.List;
import java.util.function.Consumer;
import java.util.regex.MatchResult;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import static com.google.common.collect.ImmutableList.toImmutableList;
import static java.lang.Integer.parseInt;
import static java.lang.String.format;
public final class PartitionFields
{
private static final String NAME = "[a-z_][a-z0-9_]*";
private static final String FUNCTION_NAME = "\\((" + NAME + ")\\)";
private static final String FUNCTION_NAME_INT = "\\((" + NAME + "), *(\\d+)\\)";
private static final Pattern IDENTITY_PATTERN = Pattern.compile(NAME);
private static final Pattern YEAR_PATTERN = Pattern.compile("year" + FUNCTION_NAME);
private static final Pattern MONTH_PATTERN = Pattern.compile("month" + FUNCTION_NAME);
private static final Pattern DAY_PATTERN = Pattern.compile("day" + FUNCTION_NAME);
private static final Pattern HOUR_PATTERN = Pattern.compile("hour" + FUNCTION_NAME);
private static final Pattern BUCKET_PATTERN = Pattern.compile("bucket" + FUNCTION_NAME_INT);
private static final Pattern TRUNCATE_PATTERN = Pattern.compile("truncate" + FUNCTION_NAME_INT);
private static final Pattern ICEBERG_BUCKET_PATTERN = Pattern.compile("bucket\\[(\\d+)]");
private static final Pattern ICEBERG_TRUNCATE_PATTERN = Pattern.compile("truncate\\[(\\d+)]");
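    // Human-readable partition field forms accepted below (illustrative):
    //   "ts", "year(ts)", "month(ts)", "day(ts)", "hour(ts)", "bucket(id, 16)", "truncate(name, 3)"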
private PartitionFields() {}
public static PartitionSpec parsePartitionFields(Schema schema, List<String> fields)
{
PartitionSpec.Builder builder = PartitionSpec.builderFor(schema);
for (String field : fields) {
parsePartitionField(builder, field);
}
return builder.build();
}
public static void parsePartitionField(PartitionSpec.Builder builder, String field)
{
@SuppressWarnings("PointlessBooleanExpression")
boolean matched = false ||
tryMatch(field, IDENTITY_PATTERN, match -> builder.identity(match.group())) ||
tryMatch(field, YEAR_PATTERN, match -> builder.year(match.group(1))) ||
tryMatch(field, MONTH_PATTERN, match -> builder.month(match.group(1))) ||
tryMatch(field, DAY_PATTERN, match -> builder.day(match.group(1))) ||
tryMatch(field, HOUR_PATTERN, match -> builder.hour(match.group(1))) ||
tryMatch(field, BUCKET_PATTERN, match -> builder.bucket(match.group(1), parseInt(match.group(2)))) ||
tryMatch(field, TRUNCATE_PATTERN, match -> builder.truncate(match.group(1), parseInt(match.group(2))));
if (!matched) {
throw new IllegalArgumentException("Invalid partition field declaration: " + field);
}
}
private static boolean tryMatch(CharSequence value, Pattern pattern, Consumer<MatchResult> match)
{
Matcher matcher = pattern.matcher(value);
if (matcher.matches()) {
match.accept(matcher.toMatchResult());
return true;
}
return false;
}
public static List<String> toPartitionFields(PartitionSpec spec)
{
return spec.fields().stream()
.map(field -> toPartitionField(spec, field))
.collect(toImmutableList());
}
private static String toPartitionField(PartitionSpec spec, PartitionField field)
{
String name = spec.schema().findColumnName(field.sourceId());
String transform = field.transform().toString();
switch (transform) {
case "identity":
return name;
case "year":
case "month":
case "day":
case "hour":
return format("%s(%s)", transform, name);
}
Matcher matcher = ICEBERG_BUCKET_PATTERN.matcher(transform);
if (matcher.matches()) {
return format("bucket(%s, %s)", name, matcher.group(1));
}
matcher = ICEBERG_TRUNCATE_PATTERN.matcher(transform);
if (matcher.matches()) {
return format("truncate(%s, %s)", name, matcher.group(1));
}
throw new UnsupportedOperationException("Unsupported partition transform: " + field);
}
}
| apache-2.0 |
petracvv/cas | docs/cas-server-documentation/installation/JDBC-Drivers.md | 2981 | ---
layout: default
title: CAS - JDBC Drivers
---
# JDBC Drivers
While in most cases this is unnecessary and handled by CAS automatically,
you may need to also include the following module to account for various database drivers:
```xml
<dependency>
<groupId>org.apereo.cas</groupId>
<artifactId>cas-server-support-jdbc-drivers</artifactId>
<version>${cas.version}</version>
</dependency>
```
## Database Support
Automatic support for drivers includes the following databases.
All other drivers need to be manually added to the build configuration.
To see the relevant list of CAS properties, please [review this guide](../installation/Configuration-Properties.html#hibernate--jdbc).
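For example, a JPA-backed CAS component is pointed at one of the drivers and dialects below through CAS properties. A minimal sketch follows; the exact property keys depend on the CAS module and version, so the names here are illustrative:
```properties
# Illustrative JPA settings; consult the configuration guide linked above for exact keys
cas.ticket.registry.jpa.driver-class=org.postgresql.Driver
cas.ticket.registry.jpa.url=jdbc:postgresql://localhost:5432/cas
cas.ticket.registry.jpa.dialect=org.hibernate.dialect.PostgreSQL95Dialect
```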
### HSQLDB
Available drivers are:
1. `org.hsqldb.jdbcDriver`
| Dialects
|-------------------------------------
| `org.hibernate.dialect.HSQLDialect`
### Oracle
Note that the Oracle database driver needs to
be [manually installed](http://www.oracle.com/technetwork/database/features/jdbc/index-091264.html)
before the above configuration can take effect. Depending on the driver version, the actual name
of the driver class may vary.
| Dialects
|-------------------------------------
| `org.hibernate.dialect.Oracle8iDialect`
| `org.hibernate.dialect.Oracle9iDialect`
| `org.hibernate.dialect.Oracle10gDialect`
| `org.hibernate.dialect.Oracle12cDialect`
### MYSQL
Available drivers are:
1. `com.mysql.jdbc.Driver`
2. `com.mysql.cj.jdbc.Driver`
| Dialects
|-------------------------------------------------
| `org.hibernate.dialect.MySQLDialect`
| `org.hibernate.dialect.MySQL5Dialect`
| `org.hibernate.dialect.MySQLInnoDBDialect`
| `org.hibernate.dialect.MySQLMyISAMDialect`
| `org.hibernate.dialect.MySQL5InnoDBDialect`
| `org.hibernate.dialect.MySQL57InnoDBDialect`
### PostgreSQL
Available drivers are:
1. `org.postgresql.Driver`
| Dialects
|------------------------------------------------
| `org.hibernate.dialect.PostgreSQL81Dialect`
| `org.hibernate.dialect.PostgreSQL82Dialect`
| `org.hibernate.dialect.PostgreSQL9Dialect`
| `org.hibernate.dialect.PostgreSQL91Dialect`
| `org.hibernate.dialect.PostgreSQL92Dialect`
| `org.hibernate.dialect.PostgreSQL93Dialect`
| `org.hibernate.dialect.PostgreSQL94Dialect`
| `org.hibernate.dialect.PostgreSQL95Dialect`
### MariaDB
Available drivers are:
1. `org.mariadb.jdbc.Driver`
| Dialects
|------------------------------------------------
| `org.hibernate.dialect.MariaDBDialect`
### Microsoft SQL Server (JTDS)
Available drivers are:
1. `net.sourceforge.jtds.jdbc.Driver`
| Dialects
|------------------------------------------------
| `org.hibernate.dialect.SQLServerDialect`
| `org.hibernate.dialect.SQLServer2005Dialect`
| `org.hibernate.dialect.SQLServer2008Dialect`
| `org.hibernate.dialect.SQLServer2012Dialect`
| apache-2.0 |
caiomsouza/spark-notebook | project/Shared.scala | 4816 | import Dependencies._
import sbt.Keys._
import sbt._
object Shared {
lazy val sparkVersion = SettingKey[String]("x-spark-version")
lazy val hadoopVersion = SettingKey[String]("x-hadoop-version")
lazy val jets3tVersion = SettingKey[String]("x-jets3t-version")
lazy val jlineDef = SettingKey[(String, String)]("x-jline-def")
lazy val withHive = SettingKey[Boolean]("x-with-hive")
lazy val withParquet = SettingKey[Boolean]("x-with-parquet")
lazy val sharedSettings: Seq[Def.Setting[_]] = Seq(
scalaVersion := defaultScalaVersion,
sparkVersion := defaultSparkVersion,
hadoopVersion := defaultHadoopVersion,
jets3tVersion := defaultJets3tVersion,
jlineDef := (if (defaultScalaVersion.startsWith("2.10")) {
("org.scala-lang", defaultScalaVersion)
} else {
("jline", "2.12")
}),
withHive := defaultWithHive,
withParquet := defaultWithParquet,
libraryDependencies += guava
)
val wispSettings: Seq[Def.Setting[_]] = Seq(
libraryDependencies += wispDepSumac,
unmanagedJars in Compile ++= (
if (scalaVersion.value.startsWith("2.10"))
Seq((baseDirectory in "sparkNotebook").value / "temp" / "wisp_2.10-0.0.5.jar")
else
Seq((baseDirectory in "sparkNotebook").value / "temp" / "wisp_2.11-0.0.5.jar")
)
)
val repl: Seq[Def.Setting[_]] = {
val lib = libraryDependencies <++= (sparkVersion, hadoopVersion, jets3tVersion) {
(sv, hv, jv) => if (sv != "1.2.0") Seq(sparkRepl(sv)) else Seq.empty
}
val unmanaged = unmanagedJars in Compile ++= (
if (sparkVersion.value == "1.2.0" && !scalaVersion.value.startsWith("2.11"))
Seq((baseDirectory in "sparkNotebook").value / "temp/spark-repl_2.10-1.2.0-notebook.jar")
else
Seq.empty
)
val repos = resolvers <++= sparkVersion { (sv) =>
if (sv == "1.2.0") {
Seq("Resolver for spark-yarn 1.2.0" at "https://github.com/adatao/mvnrepos/raw/master/releases") // spark-yarn 1.2.0 is not released
} else {
Nil
}
}
lib ++ unmanaged ++ repos
}
val hive: Seq[Def.Setting[_]] = Seq(
libraryDependencies <++= (withHive, sparkVersion) { (wh, sv) =>
if (wh) List(sparkHive(sv)) else Nil
}
)
val yarnWebProxy: Seq[Def.Setting[_]] = Seq(
libraryDependencies <++= (hadoopVersion) { (hv) =>
if (!hv.startsWith("1")) List(yarnProxy(hv)) else Nil
}
)
lazy val sparkSettings: Seq[Def.Setting[_]] = Seq(
libraryDependencies <++= (scalaVersion, sparkVersion, hadoopVersion, jets3tVersion) { (v, sv, hv, jv) =>
val jets3tVersion = sys.props.get("jets3t.version") match {
case Some(jv) => jets3t(Some(jv), None)
case _ => jets3t(None, Some(hv))
}
val jettyVersion = "8.1.14.v20131031"
val libs = Seq(
breeze,
sparkCore(sv),
sparkYarn(sv),
sparkSQL(sv),
hadoopClient(hv),
jets3tVersion,
commonsCodec
) ++
(
if (!v.startsWith("2.10")) {
// in 2.11
//Boot.scala → HttpServer → eclipse
// eclipse → provided boohooo :'-(
Seq(
"org.eclipse.jetty" % "jetty-http" % jettyVersion,
"org.eclipse.jetty" % "jetty-continuation" % jettyVersion,
"org.eclipse.jetty" % "jetty-servlet" % jettyVersion,
"org.eclipse.jetty" % "jetty-util" % jettyVersion,
"org.eclipse.jetty" % "jetty-security" % jettyVersion,
"org.eclipse.jetty" % "jetty-plus" % jettyVersion,
"org.eclipse.jetty" % "jetty-server" % jettyVersion
)
} else Nil
)
libs
}
) ++ repl ++ hive ++ yarnWebProxy
lazy val tachyonSettings: Seq[Def.Setting[_]] = {
def tachyonVersion(
sv: String) = sv.takeWhile(_ != '-' /*get rid of -SNAPSHOT, -RC or whatever*/).split("\\.").toList.map(_.toInt) match {
case List(1, y, z) if y <= 3 => "0.5.0"
case List(1, y, z) => "0.6.4"
case _ => throw new IllegalArgumentException("Bad spark version for tachyon: " + sv)
}
Seq(
unmanagedSourceDirectories in Compile <+= (sparkVersion, sourceDirectory in Compile) {
(sv, sd) => sd / ("tachyon_" + tachyonVersion(sv))
},
libraryDependencies <++= sparkVersion { sv => Seq(
"org.tachyonproject" % "tachyon" % tachyonVersion(sv) excludeAll ExclusionRule("org.jboss.netty", "netty"),
"org.tachyonproject" % "tachyon-client" % tachyonVersion(sv) excludeAll ExclusionRule("org.jboss.netty", "netty"),
"org.tachyonproject" % "tachyon" % tachyonVersion(sv) classifier "tests" excludeAll ExclusionRule("org.jboss.netty", "netty")
)
}
)
}
} | apache-2.0 |
gstevey/gradle | subprojects/platform-play/src/main/java/org/gradle/play/internal/run/PlayRunSpec.java | 1031 | /*
* Copyright 2014 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.play.internal.run;
import org.gradle.api.tasks.compile.BaseForkOptions;
import java.io.File;
public interface PlayRunSpec {
BaseForkOptions getForkOptions();
Iterable<File> getClasspath();
Iterable<File> getChangingClasspath();
File getApplicationJar();
File getAssetsJar();
Iterable<File> getAssetsDirs();
File getProjectPath();
int getHttpPort();
}
| apache-2.0 |
lkabongoVC/box-python-sdk | demo/example.py | 10222 | # coding: utf-8
from __future__ import print_function, unicode_literals
import os
from boxsdk import Client
from boxsdk.exception import BoxAPIException
from boxsdk.object.collaboration import CollaborationRole
from auth import authenticate
def run_user_example(client):
# 'me' is a handy value to get info on the current authenticated user.
me = client.user(user_id='me').get(fields=['login'])
print('The email of the user is: {0}'.format(me['login']))
def run_folder_examples(client):
root_folder = client.folder(folder_id='0').get()
print('The root folder is owned by: {0}'.format(root_folder.owned_by['login']))
items = root_folder.get_items(limit=100, offset=0)
print('This is the first 100 items in the root folder:')
for item in items:
print(" " + item.name)
def run_collab_examples(client):
root_folder = client.folder(folder_id='0')
collab_folder = root_folder.create_subfolder('collab folder')
try:
print('Folder {0} created'.format(collab_folder.get()['name']))
collaboration = collab_folder.add_collaborator('[email protected]', CollaborationRole.VIEWER)
print('Created a collaboration')
try:
modified_collaboration = collaboration.update_info(role=CollaborationRole.EDITOR)
print('Modified a collaboration: {0}'.format(modified_collaboration.role))
finally:
collaboration.delete()
print('Deleted a collaboration')
finally:
# Clean up
print('Delete folder collab folder succeeded: {0}'.format(collab_folder.delete()))
def rename_folder(client):
root_folder = client.folder(folder_id='0')
foo = root_folder.create_subfolder('foo')
try:
print('Folder {0} created'.format(foo.get()['name']))
bar = foo.rename('bar')
print('Renamed to {0}'.format(bar.get()['name']))
finally:
print('Delete folder bar succeeded: {0}'.format(foo.delete()))
def get_folder_shared_link(client):
root_folder = client.folder(folder_id='0')
collab_folder = root_folder.create_subfolder('shared link folder')
try:
print('Folder {0} created'.format(collab_folder.get().name))
shared_link = collab_folder.get_shared_link()
print('Got shared link:' + shared_link)
finally:
print('Delete folder collab folder succeeded: {0}'.format(collab_folder.delete()))
def upload_file(client):
root_folder = client.folder(folder_id='0')
file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'file.txt')
a_file = root_folder.upload(file_path, file_name='i-am-a-file.txt')
try:
print('{0} uploaded: '.format(a_file.get()['name']))
finally:
print('Delete i-am-a-file.txt succeeded: {0}'.format(a_file.delete()))
def upload_accelerator(client):
root_folder = client.folder(folder_id='0')
file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'file.txt')
a_file = root_folder.upload(file_path, file_name='i-am-a-file.txt', upload_using_accelerator=True)
try:
print('{0} uploaded via Accelerator: '.format(a_file.get()['name']))
file_v2_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'file_v2.txt')
a_file = a_file.update_contents(file_v2_path, upload_using_accelerator=True)
print('{0} updated via Accelerator: '.format(a_file.get()['name']))
finally:
print('Delete i-am-a-file.txt succeeded: {0}'.format(a_file.delete()))
def rename_file(client):
root_folder = client.folder(folder_id='0')
file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'file.txt')
foo = root_folder.upload(file_path, file_name='foo.txt')
try:
print('{0} uploaded '.format(foo.get()['name']))
bar = foo.rename('bar.txt')
print('Rename succeeded: {0}'.format(bool(bar)))
finally:
foo.delete()
def update_file(client):
root_folder = client.folder(folder_id='0')
file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'file.txt')
file_v1 = root_folder.upload(file_path, file_name='file_v1.txt')
try:
# print 'File content after upload: {}'.format(file_v1.content())
file_v2_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'file_v2.txt')
file_v2 = file_v1.update_contents(file_v2_path)
# print 'File content after update: {}'.format(file_v2.content())
finally:
file_v1.delete()
def search_files(client):
search_results = client.search(
'i-am-a-file.txt',
limit=2,
offset=0,
ancestor_folders=[client.folder(folder_id='0')],
file_extensions=['txt'],
)
for item in search_results:
item_with_name = item.get(fields=['name'])
print('matching item: ' + item_with_name.id)
else:
print('no matching items')
def copy_item(client):
root_folder = client.folder(folder_id='0')
file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'file.txt')
a_file = root_folder.upload(file_path, file_name='a file.txt')
try:
subfolder1 = root_folder.create_subfolder('copy_sub')
try:
a_file.copy(subfolder1)
print(subfolder1.get_items(limit=10, offset=0))
subfolder2 = root_folder.create_subfolder('copy_sub2')
try:
subfolder1.copy(subfolder2)
print(subfolder2.get_items(limit=10, offset=0))
finally:
subfolder2.delete()
finally:
subfolder1.delete()
finally:
a_file.delete()
def move_item(client):
root_folder = client.folder(folder_id='0')
file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'file.txt')
a_file = root_folder.upload(file_path, file_name='a file.txt')
try:
subfolder1 = root_folder.create_subfolder('move_sub')
try:
a_file.move(subfolder1)
print(subfolder1.get_items(limit=10, offset=0))
subfolder2 = root_folder.create_subfolder('move_sub2')
try:
subfolder1.move(subfolder2)
print(subfolder2.get_items(limit=10, offset=0))
finally:
subfolder2.delete()
finally:
try:
subfolder1.delete()
except BoxAPIException:
pass
finally:
try:
a_file.delete()
except BoxAPIException:
pass
def get_events(client):
print(client.events().get_events(limit=100, stream_position='now'))
def get_latest_stream_position(client):
print(client.events().get_latest_stream_position())
def long_poll(client):
print(client.events().long_poll())
def _delete_leftover_group(existing_groups, group_name):
"""
delete group if it already exists
"""
existing_group = next((g for g in existing_groups if g.name == group_name), None)
if existing_group:
existing_group.delete()
def run_groups_example(client):
"""
Shows how to interact with 'Groups' in the Box API. How to:
- Get info about all the Groups to which the current user belongs
- Create a Group
- Rename a Group
- Add a member to the group
- Remove a member from a group
- Delete a Group
"""
try:
# First delete group if it already exists
original_groups = client.groups()
_delete_leftover_group(original_groups, 'box_sdk_demo_group')
_delete_leftover_group(original_groups, 'renamed_box_sdk_demo_group')
new_group = client.create_group('box_sdk_demo_group')
except BoxAPIException as ex:
if ex.status != 403:
raise
        print('The authenticated user does not have permissions to manage groups. Skipping this part of the demo.')
return
print('New group:', new_group.name, new_group.id)
new_group = new_group.update_info({'name': 'renamed_box_sdk_demo_group'})
print("Group's new name:", new_group.name)
me_dict = client.user().get(fields=['login'])
me = client.user(user_id=me_dict['id'])
group_membership = new_group.add_member(me, 'member')
members = list(new_group.membership())
print('The group has a membership of: ', len(members))
print('The id of that membership: ', group_membership.object_id)
group_membership.delete()
print('After deleting that membership, the group has a membership of: ', len(list(new_group.membership())))
new_group.delete()
groups_after_deleting_demo = client.groups()
has_been_deleted = not any(g.name == 'renamed_box_sdk_demo_group' for g in groups_after_deleting_demo)
print('The new group has been deleted: ', has_been_deleted)
def run_metadata_example(client):
root_folder = client.folder(folder_id='0')
file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'file.txt')
foo = root_folder.upload(file_path, file_name='foo.txt')
print('{0} uploaded '.format(foo.get()['name']))
try:
metadata = foo.metadata()
metadata.create({'foo': 'bar'})
print('Created metadata: {0}'.format(metadata.get()))
update = metadata.start_update()
update.update('/foo', 'baz', 'bar')
print('Updated metadata: {0}'.format(metadata.update(update)))
finally:
foo.delete()
def run_examples(oauth):
client = Client(oauth)
run_user_example(client)
run_folder_examples(client)
run_collab_examples(client)
rename_folder(client)
get_folder_shared_link(client)
upload_file(client)
rename_file(client)
update_file(client)
search_files(client)
copy_item(client)
move_item(client)
get_events(client)
get_latest_stream_position(client)
# long_poll(client)
# Enterprise accounts only
run_groups_example(client)
run_metadata_example(client)
# Premium Apps only
upload_accelerator(client)
def main():
    # Note that you must put your client ID and client secret in demo/auth.py for this to work.
oauth = authenticate()
run_examples(oauth)
os._exit(0)
if __name__ == '__main__':
main()
| apache-2.0 |
android-ia/platform_tools_idea | java/java-tests/testData/codeInsight/daemonCodeAnalyzer/lambda/methodRef/ConstructorAssignability.java | 978 | class Foo<R> {
public interface Factory<U> {
U make();
}
interface ASink<R, K extends ASink<R, K>> {
public void combine(K other);
}
static <R, S extends ASink<R, S>> R reduce(Factory<S> factory) {
return null;
}
public void foo() {
reduce(Moo::new);
reduce<error descr="'reduce(Foo.Factory<Foo.ASink>)' in 'Foo' cannot be applied to '(<method reference>)'">(AMoo::new)</error>;
reduce(AAMoo::new);
reduce(AAAMoo::new);
}
private class Moo implements ASink<R, Moo> {
@Override
public void combine(Moo other) {
}
}
private class AMoo {
}
private class AAMoo implements ASink<AAMoo, AAMoo> {
@Override
public void combine(AAMoo other) {
}
}
private class AAAMoo implements ASink<R, AAAMoo> {
private AAAMoo() {
}
@Override
public void combine(AAAMoo other) {
}
}
} | apache-2.0 |
jatin9896/incubator-carbondata | processing/src/main/java/org/apache/carbondata/processing/loading/converter/FieldConverter.java | 1743 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.processing.loading.converter;
import org.apache.carbondata.core.datastore.row.CarbonRow;
import org.apache.carbondata.processing.loading.exception.CarbonDataLoadingException;
/**
* This interface converts/transforms the column field.
*/
public interface FieldConverter {
  /**
   * It converts the column field and updates the data in the same location/index in the row.
   * If the value cannot be converted, the record is flagged as a bad record and the failure
   * message is captured in the given log holder.
   * @param row the row whose field is converted in place
   * @param logHolder holder that collects bad-record details
   * @throws CarbonDataLoadingException
   */
void convert(CarbonRow row, BadRecordLogHolder logHolder) throws CarbonDataLoadingException;
/**
   * Converts the literal value to the carbon internal value.
*/
Object convert(Object value, BadRecordLogHolder logHolder) throws RuntimeException;
/**
* This method clears all the dictionary caches being acquired.
*/
void clear();
}
| apache-2.0 |
EArdeleanu/gateway | transport/wsr/src/main/java/org/kaazing/gateway/transport/wsr/RtmpProtocolDispatcher.java | 1627 | /**
* Copyright (c) 2007-2014 Kaazing Corporation. All rights reserved.
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.kaazing.gateway.transport.wsr;
import static java.util.Collections.singleton;
import java.util.Collection;
import org.kaazing.gateway.transport.dispatch.ProtocolDispatcher;
class RtmpProtocolDispatcher implements ProtocolDispatcher {
private static final String RTMP_PROTOCOL = "rtmp/1.0";
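    // 0x03 is the RTMP handshake version byte (C0), used to recognize an incoming RTMP connection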
private static final Collection<byte[]> RTMP_DISCRIMINATORS = singleton(new byte[] { 0x03 });
@Override
public int compareTo(ProtocolDispatcher pd) {
return protocolDispatchComparator.compare(this, pd);
}
@Override
public String getProtocolName() {
return RTMP_PROTOCOL;
}
@Override
public Collection<byte[]> getDiscriminators() {
return RTMP_DISCRIMINATORS;
}
}
| apache-2.0 |
Esri/ArcREST | samples/publishingGeoJSON.py | 864 | """
GeoJSON example using addItem
Python 2/3
ArcREST version 3.5.0
"""
from __future__ import print_function
import arcrest
if __name__ == "__main__":
username = ""
password = ""
geojsonFile = r""
sh = arcrest.AGOLTokenSecurityHandler(username, password)
admin = arcrest.manageorg.Administration(securityHandler=sh)
user = admin.content.users.user()
ip = arcrest.manageorg.ItemParameter()
ip.title = "MyGeoJSONTestFile"
ip.type = "GeoJson"
ip.tags = "Geo1,Geo2"
ip.description = "Publishing a geojson file"
addedItem = user.addItem(itemParameters=ip, filePath=geojsonFile)
itemId = addedItem.id
pp = arcrest.manageorg.PublishGeoJSONParameter()
pp.name = "Geojsonrocks"
pp.hasStaticData = True
print( user.publishItem(fileType="geojson", publishParameters=pp, itemId=itemId, wait=True)) | apache-2.0 |
fbasso/ariatemplates | test/aria/widgets/container/tab/focusTab/FocusTabTestCase.js | 1249 | /*
* Copyright 2017 Amadeus s.a.s.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
Aria.classDefinition({
$classpath : "test.aria.widgets.container.tab.focusTab.FocusTabTestCase",
$extends : "aria.jsunit.TemplateTestCase",
$prototype : {
runTemplateTest : function () {
this.templateCtxt._tpl.$focus("summaryTab");
var domElt = this.getElementById("summaryTab");
var anchor = domElt.getElementsByTagName("a")[0];
this.waitForDomEltFocus(anchor, function () {
this.templateCtxt._tpl.$focus("mapTab");
var span = this.getElementById("mapTab");
this.waitForDomEltFocus(span, this.end());
});
}
}
});
| apache-2.0 |
wiltonlazary/arangodb | 3rdParty/immer/v0.7.0/test/set/B3.cpp | 529 | //
// immer: immutable data structures for C++
// Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente
//
// This software is distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt
//
#include <immer/set.hpp>
template <typename T,
typename Hash = std::hash<T>,
typename Eq = std::equal_to<T>>
using test_set_t = immer::set<T, Hash, Eq, immer::default_memory_policy, 3u>;
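// Run the generic set test-suite against a set with a reduced branching factor
// (B = 3u), which exercises deeper tries than the default configuration.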
#define SET_T test_set_t
#include "generic.ipp"
| apache-2.0 |
siconos/siconos-deb | externals/numeric_bindings/libs/numeric/bindings/doc/html/boost_numeric_bindings/reference/blas/level_2_blas/spmv.html | 11836 | <html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=US-ASCII">
<title>spmv</title>
<link rel="stylesheet" href="../../../../boostbook.css" type="text/css">
<meta name="generator" content="DocBook XSL Stylesheets V1.75.2">
<link rel="home" href="../../../../index.html" title="Chapter 1. Boost.Numeric_Bindings">
<link rel="up" href="../level_2_blas.html" title="Level 2 BLAS">
<link rel="prev" href="sbmv.html" title="sbmv">
<link rel="next" href="spr2.html" title="spr2">
</head>
<body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF">
<table cellpadding="2" width="100%"><tr><td valign="top"></td></tr></table>
<hr>
<div class="spirit-nav">
<a accesskey="p" href="sbmv.html"><img src="../../../../images/prev.png" alt="Prev"></a><a accesskey="u" href="../level_2_blas.html"><img src="../../../../images/up.png" alt="Up"></a><a accesskey="h" href="../../../../index.html"><img src="../../../../images/home.png" alt="Home"></a><a accesskey="n" href="spr2.html"><img src="../../../../images/next.png" alt="Next"></a>
</div>
<div class="section">
<div class="titlepage"><div><div><h5 class="title">
<a name="boost_numeric_bindings.reference.blas.level_2_blas.spmv"></a><a class="link" href="spmv.html" title="spmv">spmv</a>
</h5></div></div></div>
<a name="boost_numeric_bindings.reference.blas.level_2_blas.spmv.prototype"></a><h6>
<a name="id777142"></a>
<a class="link" href="spmv.html#boost_numeric_bindings.reference.blas.level_2_blas.spmv.prototype">Prototype</a>
</h6>
<p>
There is one prototype of <code class="computeroutput"><span class="identifier">spmv</span></code>
available, please see below.
</p>
<pre class="programlisting"><span class="identifier">spmv</span><span class="special">(</span> <span class="keyword">const</span> <span class="identifier">Scalar</span> <span class="identifier">alpha</span><span class="special">,</span> <span class="keyword">const</span> <span class="identifier">MatrixAP</span><span class="special">&</span> <span class="identifier">ap</span><span class="special">,</span> <span class="keyword">const</span> <span class="identifier">VectorX</span><span class="special">&</span> <span class="identifier">x</span><span class="special">,</span>
        <span class="keyword">const</span> <span class="identifier">Scalar</span> <span class="identifier">beta</span><span class="special">,</span> <span class="identifier">VectorY</span><span class="special">&</span> <span class="identifier">y</span> <span class="special">);</span>
</pre>
<p>
</p>
<a name="boost_numeric_bindings.reference.blas.level_2_blas.spmv.description"></a><h6>
<a name="id777321"></a>
<a class="link" href="spmv.html#boost_numeric_bindings.reference.blas.level_2_blas.spmv.description">Description</a>
</h6>
<p>
<code class="computeroutput"><span class="identifier">spmv</span></code> (short for symmetric,
packed, matrix-vector operation) provides a C++ interface to BLAS routines
SSPMV and DSPMV. <code class="computeroutput"><span class="identifier">spmv</span></code>
performs the matrix-vector operation
</p>
<p>
y := alpha*A*x + beta*y,
</p>
<p>
where alpha and beta are scalars, x and y are n element vectors and A
is an n by n symmetric matrix, supplied in packed form.
</p>
<p>
The selection of the BLAS routine is done during compile-time, and is
determined by the type of values contained in type <code class="computeroutput"><span class="identifier">MatrixAP</span></code>.
The type of values is obtained through the <code class="computeroutput"><span class="identifier">value_type</span></code>
meta-function <code class="computeroutput"><span class="keyword">typename</span> <span class="identifier">value_type</span><span class="special"><</span><span class="identifier">MatrixAP</span><span class="special">>::</span><span class="identifier">type</span></code>. Table X below illustrates to
which specific routine this dispatching will take place.
</p>
<div class="table">
<a name="boost_numeric_bindings.reference.blas.level_2_blas.spmv.dispatching_of_spmv_"></a><p class="title"><b>Table 1.58. Dispatching of spmv.</b></p>
<div class="table-contents"><table class="table" summary="Dispatching of spmv.">
<colgroup>
<col>
<col>
<col>
<col>
</colgroup>
<thead><tr>
<th>
<p>
Value type of MatrixAP
</p>
</th>
<th>
<p>
BLAS routine
</p>
</th>
<th>
<p>
CBLAS routine
</p>
</th>
<th>
<p>
CUBLAS routine
</p>
</th>
</tr></thead>
<tbody>
<tr>
<td>
<p>
<code class="computeroutput"><span class="keyword">float</span></code>
</p>
</td>
<td>
<p>
SSPMV
</p>
</td>
<td>
<p>
cblas_sspmv
</p>
</td>
<td>
<p>
cublasSspmv
</p>
</td>
</tr>
<tr>
<td>
<p>
<code class="computeroutput"><span class="keyword">double</span></code>
</p>
</td>
<td>
<p>
DSPMV
</p>
</td>
<td>
<p>
cblas_dspmv
</p>
</td>
<td>
<p>
Unavailable
</p>
</td>
</tr>
</tbody>
</table></div>
</div>
<br class="table-break"><p>
The original routines SSPMV and DSPMV have nine arguments, whereas <code class="computeroutput"><span class="identifier">spmv</span></code> requires five arguments.
</p>
<div class="table">
<a name="boost_numeric_bindings.reference.blas.level_2_blas.spmv.deduction_of_arguments_of_spmv_"></a><p class="title"><b>Table 1.59. Deduction of arguments of spmv.</b></p>
<div class="table-contents"><table class="table" summary="Deduction of arguments of spmv.">
<colgroup></colgroup>
<tbody></tbody>
</table></div>
</div>
<br class="table-break"><a name="boost_numeric_bindings.reference.blas.level_2_blas.spmv.definition"></a><h6>
<a name="id777664"></a>
<a class="link" href="spmv.html#boost_numeric_bindings.reference.blas.level_2_blas.spmv.definition">Definition</a>
</h6>
<p>
Defined in header <code class="computeroutput">boost/numeric/bindings/blas/level2/spmv.hpp</code>.
</p>
<a name="boost_numeric_bindings.reference.blas.level_2_blas.spmv.parameters_or_requirements_on_types"></a><h6>
<a name="id777703"></a>
<a class="link" href="spmv.html#boost_numeric_bindings.reference.blas.level_2_blas.spmv.parameters_or_requirements_on_types">Parameters
or Requirements on Types</a>
</h6>
<div class="variablelist">
<p class="title"><b>Parameters</b></p>
<dl>
<dt><span class="term">alpha</span></dt>
<dd><p>
            Scalar multiplier applied to the matrix-vector product A*x.
          </p></dd>
<dt><span class="term">ap</span></dt>
<dd><p>
            The n by n symmetric matrix A, supplied in packed form.
          </p></dd>
<dt><span class="term">x</span></dt>
<dd><p>
            Vector of n elements.
          </p></dd>
<dt><span class="term">beta</span></dt>
<dd><p>
            Scalar multiplier applied to y before the product is added.
          </p></dd>
<dt><span class="term">y</span></dt>
<dd><p>
            Vector of n elements, overwritten with the result alpha*A*x + beta*y.
          </p></dd>
</dl>
</div>
<a name="boost_numeric_bindings.reference.blas.level_2_blas.spmv.complexity"></a><h6>
<a name="id777789"></a>
<a class="link" href="spmv.html#boost_numeric_bindings.reference.blas.level_2_blas.spmv.complexity">Complexity</a>
</h6>
<a name="boost_numeric_bindings.reference.blas.level_2_blas.spmv.example"></a><h6>
<a name="id777814"></a>
<a class="link" href="spmv.html#boost_numeric_bindings.reference.blas.level_2_blas.spmv.example">Example</a>
</h6>
<p>
</p>
<pre class="programlisting"><span class="preprocessor">#include</span> <span class="special"><</span><span class="identifier">boost</span><span class="special">/</span><span class="identifier">numeric</span><span class="special">/</span><span class="identifier">bindings</span><span class="special">/</span><span class="identifier">blas</span><span class="special">/</span><span class="identifier">level2</span><span class="special">/</span><span class="identifier">spmv</span><span class="special">.</span><span class="identifier">hpp</span><span class="special">></span>
<span class="keyword">using</span> <span class="keyword">namespace</span> <span class="identifier">boost</span><span class="special">::</span><span class="identifier">numeric</span><span class="special">::</span><span class="identifier">bindings</span><span class="special">;</span>
<span class="identifier">blas</span><span class="special">::</span><span class="identifier">spmv</span><span class="special">(</span> <span class="identifier">x</span><span class="special">,</span> <span class="identifier">y</span><span class="special">,</span> <span class="identifier">z</span> <span class="special">);</span>
</pre>
<p>
</p>
<p>
this will output
</p>
<p>
</p>
<pre class="programlisting"><span class="special">[</span><span class="number">5</span><span class="special">]</span> <span class="number">0</span> <span class="number">1</span> <span class="number">2</span> <span class="number">3</span> <span class="number">4</span> <span class="number">5</span>
</pre>
<p>
</p>
<a name="boost_numeric_bindings.reference.blas.level_2_blas.spmv.notes"></a><h6>
<a name="id778098"></a>
<a class="link" href="spmv.html#boost_numeric_bindings.reference.blas.level_2_blas.spmv.notes">Notes</a>
</h6>
<a name="boost_numeric_bindings.reference.blas.level_2_blas.spmv.see_also"></a><h6>
<a name="id778122"></a>
<a class="link" href="spmv.html#boost_numeric_bindings.reference.blas.level_2_blas.spmv.see_also">See
Also</a>
</h6>
<div class="itemizedlist"><ul class="itemizedlist" type="disc"><li class="listitem">
Originating Fortran source files <a href="http://www.netlib.org/blas/sspmv.f" target="_top">sspmv.f</a>
and <a href="http://www.netlib.org/blas/dspmv.f" target="_top">dspmv.f</a>
at Netlib.
</li></ul></div>
</div>
<table xmlns:rev="http://www.cs.rpi.edu/~gregod/boost/tools/doc/revision" width="100%"><tr>
<td align="left"></td>
<td align="right"><div class="copyright-footer">Copyright © 2002 -2009 Rutger ter Borg, Krešimir Fresl, Thomas Klimpel,
Toon Knapen, Karl Meerbergen<p>
Distributed under the Boost Software License, Version 1.0. (See accompanying
file LICENSE_1_0.txt or copy at <a href="http://www.boost.org/LICENSE_1_0.txt" target="_top">http://www.boost.org/LICENSE_1_0.txt</a>)
</p>
</div></td>
</tr></table>
<hr>
<div class="spirit-nav">
<a accesskey="p" href="sbmv.html"><img src="../../../../images/prev.png" alt="Prev"></a><a accesskey="u" href="../level_2_blas.html"><img src="../../../../images/up.png" alt="Up"></a><a accesskey="h" href="../../../../index.html"><img src="../../../../images/home.png" alt="Home"></a><a accesskey="n" href="spr2.html"><img src="../../../../images/next.png" alt="Next"></a>
</div>
</body>
</html>
| apache-2.0 |
facebook/buck | test/com/facebook/buck/rules/macros/StringWithMacrosConverterTest.java | 5254 | /*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.buck.rules.macros;
import static org.hamcrest.MatcherAssert.assertThat;
import com.facebook.buck.core.cell.CellPathResolver;
import com.facebook.buck.core.cell.TestCellPathResolver;
import com.facebook.buck.core.model.BuildTarget;
import com.facebook.buck.core.model.BuildTargetFactory;
import com.facebook.buck.core.rules.ActionGraphBuilder;
import com.facebook.buck.core.rules.resolver.impl.TestActionGraphBuilder;
import com.facebook.buck.io.filesystem.impl.FakeProjectFilesystem;
import com.facebook.buck.rules.args.Arg;
import com.facebook.buck.rules.args.CompositeArg;
import com.facebook.buck.rules.args.SanitizedArg;
import com.facebook.buck.rules.args.SourcePathArg;
import com.facebook.buck.rules.args.StringArg;
import com.facebook.buck.rules.args.WriteToFileArg;
import com.facebook.buck.shell.Genrule;
import com.facebook.buck.shell.GenruleBuilder;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import java.util.Optional;
import org.hamcrest.Matchers;
import org.junit.Test;
public class StringWithMacrosConverterTest {
private static final BuildTarget TARGET = BuildTargetFactory.newInstance("//:rule");
private static final CellPathResolver CELL_ROOTS =
TestCellPathResolver.get(new FakeProjectFilesystem());
private static final ImmutableList<MacroExpander<? extends Macro, ?>> MACRO_EXPANDERS =
ImmutableList.of(LocationMacroExpander.INSTANCE);
@Test
public void noMacros() {
ActionGraphBuilder graphBuilder = new TestActionGraphBuilder();
StringWithMacrosConverter converter =
StringWithMacrosConverter.of(
TARGET, CELL_ROOTS.getCellNameResolver(), graphBuilder, MACRO_EXPANDERS);
assertThat(
converter.convert(StringWithMacrosUtils.format("something")),
Matchers.equalTo(StringArg.of("something")));
}
@Test
public void macro() {
ActionGraphBuilder graphBuilder = new TestActionGraphBuilder();
Genrule genrule =
GenruleBuilder.newGenruleBuilder(BuildTargetFactory.newInstance("//:dep"))
.setOut("out")
.build(graphBuilder);
StringWithMacrosConverter converter =
StringWithMacrosConverter.of(
TARGET, CELL_ROOTS.getCellNameResolver(), graphBuilder, MACRO_EXPANDERS);
assertThat(
converter.convert(
StringWithMacrosUtils.format("%s", LocationMacro.of(genrule.getBuildTarget()))),
Matchers.equalTo(
SourcePathArg.of(Preconditions.checkNotNull(genrule.getSourcePathToOutput()))));
}
@Test
public void macroAndString() {
ActionGraphBuilder graphBuilder = new TestActionGraphBuilder();
Genrule genrule =
GenruleBuilder.newGenruleBuilder(BuildTargetFactory.newInstance("//:dep"))
.setOut("out")
.build(graphBuilder);
StringWithMacrosConverter converter =
StringWithMacrosConverter.of(
TARGET, CELL_ROOTS.getCellNameResolver(), graphBuilder, MACRO_EXPANDERS);
assertThat(
converter.convert(
StringWithMacrosUtils.format("--foo=%s", LocationMacro.of(genrule.getBuildTarget()))),
Matchers.equalTo(
CompositeArg.of(
ImmutableList.of(
StringArg.of("--foo="),
SourcePathArg.of(
Preconditions.checkNotNull(genrule.getSourcePathToOutput()))))));
}
@Test
public void sanitization() {
ActionGraphBuilder graphBuilder = new TestActionGraphBuilder();
StringWithMacrosConverter converter =
StringWithMacrosConverter.of(
TARGET,
CELL_ROOTS.getCellNameResolver(),
graphBuilder,
MACRO_EXPANDERS,
Optional.of(s -> "something else"));
assertThat(
converter.convert(StringWithMacrosUtils.format("something")),
Matchers.equalTo(SanitizedArg.create(s -> "something else", "something")));
}
@Test
public void outputToFileMacro() {
ActionGraphBuilder graphBuilder = new TestActionGraphBuilder();
Genrule genrule =
GenruleBuilder.newGenruleBuilder(BuildTargetFactory.newInstance("//:dep"))
.setOut("out")
.build(graphBuilder);
StringWithMacrosConverter converter =
StringWithMacrosConverter.of(
TARGET, CELL_ROOTS.getCellNameResolver(), graphBuilder, MACRO_EXPANDERS);
Arg result =
converter.convert(
StringWithMacrosUtils.format(
"%s", MacroContainer.of(LocationMacro.of(genrule.getBuildTarget()), true)));
assertThat(result, Matchers.instanceOf(WriteToFileArg.class));
}
}
| apache-2.0 |
JFLarvoire/the_silver_searcher | win32/pthread/pthread_attr_setschedparam.c | 1847 | /*
* pthread_attr_setschedparam.c
*
* Description:
* POSIX thread functions that deal with thread scheduling.
*
* --------------------------------------------------------------------------
*
* Pthreads4w - POSIX Threads for Windows
* Copyright 1998 John E. Bossom
* Copyright 1999-2018, Pthreads4w contributors
*
* Homepage: https://sourceforge.net/projects/pthreads4w/
*
* The current list of contributors is contained
* in the file CONTRIBUTORS included with the source
* code distribution. The list can also be seen at the
* following World Wide Web location:
*
* https://sourceforge.net/p/pthreads4w/wiki/Contributors/
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifdef HAVE_CONFIG_H
# include <config.h>
#endif
#include "pthread.h"
#include "implement.h"
#include "sched.h"
int
pthread_attr_setschedparam (pthread_attr_t * attr,
const struct sched_param *param)
{
int priority;
if (__ptw32_is_attr (attr) != 0 || param == NULL)
{
return EINVAL;
}
priority = param->sched_priority;
/* Validate priority level. */
if (priority < sched_get_priority_min (SCHED_OTHER) ||
priority > sched_get_priority_max (SCHED_OTHER))
{
return EINVAL;
}
memcpy (&(*attr)->param, param, sizeof (*param));
return 0;
}
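/* Usage sketch (illustrative, not part of this translation unit):
 *
 *   pthread_attr_t attr;
 *   struct sched_param param;
 *   pthread_attr_init (&attr);
 *   param.sched_priority = sched_get_priority_min (SCHED_OTHER);
 *   pthread_attr_setschedparam (&attr, &param);
 */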
| apache-2.0 |
facebook/buck | src/com/facebook/buck/cxx/CxxPreprocessorInput.java | 3593 | /*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.buck.cxx;
import com.facebook.buck.core.model.BuildTarget;
import com.facebook.buck.core.rules.BuildRule;
import com.facebook.buck.core.rules.BuildRuleResolver;
import com.facebook.buck.core.rules.common.BuildableSupport;
import com.facebook.buck.core.util.immutables.BuckStyleValueWithBuilder;
import com.facebook.buck.rules.args.Arg;
import com.facebook.buck.rules.coercer.FrameworkPath;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Multimap;
import java.util.Optional;
/** The components that get contributed to a top-level run of the C++ preprocessor. */
@BuckStyleValueWithBuilder
public abstract class CxxPreprocessorInput {
private static final CxxPreprocessorInput INSTANCE =
ImmutableCxxPreprocessorInput.builder().build();
public abstract Multimap<CxxSource.Type, Arg> getPreprocessorFlags();
public abstract ImmutableList<CxxHeaders> getIncludes();
// Framework paths.
public abstract ImmutableSet<FrameworkPath> getFrameworks();
// The build rules which produce headers found in the includes below.
protected abstract ImmutableSet<BuildTarget> getRules();
public Iterable<BuildRule> getDeps(BuildRuleResolver ruleResolver) {
ImmutableList.Builder<BuildRule> builder = ImmutableList.builder();
for (CxxHeaders cxxHeaders : getIncludes()) {
cxxHeaders.getDeps(ruleResolver).forEachOrdered(builder::add);
}
builder.addAll(ruleResolver.getAllRules(getRules()));
for (FrameworkPath frameworkPath : getFrameworks()) {
if (frameworkPath.getSourcePath().isPresent()) {
Optional<BuildRule> frameworkRule =
ruleResolver.getRule(frameworkPath.getSourcePath().get());
if (frameworkRule.isPresent()) {
builder.add(frameworkRule.get());
}
}
}
for (Arg arg : getPreprocessorFlags().values()) {
builder.addAll(BuildableSupport.getDepsCollection(arg, ruleResolver));
}
return builder.build();
}
public static CxxPreprocessorInput concat(Iterable<CxxPreprocessorInput> inputs) {
CxxPreprocessorInput.Builder builder = CxxPreprocessorInput.builder();
for (CxxPreprocessorInput input : inputs) {
builder.putAllPreprocessorFlags(input.getPreprocessorFlags());
builder.addAllIncludes(input.getIncludes());
builder.addAllFrameworks(input.getFrameworks());
builder.addAllRules(input.getRules());
}
return builder.build();
}
public static CxxPreprocessorInput of() {
return INSTANCE;
}
public static Builder builder() {
return new Builder();
}
public static class Builder extends ImmutableCxxPreprocessorInput.Builder {
@Override
public CxxPreprocessorInput build() {
CxxPreprocessorInput cxxPreprocessorInput = super.build();
if (cxxPreprocessorInput.equals(INSTANCE)) {
return INSTANCE;
}
return cxxPreprocessorInput;
}
}
}
| apache-2.0 |
AzureAutomationTeam/azure-powershell | src/ResourceManager/ServiceFabric/Commands.ServiceFabric/help/Add-AzureRmServiceFabricClusterCertificate.md | 6986 | ---
external help file: Microsoft.Azure.Commands.ServiceFabric.dll-Help.xml
Module Name: AzureRM.ServiceFabric
online version: https://docs.microsoft.com/en-us/powershell/module/azurerm.servicefabric/add-azurermservicefabricclustercertificate
schema: 2.0.0
---
# Add-AzureRmServiceFabricClusterCertificate
## SYNOPSIS
Add a secondary cluster certificate to the cluster.
## SYNTAX
### ByExistingKeyVault
```
Add-AzureRmServiceFabricClusterCertificate [-ResourceGroupName] <String> [-Name] <String>
-SecretIdentifier <String> [-DefaultProfile <IAzureContextContainer>] [-WhatIf] [-Confirm]
[<CommonParameters>]
```
### ByNewPfxAndVaultName
```
Add-AzureRmServiceFabricClusterCertificate [-ResourceGroupName] <String> [-Name] <String>
[-KeyVaultResouceGroupName <String>] [-KeyVaultName <String>] [-CertificateOutputFolder <String>]
[-CertificatePassword <SecureString>] -CertificateSubjectName <String>
[-DefaultProfile <IAzureContextContainer>] [-WhatIf] [-Confirm] [<CommonParameters>]
```
### ByExistingPfxAndVaultName
```
Add-AzureRmServiceFabricClusterCertificate [-ResourceGroupName] <String> [-Name] <String>
[-KeyVaultResouceGroupName <String>] [-KeyVaultName <String>] -CertificateFile <String>
[-CertificatePassword <SecureString>] [-DefaultProfile <IAzureContextContainer>] [-WhatIf] [-Confirm]
[<CommonParameters>]
```
## DESCRIPTION
Use **Add-AzureRmServiceFabricClusterCertificate** to add a secondary cluster certificate, either from an existing Azure key vault, or by creating a new Azure key vault using a certificate you provide or a newly created self-signed certificate.
It will override the existing secondary cluster certificate, if there is one.
## EXAMPLES
### Example 1
```
Add-AzureRmServiceFabricClusterCertificate -ResourceGroupName 'Group1' -Name 'Contoso01SFCluster'
-SecretIdentifier 'https://contoso03vault.vault.azure.net/secrets/contoso03vaultrg/7f7de9131c034172b9df37ccc549524f'
```
This command will add a certificate in the existing Azure key vault as a secondary cluster certificate.
### Example 2
```
PS c:\> $pwd = ConvertTo-SecureString -String "123" -AsPlainText -Force
PS c:\> Add-AzureRmServiceFabricClusterCertificate -ResourceGroupName 'Group2' -Name 'Contoso02SFCluster' -CertificateSubjectName 'Contoso.com'
-CertificateOutputFolder 'c:\test' -CertificatePassword $pwd
```
This command will create a self-signed certificate in the Azure key vault and upgrade the cluster to use it as a secondary cluster certificate.
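### Example 3
```
PS c:\> $pwd = ConvertTo-SecureString -String "123" -AsPlainText -Force
PS c:\> Add-AzureRmServiceFabricClusterCertificate -ResourceGroupName 'Group3' -Name 'Contoso03SFCluster' -CertificateFile 'c:\test\cert.pfx' -CertificatePassword $pwd
```
This command will add the certificate from an existing pfx file as a secondary cluster certificate, uploading it to the key vault. The resource group, cluster name and file path shown are illustrative placeholders.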
## PARAMETERS
### -CertificateFile
The existing certificate file path.
```yaml
Type: System.String
Parameter Sets: ByExistingPfxAndVaultName
Aliases: Source
Required: True
Position: Named
Default value: None
Accept pipeline input: True (ByValue)
Accept wildcard characters: False
```
### -CertificateOutputFolder
The folder of the new certificate to be created.
```yaml
Type: System.String
Parameter Sets: ByNewPfxAndVaultName
Aliases: Destination
Required: False
Position: Named
Default value: None
Accept pipeline input: True (ByValue)
Accept wildcard characters: False
```
### -CertificatePassword
The password of the certificate file.
```yaml
Type: System.Security.SecureString
Parameter Sets: ByNewPfxAndVaultName, ByExistingPfxAndVaultName
Aliases: CertPassword
Required: False
Position: Named
Default value: None
Accept pipeline input: True (ByValue)
Accept wildcard characters: False
```
### -CertificateSubjectName
The Dns name of the certificate to be created.
```yaml
Type: System.String
Parameter Sets: ByNewPfxAndVaultName
Aliases: Subject
Required: True
Position: Named
Default value: None
Accept pipeline input: True (ByValue)
Accept wildcard characters: False
```
### -DefaultProfile
The credentials, account, tenant, and subscription used for communication with azure.
```yaml
Type: Microsoft.Azure.Commands.Common.Authentication.Abstractions.IAzureContextContainer
Parameter Sets: (All)
Aliases: AzureRmContext, AzureCredential
Required: False
Position: Named
Default value: None
Accept pipeline input: False
Accept wildcard characters: False
```
### -KeyVaultName
Azure key vault name.
```yaml
Type: System.String
Parameter Sets: ByNewPfxAndVaultName, ByExistingPfxAndVaultName
Aliases:
Required: False
Position: Named
Default value: None
Accept pipeline input: True (ByValue)
Accept wildcard characters: False
```
### -KeyVaultResouceGroupName
Azure key vault resource group name.
```yaml
Type: System.String
Parameter Sets: ByNewPfxAndVaultName, ByExistingPfxAndVaultName
Aliases:
Required: False
Position: Named
Default value: None
Accept pipeline input: True (ByValue)
Accept wildcard characters: False
```
### -Name
Specifies the name of the cluster.
```yaml
Type: System.String
Parameter Sets: (All)
Aliases: ClusterName
Required: True
Position: 1
Default value: None
Accept pipeline input: True (ByPropertyName)
Accept wildcard characters: False
```
### -ResourceGroupName
Specifies the name of the resource group.
```yaml
Type: System.String
Parameter Sets: (All)
Aliases:
Required: True
Position: 0
Default value: None
Accept pipeline input: True (ByPropertyName)
Accept wildcard characters: False
```
### -SecretIdentifier
The existing Azure key vault secret Url.
```yaml
Type: System.String
Parameter Sets: ByExistingKeyVault
Aliases:
Required: True
Position: Named
Default value: None
Accept pipeline input: True (ByValue)
Accept wildcard characters: False
```
### -Confirm
Prompts you for confirmation before running the cmdlet.
```yaml
Type: System.Management.Automation.SwitchParameter
Parameter Sets: (All)
Aliases: cf
Required: False
Position: Named
Default value: None
Accept pipeline input: False
Accept wildcard characters: False
```
### -WhatIf
Shows what would happen if the cmdlet runs. The cmdlet is not run.
```yaml
Type: System.Management.Automation.SwitchParameter
Parameter Sets: (All)
Aliases: wi
Required: False
Position: Named
Default value: None
Accept pipeline input: False
Accept wildcard characters: False
```
### CommonParameters
This cmdlet supports the common parameters: -Debug, -ErrorAction, -ErrorVariable, -InformationAction, -InformationVariable, -OutVariable, -OutBuffer, -PipelineVariable, -Verbose, -WarningAction, and -WarningVariable. For more information, see about_CommonParameters (http://go.microsoft.com/fwlink/?LinkID=113216).
## INPUTS
### System.String
Parameters: CertificateFile (ByValue), CertificateOutputFolder (ByValue), CertificateSubjectName (ByValue), KeyVaultName (ByValue), KeyVaultResouceGroupName (ByValue), SecretIdentifier (ByValue)
### System.Security.SecureString
Parameters: CertificatePassword (ByValue)
## OUTPUTS
### Microsoft.Azure.Commands.ServiceFabric.Models.PSCluster
## NOTES
## RELATED LINKS
[Remove-AzureRmServiceFabricClusterCertificate](./Remove-AzureRmServiceFabricClusterCertificate.md)
[New-AzureRmServiceFabricCluster](./New-AzureRmServiceFabricCluster.md)
[Add-AzureRmServiceFabricApplicationCertificate](./Add-AzureRmServiceFabricApplicationCertificate.md)
| apache-2.0 |
akosyakov/intellij-community | xml/xml-psi-impl/src/com/intellij/embedding/MasqueradingPsiBuilderAdapter.java | 11304 | /*
* Copyright 2000-2015 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.embedding;
import com.intellij.lang.ASTNode;
import com.intellij.lang.LighterLazyParseableNode;
import com.intellij.lang.ParserDefinition;
import com.intellij.lang.PsiBuilder;
import com.intellij.lang.impl.DelegateMarker;
import com.intellij.lang.impl.PsiBuilderAdapter;
import com.intellij.lang.impl.PsiBuilderImpl;
import com.intellij.openapi.diagnostic.Logger;
import com.intellij.openapi.project.Project;
import com.intellij.psi.TokenType;
import com.intellij.psi.tree.IElementType;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import java.util.ArrayList;
import java.util.List;
/**
 * A delegate PsiBuilder that hides or substitutes some tokens (namely, the ones provided by {@link MasqueradingLexer})
 * from the parser, while still inserting them into the production tree in their original form.
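 * <p>
 * Construction sketch (a hypothetical setup; the project, parser definition, lexer,
 * chameleon node and text are assumed to be obtained elsewhere):
 * <pre>
 * PsiBuilder builder = new MasqueradingPsiBuilderAdapter(
 *     project, parserDefinition, masqueradingLexer, chameleonNode, text);
 * </pre>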
* @see MasqueradingLexer
*/
public class MasqueradingPsiBuilderAdapter extends PsiBuilderAdapter {
private final static Logger LOG = Logger.getInstance(MasqueradingPsiBuilderAdapter.class);
private List<MyShiftedToken> myShrunkSequence;
private CharSequence myShrunkCharSequence;
private int myLexPosition;
private final PsiBuilderImpl myBuilderDelegate;
private final MasqueradingLexer myLexer;
public MasqueradingPsiBuilderAdapter(@NotNull final Project project,
@NotNull final ParserDefinition parserDefinition,
@NotNull final MasqueradingLexer lexer,
@NotNull final ASTNode chameleon,
@NotNull final CharSequence text) {
this(new PsiBuilderImpl(project, parserDefinition, lexer, chameleon, text));
}
public MasqueradingPsiBuilderAdapter(@NotNull final Project project,
@NotNull final ParserDefinition parserDefinition,
@NotNull final MasqueradingLexer lexer,
@NotNull final LighterLazyParseableNode chameleon,
@NotNull final CharSequence text) {
this(new PsiBuilderImpl(project, parserDefinition, lexer, chameleon, text));
}
private MasqueradingPsiBuilderAdapter(PsiBuilderImpl builder) {
super(builder);
LOG.assertTrue(myDelegate instanceof PsiBuilderImpl);
myBuilderDelegate = ((PsiBuilderImpl)myDelegate);
LOG.assertTrue(myBuilderDelegate.getLexer() instanceof MasqueradingLexer);
myLexer = ((MasqueradingLexer)myBuilderDelegate.getLexer());
initShrunkSequence();
}
@Override
public CharSequence getOriginalText() {
return myShrunkCharSequence;
}
@Override
public void advanceLexer() {
myLexPosition++;
skipWhitespace();
synchronizePositions(false);
}
/**
* @param exact if true then positions should be equal;
* else delegate should be behind, not including exactly all foreign (skipped) or whitespace tokens
*/
private void synchronizePositions(boolean exact) {
final PsiBuilder delegate = getDelegate();
if (myLexPosition >= myShrunkSequence.size() || delegate.eof()) {
myLexPosition = myShrunkSequence.size();
while (!delegate.eof()) {
delegate.advanceLexer();
}
return;
}
if (delegate.getCurrentOffset() > myShrunkSequence.get(myLexPosition).realStart) {
LOG.error("delegate is ahead of my builder!");
return;
}
final int keepUpPosition = getKeepUpPosition(exact);
while (!delegate.eof()) {
final int delegatePosition = delegate.getCurrentOffset();
if (delegatePosition < keepUpPosition) {
delegate.advanceLexer();
}
else {
break;
}
}
}
private int getKeepUpPosition(boolean exact) {
if (exact) {
return myShrunkSequence.get(myLexPosition).realStart;
}
int lexPosition = myLexPosition;
while (lexPosition > 0 && (myShrunkSequence.get(lexPosition - 1).shrunkStart == myShrunkSequence.get(lexPosition).shrunkStart
|| isWhiteSpaceOnPos(lexPosition - 1))) {
lexPosition--;
}
if (lexPosition == 0) {
return myShrunkSequence.get(lexPosition).realStart;
}
return myShrunkSequence.get(lexPosition - 1).realStart + 1;
}
@Override
public IElementType lookAhead(int steps) {
if (eof()) { // ensure we skip over whitespace if it's needed
return null;
}
int cur = myLexPosition;
while (steps > 0) {
++cur;
while (cur < myShrunkSequence.size() && isWhiteSpaceOnPos(cur)) {
cur++;
}
steps--;
}
return cur < myShrunkSequence.size() ? myShrunkSequence.get(cur).elementType : null;
}
@Override
public IElementType rawLookup(int steps) {
int cur = myLexPosition + steps;
return cur >= 0 && cur < myShrunkSequence.size() ? myShrunkSequence.get(cur).elementType : null;
}
@Override
public int rawTokenTypeStart(int steps) {
int cur = myLexPosition + steps;
if (cur < 0) return -1;
if (cur >= myShrunkSequence.size()) return getOriginalText().length();
return myShrunkSequence.get(cur).shrunkStart;
}
@Override
public int rawTokenIndex() {
return myLexPosition;
}
@Override
public int getCurrentOffset() {
return myLexPosition < myShrunkSequence.size() ? myShrunkSequence.get(myLexPosition).shrunkStart : myShrunkCharSequence.length();
}
@Nullable
@Override
public IElementType getTokenType() {
if (allIsEmpty()) {
return TokenType.DUMMY_HOLDER;
}
skipWhitespace();
return myLexPosition < myShrunkSequence.size() ? myShrunkSequence.get(myLexPosition).elementType : null;
}
@Nullable
@Override
public String getTokenText() {
if (allIsEmpty()) {
return getDelegate().getOriginalText().toString();
}
skipWhitespace();
if (myLexPosition >= myShrunkSequence.size()) {
return null;
}
final MyShiftedToken token = myShrunkSequence.get(myLexPosition);
return myShrunkCharSequence.subSequence(token.shrunkStart, token.shrunkEnd).toString();
}
@Override
public boolean eof() {
boolean isEof = myLexPosition >= myShrunkSequence.size();
if (!isEof) {
return false;
}
synchronizePositions(true);
return true;
}
@Override
public Marker mark() {
// In the case of the topmost node all should be inserted
if (myLexPosition != 0) {
synchronizePositions(true);
}
final Marker mark = super.mark();
return new MyMarker(mark, myLexPosition);
}
private boolean allIsEmpty() {
return myShrunkSequence.isEmpty() && getDelegate().getOriginalText().length() != 0;
}
private void skipWhitespace() {
while (myLexPosition < myShrunkSequence.size() && isWhiteSpaceOnPos(myLexPosition)) {
myLexPosition++;
}
}
private boolean isWhiteSpaceOnPos(int pos) {
return myBuilderDelegate.whitespaceOrComment(myShrunkSequence.get(pos).elementType);
}
protected void initShrunkSequence() {
initTokenListAndCharSequence(myLexer);
myLexPosition = 0;
}
private void initTokenListAndCharSequence(MasqueradingLexer lexer) {
lexer.start(getDelegate().getOriginalText());
myShrunkSequence = new ArrayList<MyShiftedToken>();
StringBuilder charSequenceBuilder = new StringBuilder();
int realPos = 0;
int shrunkPos = 0;
while (lexer.getTokenType() != null) {
final IElementType masqueTokenType = lexer.getMasqueTokenType();
final String masqueTokenText = lexer.getMasqueTokenText();
final int realLength = lexer.getTokenEnd() - lexer.getTokenStart();
if (masqueTokenType != null) {
assert masqueTokenText != null;
final int masqueLength = masqueTokenText.length();
myShrunkSequence.add(new MyShiftedToken(masqueTokenType,
realPos, realPos + realLength,
shrunkPos, shrunkPos + masqueLength));
charSequenceBuilder.append(masqueTokenText);
shrunkPos += masqueLength;
}
realPos += realLength;
lexer.advance();
}
myShrunkCharSequence = charSequenceBuilder.toString();
}
@SuppressWarnings({"StringConcatenationInsideStringBufferAppend", "UnusedDeclaration"})
private void logPos() {
StringBuilder sb = new StringBuilder();
sb.append("\nmyLexPosition=" + myLexPosition + "/" + myShrunkSequence.size());
if (myLexPosition < myShrunkSequence.size()) {
final MyShiftedToken token = myShrunkSequence.get(myLexPosition);
sb.append("\nshrunk:" + token.shrunkStart + "," + token.shrunkEnd);
sb.append("\nreal:" + token.realStart + "," + token.realEnd);
sb.append("\nTT:" + getTokenText());
}
sb.append("\ndelegate:");
sb.append("eof=" + myDelegate.eof());
if (!myDelegate.eof()) {
//noinspection ConstantConditions
sb.append("\nposition:" + myDelegate.getCurrentOffset() + "," + (myDelegate.getCurrentOffset() + myDelegate.getTokenText().length()));
sb.append("\nTT:" + myDelegate.getTokenText());
}
LOG.info(sb.toString());
}
private static class MyShiftedToken {
public final IElementType elementType;
public final int realStart;
public final int realEnd;
public final int shrunkStart;
public final int shrunkEnd;
public MyShiftedToken(IElementType elementType, int realStart, int realEnd, int shrunkStart, int shrunkEnd) {
this.elementType = elementType;
this.realStart = realStart;
this.realEnd = realEnd;
this.shrunkStart = shrunkStart;
this.shrunkEnd = shrunkEnd;
}
@Override
public String toString() {
return "MSTk: [" + realStart + ", " + realEnd + "] -> [" + shrunkStart + ", " + shrunkEnd + "]: " + elementType.toString();
}
}
private class MyMarker extends DelegateMarker {
private final int myBuilderPosition;
public MyMarker(Marker delegate, int builderPosition) {
super(delegate);
myBuilderPosition = builderPosition;
}
@Override
public void rollbackTo() {
super.rollbackTo();
myLexPosition = myBuilderPosition;
}
@Override
public void doneBefore(IElementType type, Marker before) {
super.doneBefore(type, getDelegateOrThis(before));
}
@Override
public void doneBefore(IElementType type, Marker before, String errorMessage) {
super.doneBefore(type, getDelegateOrThis(before), errorMessage);
}
@NotNull
private Marker getDelegateOrThis(@NotNull Marker marker) {
if (marker instanceof DelegateMarker) {
return ((DelegateMarker)marker).getDelegate();
}
else {
return marker;
}
}
}
}
| apache-2.0 |
jfrazelle/notary | server/storage/errors.go | 1372 | package storage
import (
"fmt"
)
// ErrOldVersion is returned when a newer version of TUF metadata is already available
type ErrOldVersion struct{}
// Error implements error
func (err ErrOldVersion) Error() string {
return fmt.Sprintf("Error updating metadata. A newer version is already available")
}
// ErrNotFound is returned when TUF metadata isn't found for a specific record
type ErrNotFound struct{}
// Error implements error
func (err ErrNotFound) Error() string {
return fmt.Sprintf("No record found")
}
// ErrKeyExists is returned when a key already exists
type ErrKeyExists struct {
gun string
role string
}
// Error implements error
func (err ErrKeyExists) Error() string {
return fmt.Sprintf("Error, timestamp key already exists for %s:%s", err.gun, err.role)
}
// ErrNoKey is returned when no timestamp key is found
type ErrNoKey struct {
gun string
}
// ErrNoKey is returned when no timestamp key is found
func (err ErrNoKey) Error() string {
return fmt.Sprintf("Error, no timestamp key found for %s", err.gun)
}
// ErrBadQuery is used when the parameters provided cannot be appropriately
// coerced.
type ErrBadQuery struct {
msg string
}
func (err ErrBadQuery) Error() string {
return fmt.Sprintf("did not recognize parameters: %s", err.msg)
}
| apache-2.0 |
emre-aydin/hazelcast | hazelcast/src/main/java/com/hazelcast/nio/serialization/TypedDataSerializable.java | 1000 | /*
* Copyright (c) 2008-2021, Hazelcast, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.hazelcast.nio.serialization;
/**
 * Allows an implementing class to act as if it were another class when being
 * deserialized via DataSerializable.
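 * <p>
 * Usage sketch (class names are hypothetical):
 * <pre>
 * class LegacyTask implements TypedDataSerializable {
 *     public Class getClassType() { return Task.class; } // deserialized as Task
 *     public void writeData(ObjectDataOutput out) throws IOException { ... }
 *     public void readData(ObjectDataInput in) throws IOException { ... }
 * }
 * </pre>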
*/
public interface TypedDataSerializable extends DataSerializable {
/**
*
* @return The class type that this serializable wants to act as.
*/
Class getClassType();
}
| apache-2.0 |
lord19871207/barber | api/src/main/java/io/sweers/barber/package-info.java | 465 | /**
* View attribute injection library for Android which generates the obtainStyledAttributes() and
* TypedArray boilerplate code for you at compile time.
* <p>
 * No more having to deal with context.obtainStyledAttributes(...) or manually retrieving values
* from the resulting {@link android.content.res.TypedArray TypedArray} instance. Just annotate your
* field or method with {@link io.sweers.barber.StyledAttr @StyledAttr}.
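 * <p>
 * A hypothetical usage sketch (the styleable reference is illustrative only):
 * <pre>
 * {@literal @}StyledAttr(R.styleable.MyView_cornerRadius)
 * float cornerRadius;
 * </pre>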
*/
package io.sweers.barber; | apache-2.0 |
thhiep/galen | galen-core/src/test/java/com/galenframework/components/report/FakeException.java | 2298 | /*******************************************************************************
* Copyright 2015 Ivan Shubin http://galenframework.com
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.galenframework.components.report;
import static java.lang.String.format;
import java.io.PrintStream;
import java.io.PrintWriter;
public class FakeException extends RuntimeException {
/**
*
*/
private static final long serialVersionUID = -4840622707009032748L;
public FakeException(String string) {
super(string);
}
@Override
public StackTraceElement[] getStackTrace() {
return new StackTraceElement[]{
new StackTraceElement("net.mindengine.someclass.SomeClass", "method1", "SomeClass.java", 4),
new StackTraceElement("net.mindengine.someclass.SomeClass2", "method2", "SomeClass2.java", 5),
new StackTraceElement("net.mindengine.someclass.SomeClass3", "method3", "SomeClass3.java", 6)
};
}
@Override
public void printStackTrace(PrintStream ps) {
ps.println(getClass().getName() + ": " + getMessage());
for (StackTraceElement element : getStackTrace()) {
ps.println(format("\tat %s.%s(%s:%d)", element.getClassName(), element.getMethodName(), element.getFileName(), element.getLineNumber()));
}
}
@Override
public void printStackTrace(PrintWriter s) {
s.println(getClass().getName() + ": " + getMessage());
for (StackTraceElement element : getStackTrace()) {
s.println(format("\tat %s.%s(%s:%d)", element.getClassName(), element.getMethodName(), element.getFileName(), element.getLineNumber()));
}
}
}
| apache-2.0 |
MuShiiii/commons-collections | src/test/java/org/apache/commons/collections4/comparators/ComparableComparatorTest.java | 1878 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.commons.collections4.comparators;
import java.util.Comparator;
import java.util.LinkedList;
import java.util.List;
/**
* Tests for ComparableComparator.
*
* @version $Id$
*
*/
@SuppressWarnings("boxing")
public class ComparableComparatorTest extends AbstractComparatorTest<Integer> {
public ComparableComparatorTest(final String testName) {
super(testName);
}
@Override
public Comparator<Integer> makeObject() {
return new ComparableComparator<Integer>();
}
@Override
public List<Integer> getComparableObjectsOrdered() {
final List<Integer> list = new LinkedList<Integer>();
list.add(1);
list.add(2);
list.add(3);
list.add(4);
list.add(5);
return list;
}
@Override
public String getCompatibilityVersion() {
return "4";
}
// public void testCreate() throws Exception {
// writeExternalFormToDisk((java.io.Serializable) makeObject(), "src/test/resources/data/test/ComparableComparator.version4.obj");
// }
}
| apache-2.0 |
HonzaKral/elasticsearch | x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/SLMInfoTransportAction.java | 1649 | /*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.slm;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.license.XPackLicenseState;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.core.XPackField;
import org.elasticsearch.xpack.core.XPackSettings;
import org.elasticsearch.xpack.core.action.XPackInfoFeatureAction;
import org.elasticsearch.xpack.core.action.XPackInfoFeatureTransportAction;
public class SLMInfoTransportAction extends XPackInfoFeatureTransportAction {
private final boolean enabled;
private final XPackLicenseState licenseState;
@Inject
public SLMInfoTransportAction(TransportService transportService, ActionFilters actionFilters,
Settings settings, XPackLicenseState licenseState) {
super(XPackInfoFeatureAction.SNAPSHOT_LIFECYCLE.name(), transportService, actionFilters);
this.enabled = XPackSettings.SNAPSHOT_LIFECYCLE_ENABLED.get(settings);
this.licenseState = licenseState;
}
@Override
public String name() {
return XPackField.SNAPSHOT_LIFECYCLE;
}
@Override
public boolean available() {
return licenseState.isIndexLifecycleAllowed();
}
@Override
public boolean enabled() {
return enabled;
}
}
| apache-2.0 |
selkhateeb/closure-compiler | test/com/google/javascript/jscomp/PolymerBehaviorExtractorTest.java | 5025 | /*
* Copyright 2016 The Closure Compiler Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.javascript.jscomp;
import static com.google.common.truth.Truth.assertThat;
import com.google.common.base.Predicates;
import com.google.common.collect.ImmutableList;
import com.google.javascript.jscomp.PolymerBehaviorExtractor.BehaviorDefinition;
import com.google.javascript.rhino.Node;
/**
* Unit tests for {@link PolymerBehaviorExtractor}.
*/
public class PolymerBehaviorExtractorTest extends CompilerTypeTestCase {
private PolymerBehaviorExtractor extractor;
private Node behaviorArray;
@Override
protected void setUp() {
super.setUp();
behaviorArray = null;
}
public void testArrayBehavior() {
parseAndInitializeExtractor(
LINE_JOINER.join(
"/** @polymerBehavior */",
"var FunBehavior = {",
" properties: {",
" isFun: Boolean",
" },",
" /** @param {string} funAmount */",
" doSomethingFun: function(funAmount) { alert('Something ' + funAmount + ' fun!'); },",
" /** @override */",
" created: function() {}",
"};",
"/** @polymerBehavior */",
"var RadBehavior = {",
" properties: {",
" howRad: Number",
" },",
" /** @param {number} radAmount */",
" doSomethingRad: function(radAmount) { alert('Something ' + radAmount + ' rad!'); },",
" /** @override */",
" ready: function() {}",
"};",
"/** @polymerBehavior */",
"var SuperCoolBehaviors = [FunBehavior, RadBehavior];",
"/** @polymerBehavior */",
"var BoringBehavior = {",
" properties: {",
" boringString: String",
" },",
" /** @param {boolean} boredYet */",
" doSomething: function(boredYet) { alert(boredYet + ' ' + this.boringString); },",
"};",
"var A = Polymer({",
" is: 'x-element',",
" behaviors: [ SuperCoolBehaviors, BoringBehavior ],",
"});"));
ImmutableList<BehaviorDefinition> defs = extractor.extractBehaviors(behaviorArray);
assertThat(defs).hasSize(3);
// TODO(jlklein): Actually verify the properties of the BehaviorDefinitions.
}
public void testInlineLiteralBehavior() {
parseAndInitializeExtractor(
LINE_JOINER.join(
"/** @polymerBehavior */",
"var FunBehavior = {",
" properties: {",
" isFun: Boolean",
" },",
" /** @param {string} funAmount */",
" doSomethingFun: function(funAmount) { alert('Something ' + funAmount + ' fun!'); },",
" /** @override */",
" created: function() {}",
"};",
"/** @polymerBehavior */",
"var SuperCoolBehaviors = [FunBehavior, {",
" properties: {",
" howRad: Number",
" },",
" /** @param {number} radAmount */",
" doSomethingRad: function(radAmount) { alert('Something ' + radAmount + ' rad!'); },",
" /** @override */",
" ready: function() {}",
"}];",
"var A = Polymer({",
" is: 'x-element',",
" behaviors: [ SuperCoolBehaviors ],",
"});"));
ImmutableList<BehaviorDefinition> defs = extractor.extractBehaviors(behaviorArray);
assertThat(defs).hasSize(2);
// TODO(jlklein): Actually verify the properties of the BehaviorDefinitions.
}
// TODO(jlklein): Test more use cases: names to avoid copying, global vs. non-global, etc.
private void parseAndInitializeExtractor(String code) {
Node root = compiler.parseTestCode(code);
GlobalNamespace globalNamespace = new GlobalNamespace(compiler, root);
extractor = new PolymerBehaviorExtractor(compiler, globalNamespace);
NodeUtil.visitPostOrder(root, new NodeUtil.Visitor() {
@Override
public void visit(Node node) {
if (isBehaviorArrayDeclaration(node)) {
behaviorArray = node;
}
}
}, Predicates.<Node>alwaysTrue());
assertNotNull(behaviorArray);
}
private boolean isBehaviorArrayDeclaration(Node node) {
return node.isArrayLit()
&& node.getParent().isStringKey() && node.getParent().getString().equals("behaviors");
}
}
| apache-2.0 |
Microsoft/TypeScript | tests/cases/compiler/destructuringControlFlowNoCrash.ts | 323 |
// legal JS, if nonsensical, which also triggers the issue
const {
date,
} = (inspectedElement: any) => 0;
date.toISOString();
// Working flow code
const {
date2,
} = (inspectedElement: any).props;
date2.toISOString();
// It could also be an async function
const { constructor } = async () => {};
| apache-2.0 |
hurricup/intellij-community | python/src/com/jetbrains/python/psi/impl/blockEvaluator/PyEvaluationResult.java | 1369 | /*
* Copyright 2000-2014 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.jetbrains.python.psi.impl.blockEvaluator;
import com.jetbrains.python.psi.PyExpression;
import org.jetbrains.annotations.NotNull;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* @author Ilya.Kazakevich
*/
@SuppressWarnings("PackageVisibleField") // Package-only class
class PyEvaluationResult {
@NotNull
final Map<String, Object> myNamespace = new HashMap<>();
@NotNull
final Map<String, List<PyExpression>> myDeclarations = new HashMap<>();
@NotNull
List<PyExpression> getDeclarations(@NotNull final String name) {
final List<PyExpression> expressions = myDeclarations.get(name);
return (expressions != null) ? expressions : Collections.<PyExpression>emptyList();
}
}
| apache-2.0 |
brianjmoore/material-components-ios | components/BottomSheet/examples/supplemental/BottomSheetPresenterViewController.h | 749 | /*
Copyright 2017-present the Material Components for iOS authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#import <UIKit/UIKit.h>
@interface BottomSheetPresenterViewController : UIViewController
- (void)presentBottomSheet;
@end
| apache-2.0 |
abovelabs/aws-ios-sdk | src/include/AutoScaling/AutoScalingCreateLaunchConfigurationRequest.h | 7556 | /*
* Copyright 2010-2012 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
#import "AutoScalingBlockDeviceMapping.h"
#import "AutoScalingInstanceMonitoring.h"
#import "../AmazonServiceRequestConfig.h"
/**
* Create Launch Configuration Request
*/
@interface AutoScalingCreateLaunchConfigurationRequest:AmazonServiceRequestConfig
{
NSString *launchConfigurationName;
NSString *imageId;
NSString *keyName;
NSMutableArray *securityGroups;
NSString *userData;
NSString *instanceType;
NSString *kernelId;
NSString *ramdiskId;
NSMutableArray *blockDeviceMappings;
AutoScalingInstanceMonitoring *instanceMonitoring;
NSString *spotPrice;
NSString *iamInstanceProfile;
}
/**
* Default constructor for a new object. Callers should use the
* property methods to initialize this object after creating it.
*/
-(id)init;
/**
* The name of the launch configuration to create.
* <p>
* <b>Constraints:</b><br/>
* <b>Length: </b>1 - 255<br/>
* <b>Pattern: </b>[\u0020-\uD7FF\uE000-\uFFFD\uD800\uDC00-\uDBFF\uDFFF\r\n\t]*<br/>
*/
@property (nonatomic, retain) NSString *launchConfigurationName;
/**
* Unique ID of the <i>Amazon Machine Image</i> (AMI) which was assigned
* during registration. For more information about Amazon EC2 images,
* please see <a href="http://aws.amazon.com/ec2/"> Amazon EC2 product
* documentation</a>.
* <p>
* <b>Constraints:</b><br/>
* <b>Length: </b>1 - 255<br/>
* <b>Pattern: </b>[\u0020-\uD7FF\uE000-\uFFFD\uD800\uDC00-\uDBFF\uDFFF\r\n\t]*<br/>
*/
@property (nonatomic, retain) NSString *imageId;
/**
* The name of the Amazon EC2 key pair.
* <p>
* <b>Constraints:</b><br/>
* <b>Length: </b>1 - 255<br/>
* <b>Pattern: </b>[\u0020-\uD7FF\uE000-\uFFFD\uD800\uDC00-\uDBFF\uDFFF\r\n\t]*<br/>
*/
@property (nonatomic, retain) NSString *keyName;
/**
* The names of the security groups with which to associate Amazon EC2 or
* Amazon VPC instances. Specify Amazon EC2 security groups using
* security group names, such as <code>websrv</code>. Specify Amazon VPC
* security groups using security group IDs, such as
* <code>sg-12345678</code>. For more information about Amazon EC2
* security groups, go to <a
* s.com/AWSEC2/latest/UserGuide/index.html?using-network-security.html">
* Using Security Groups</a> in the Amazon EC2 product documentation. For
* more information about Amazon VPC security groups, go to <a
* es.com/AmazonVPC/latest/UserGuide/index.html?VPC_SecurityGroups.html">
* Security Groups</a> in the Amazon VPC product documentation.
*/
@property (nonatomic, retain) NSMutableArray *securityGroups;
/**
* The user data available to the launched Amazon EC2 instances. For more
* information about Amazon EC2 user data, please see <a
* href="http://aws.amazon.com/ec2/"> Amazon EC2 product
* documentation</a>.
* <p>
* <b>Constraints:</b><br/>
* <b>Length: </b>0 - 21847<br/>
* <b>Pattern: </b>[\u0020-\uD7FF\uE000-\uFFFD\uD800\uDC00-\uDBFF\uDFFF\r\n\t]*<br/>
*/
@property (nonatomic, retain) NSString *userData;
/**
* The instance type of the Amazon EC2 instance. For more information
* about Amazon EC2 instance types, please see <a
* href="http://aws.amazon.com/ec2/"> Amazon EC2 product
* documentation</a>
* <p>
* <b>Constraints:</b><br/>
* <b>Length: </b>1 - 255<br/>
* <b>Pattern: </b>[\u0020-\uD7FF\uE000-\uFFFD\uD800\uDC00-\uDBFF\uDFFF\r\n\t]*<br/>
*/
@property (nonatomic, retain) NSString *instanceType;
/**
* The ID of the kernel associated with the Amazon EC2 AMI.
* <p>
* <b>Constraints:</b><br/>
* <b>Length: </b>1 - 255<br/>
* <b>Pattern: </b>[\u0020-\uD7FF\uE000-\uFFFD\uD800\uDC00-\uDBFF\uDFFF\r\n\t]*<br/>
*/
@property (nonatomic, retain) NSString *kernelId;
/**
* The ID of the RAM disk associated with the Amazon EC2 AMI.
* <p>
* <b>Constraints:</b><br/>
* <b>Length: </b>1 - 255<br/>
* <b>Pattern: </b>[\u0020-\uD7FF\uE000-\uFFFD\uD800\uDC00-\uDBFF\uDFFF\r\n\t]*<br/>
*/
@property (nonatomic, retain) NSString *ramdiskId;
/**
* A list of mappings that specify how block devices are exposed to the
* instance. Each mapping is made up of a <i>VirtualName</i>, a
* <i>DeviceName</i>, and an <i>ebs</i> data structure that contains
* information about the associated Elastic Block Storage volume. For
* more information about Amazon EC2 BlockDeviceMappings, go to <a
* WSEC2/latest/UserGuide/index.html?block-device-mapping-concepts.html">
* Block Device Mapping</a> in the Amazon EC2 product documentation.
*/
@property (nonatomic, retain) NSMutableArray *blockDeviceMappings;
/**
* Enables detailed monitoring, which is enabled by default. <p> When
* detailed monitoring is enabled, CloudWatch will generate metrics every
* minute and your account will be charged a fee. When you disable
* detailed monitoring, by specifying <code>False</code>, Cloudwatch will
* generate metrics every 5 minutes. For information about monitoring,
* see the <a href="http://aws.amazon.com/cloudwatch/">Amazon
* CloudWatch</a> product page.
*/
@property (nonatomic, retain) AutoScalingInstanceMonitoring *instanceMonitoring;
/**
* The maximum hourly price to be paid for any Spot Instance launched to
* fulfill the request. Spot Instances are launched when the price you
* specify exceeds the current Spot market price. For more information on
* launching Spot Instances, go to <a
* services.com/AutoScaling/latest/DeveloperGuide/US-SpotInstances.html">
* Using Auto Scaling to Launch Spot Instances</a> in the <i>Auto Scaling
* Developer Guide</i>.
* <p>
* <b>Constraints:</b><br/>
* <b>Length: </b>1 - 255<br/>
*/
@property (nonatomic, retain) NSString *spotPrice;
/**
* The name or the Amazon Resource Name (ARN) of the instance profile
* associated with the IAM role for the instance. For information on
* launching EC2 instances with an IAM role, go to <a
* ices.com/AutoScaling/latest/DeveloperGuide/us-iam-role.html">Launching
* Auto Scaling Instances With an IAM Role</a> in the <i>Auto Scaling
* Developer Guide</i>.
* <p>
* <b>Constraints:</b><br/>
* <b>Length: </b>1 - 1600<br/>
* <b>Pattern: </b>[\u0020-\uD7FF\uE000-\uFFFD\uD800\uDC00-\uDBFF\uDFFF\r\n\t]*<br/>
*/
@property (nonatomic, retain) NSString *iamInstanceProfile;
/**
* Adds a single object to securityGroups.
* This function will alloc and init securityGroups if not already done.
*/
-(void)addSecurityGroup:(NSString *)securityGroupObject;
/**
* Adds a single object to blockDeviceMappings.
* This function will alloc and init blockDeviceMappings if not already done.
*/
-(void)addBlockDeviceMapping:(AutoScalingBlockDeviceMapping *)blockDeviceMappingObject;
/**
* Returns a string representation of this object; useful for testing and
* debugging.
*
* @return A string representation of this object.
*/
-(NSString *)description;
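/**
 * Usage sketch (all values are illustrative only, and manual reference
 * counting is assumed for this SDK era):
 *
 *   AutoScalingCreateLaunchConfigurationRequest *request = [[[AutoScalingCreateLaunchConfigurationRequest alloc] init] autorelease];
 *   request.launchConfigurationName = @"my-launch-config";
 *   request.imageId = @"ami-12345678";
 *   request.instanceType = @"m1.small";
 */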
@end
| apache-2.0 |
aminmkhan/pentaho-kettle | engine/src/test/java/org/pentaho/di/trans/steps/reservoirsampling/ReservoirSamplingMetaTest.java | 1856 | /*! ******************************************************************************
*
* Pentaho Data Integration
*
* Copyright (C) 2002-2017 by Hitachi Vantara : http://www.pentaho.com
*
*******************************************************************************
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
******************************************************************************/
package org.pentaho.di.trans.steps.reservoirsampling;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.junit.Test;
import org.pentaho.di.core.exception.KettleException;
import org.pentaho.di.trans.steps.loadsave.LoadSaveTester;
public class ReservoirSamplingMetaTest {
@Test
public void testLoadSaveMeta() throws KettleException {
List<String> attributes = Arrays.asList( "sample_size", "seed" );
Map<String, String> getterMap = new HashMap<String, String>();
getterMap.put( "sample_size", "getSampleSize" );
getterMap.put( "seed", "getSeed" );
Map<String, String> setterMap = new HashMap<String, String>();
setterMap.put( "sample_size", "setSampleSize" );
setterMap.put( "seed", "setSeed" );
LoadSaveTester tester = new LoadSaveTester( ReservoirSamplingMeta.class, attributes, getterMap, setterMap );
tester.testSerialization();
}
}
| apache-2.0 |
DavidKarlas/roslyn | src/Workspaces/Core/Portable/Formatting/Engine/NodeOperations.cs | 2013 | // Copyright (c) Microsoft. All Rights Reserved. Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System.Collections.Generic;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.CodeAnalysis.Formatting.Rules;
using Microsoft.CodeAnalysis.Text;
namespace Microsoft.CodeAnalysis.Formatting
{
/// <summary>
    /// This collector gathers formatting operations that are based on a node.
/// </summary>
internal class NodeOperations
{
public static NodeOperations Empty = new NodeOperations();
public Task<List<IndentBlockOperation>> IndentBlockOperationTask { get; private set; }
public Task<List<SuppressOperation>> SuppressOperationTask { get; private set; }
public Task<List<AlignTokensOperation>> AlignmentOperationTask { get; private set; }
public Task<List<AnchorIndentationOperation>> AnchorIndentationOperationsTask { get; private set; }
public NodeOperations(Task<List<IndentBlockOperation>> indentBlockOperationTask, Task<List<SuppressOperation>> suppressOperationTask, Task<List<AnchorIndentationOperation>> anchorIndentationOperationsTask, Task<List<AlignTokensOperation>> alignmentOperationTask)
{
this.IndentBlockOperationTask = indentBlockOperationTask;
this.SuppressOperationTask = suppressOperationTask;
this.AlignmentOperationTask = alignmentOperationTask;
this.AnchorIndentationOperationsTask = anchorIndentationOperationsTask;
}
private NodeOperations()
{
this.IndentBlockOperationTask = Task.FromResult(new List<IndentBlockOperation>());
this.SuppressOperationTask = Task.FromResult(new List<SuppressOperation>());
this.AlignmentOperationTask = Task.FromResult(new List<AlignTokensOperation>());
this.AnchorIndentationOperationsTask = Task.FromResult(new List<AnchorIndentationOperation>());
}
}
}
| apache-2.0 |
P1start/rust | src/test/compile-fail/if-without-else-result.rs | 621 | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// error-pattern:mismatched types: expected `()`, found `bool`
extern crate debug;
fn main() {
let a = if true { true };
println!("{:?}", a);
}
| apache-2.0 |
brettfo/roslyn | src/Compilers/Core/Portable/Operations/OperationVisitor.cs | 1874 | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
#nullable disable
namespace Microsoft.CodeAnalysis.Operations
{
/// <summary>
/// Represents a <see cref="IOperation"/> visitor that visits only the single IOperation
/// passed into its Visit method.
/// </summary>
public abstract partial class OperationVisitor
{
// Make public after review: https://github.com/dotnet/roslyn/issues/21281
internal virtual void VisitFixed(IFixedOperation operation) =>
// https://github.com/dotnet/roslyn/issues/21281
//DefaultVisit(operation);
VisitNoneOperation(operation);
}
/// <summary>
/// Represents a <see cref="IOperation"/> visitor that visits only the single IOperation
/// passed into its Visit method with an additional argument of the type specified by the
/// <typeparamref name="TArgument"/> parameter and produces a value of the type specified by
/// the <typeparamref name="TResult"/> parameter.
/// </summary>
/// <typeparam name="TArgument">
/// The type of the additional argument passed to this visitor's Visit method.
/// </typeparam>
/// <typeparam name="TResult">
/// The type of the return value of this visitor's Visit method.
/// </typeparam>
public abstract partial class OperationVisitor<TArgument, TResult>
{
// Make public after review: https://github.com/dotnet/roslyn/issues/21281
internal virtual TResult VisitFixed(IFixedOperation operation, TArgument argument) =>
// https://github.com/dotnet/roslyn/issues/21281
//return DefaultVisit(operation, argument);
VisitNoneOperation(operation, argument);
}
}
| apache-2.0 |
mchavarriagam/clever-ruby | lib/clever-ruby/api_operations/results_list.rb | 699 | module Clever
module APIOperations
# Represents a list of results for a paged request.
class ResultsList
include Enumerable
# Create a results list from a PageList
# @api private
# @return [ResultsList]
def initialize(pagelist)
@pages = pagelist
end
# Iterate over results list
# @api public
# @return [nil]
# @example
# results = Clever::District.find # returns a ResultsList
# results.each do |district|
# puts district.name
# end
def each
@pages.each do |page|
page.each do |elem|
yield elem
end
end
end
end
end
end
| apache-2.0 |
awakecoding/FreeRDP | winpr/libwinpr/file/test/TestFileFindNextFile.c | 2603 |
#include <stdio.h>
#include <winpr/crt.h>
#include <winpr/file.h>
#include <winpr/path.h>
#include <winpr/tchar.h>
#include <winpr/windows.h>
static TCHAR testDirectory2File1[] = _T("TestDirectory2File1");
static TCHAR testDirectory2File2[] = _T("TestDirectory2File2");
int TestFileFindNextFile(int argc, char* argv[])
{
char* str;
int length;
BOOL status;
HANDLE hFind;
LPTSTR BasePath;
WIN32_FIND_DATA FindData;
TCHAR FilePath[PATHCCH_MAX_CCH];
WINPR_UNUSED(argc);
str = argv[1];
#ifdef UNICODE
length = MultiByteToWideChar(CP_UTF8, 0, str, strlen(str), NULL, 0);
BasePath = (WCHAR*)calloc((length + 1), sizeof(WCHAR));
if (!BasePath)
{
_tprintf(_T("Unable to allocate memory"));
return -1;
}
MultiByteToWideChar(CP_UTF8, 0, str, length, (LPWSTR)BasePath, length * sizeof(WCHAR));
BasePath[length] = 0;
#else
BasePath = _strdup(str);
if (!BasePath)
{
printf("Unable to allocate memory");
return -1;
}
length = strlen(BasePath);
#endif
/* Simple filter matching all files inside current directory */
CopyMemory(FilePath, BasePath, length * sizeof(TCHAR));
FilePath[length] = 0;
PathCchConvertStyle(BasePath, length, PATH_STYLE_WINDOWS);
NativePathCchAppend(FilePath, PATHCCH_MAX_CCH, _T("TestDirectory2"));
NativePathCchAppend(FilePath, PATHCCH_MAX_CCH, _T("TestDirectory2File*"));
free(BasePath);
_tprintf(_T("Finding file: %s\n"), FilePath);
hFind = FindFirstFile(FilePath, &FindData);
if (hFind == INVALID_HANDLE_VALUE)
{
_tprintf(_T("FindFirstFile failure: %s\n"), FilePath);
return -1;
}
_tprintf(_T("FindFirstFile: %s"), FindData.cFileName);
/**
* The current implementation does not enforce a particular order
*/
if ((_tcscmp(FindData.cFileName, testDirectory2File1) != 0) &&
(_tcscmp(FindData.cFileName, testDirectory2File2) != 0))
{
_tprintf(_T("FindFirstFile failure: Expected: %s, Actual: %s\n"), testDirectory2File1,
FindData.cFileName);
return -1;
}
status = FindNextFile(hFind, &FindData);
if (!status)
{
_tprintf(_T("FindNextFile failure: Expected: TRUE, Actual: %") _T(PRId32) _T("\n"), status);
return -1;
}
if ((_tcscmp(FindData.cFileName, testDirectory2File1) != 0) &&
(_tcscmp(FindData.cFileName, testDirectory2File2) != 0))
{
_tprintf(_T("FindNextFile failure: Expected: %s, Actual: %s\n"), testDirectory2File2,
FindData.cFileName);
return -1;
}
status = FindNextFile(hFind, &FindData);
if (status)
{
_tprintf(_T("FindNextFile failure: Expected: FALSE, Actual: %") _T(PRId32) _T("\n"),
status);
return -1;
}
FindClose(hFind);
return 0;
}
| apache-2.0 |
hackintoshrao/minio | cmd/storage-rpc-server_test.go | 4913 | /*
* Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"testing"
"github.com/minio/minio/pkg/disk"
)
const invalidToken = "invalidToken"
type testStorageRPCServer struct {
configDir string
token string
diskDirs []string
stServer *storageServer
endpoints EndpointList
}
func createTestStorageServer(t *testing.T) *testStorageRPCServer {
testPath, err := newTestConfig(globalMinioDefaultRegion)
if err != nil {
t.Fatalf("unable initialize config file, %s", err)
}
serverCred := serverConfig.GetCredential()
token, err := authenticateNode(serverCred.AccessKey, serverCred.SecretKey)
if err != nil {
t.Fatalf("unable for JWT to generate token, %s", err)
}
fsDirs, err := getRandomDisks(1)
if err != nil {
t.Fatalf("unable to create FS backend, %s", err)
}
endpoints := mustGetNewEndpointList(fsDirs...)
storageDisks, err := initStorageDisks(endpoints)
if err != nil {
t.Fatalf("unable to initialize storage disks, %s", err)
}
stServer := &storageServer{
storage: storageDisks[0],
path: "/disk1",
timestamp: UTCNow(),
}
return &testStorageRPCServer{
token: token,
configDir: testPath,
diskDirs: fsDirs,
endpoints: endpoints,
stServer: stServer,
}
}
func errorIfInvalidToken(t *testing.T, err error) {
realErr := errorCause(err)
if realErr != errInvalidToken {
t.Errorf("Expected to fail with %s but failed with %s", errInvalidToken, realErr)
}
}
func TestStorageRPCInvalidToken(t *testing.T) {
st := createTestStorageServer(t)
defer removeRoots(st.diskDirs)
defer removeAll(st.configDir)
storageRPC := st.stServer
// Following test cases are meant to exercise the invalid
// token code path of the storage RPC methods.
var err error
badAuthRPCArgs := AuthRPCArgs{AuthToken: "invalidToken"}
badGenericVolArgs := GenericVolArgs{
AuthRPCArgs: badAuthRPCArgs,
Vol: "myvol",
}
// 1. DiskInfoHandler
diskInfoReply := &disk.Info{}
err = storageRPC.DiskInfoHandler(&badAuthRPCArgs, diskInfoReply)
errorIfInvalidToken(t, err)
// 2. MakeVolHandler
makeVolArgs := &badGenericVolArgs
makeVolReply := &AuthRPCReply{}
err = storageRPC.MakeVolHandler(makeVolArgs, makeVolReply)
errorIfInvalidToken(t, err)
// 3. ListVolsHandler
listVolReply := &ListVolsReply{}
err = storageRPC.ListVolsHandler(&badAuthRPCArgs, listVolReply)
errorIfInvalidToken(t, err)
// 4. StatVolHandler
statVolReply := &VolInfo{}
statVolArgs := &badGenericVolArgs
err = storageRPC.StatVolHandler(statVolArgs, statVolReply)
errorIfInvalidToken(t, err)
// 5. DeleteVolHandler
deleteVolArgs := &badGenericVolArgs
deleteVolReply := &AuthRPCReply{}
err = storageRPC.DeleteVolHandler(deleteVolArgs, deleteVolReply)
errorIfInvalidToken(t, err)
// 6. StatFileHandler
statFileArgs := &StatFileArgs{
AuthRPCArgs: badAuthRPCArgs,
}
statReply := &FileInfo{}
err = storageRPC.StatFileHandler(statFileArgs, statReply)
errorIfInvalidToken(t, err)
// 7. ListDirHandler
listDirArgs := &ListDirArgs{
AuthRPCArgs: badAuthRPCArgs,
}
listDirReply := &[]string{}
err = storageRPC.ListDirHandler(listDirArgs, listDirReply)
errorIfInvalidToken(t, err)
// 8. ReadAllHandler
readFileArgs := &ReadFileArgs{
AuthRPCArgs: badAuthRPCArgs,
}
readFileReply := &[]byte{}
err = storageRPC.ReadAllHandler(readFileArgs, readFileReply)
errorIfInvalidToken(t, err)
// 9. ReadFileHandler
err = storageRPC.ReadFileHandler(readFileArgs, readFileReply)
errorIfInvalidToken(t, err)
// 10. PrepareFileHandler
prepFileArgs := &PrepareFileArgs{
AuthRPCArgs: badAuthRPCArgs,
}
prepFileReply := &AuthRPCReply{}
err = storageRPC.PrepareFileHandler(prepFileArgs, prepFileReply)
errorIfInvalidToken(t, err)
// 11. AppendFileHandler
appendArgs := &AppendFileArgs{
AuthRPCArgs: badAuthRPCArgs,
}
appendReply := &AuthRPCReply{}
err = storageRPC.AppendFileHandler(appendArgs, appendReply)
errorIfInvalidToken(t, err)
// 12. DeleteFileHandler
delFileArgs := &DeleteFileArgs{
AuthRPCArgs: badAuthRPCArgs,
}
delFileRely := &AuthRPCReply{}
err = storageRPC.DeleteFileHandler(delFileArgs, delFileRely)
errorIfInvalidToken(t, err)
// 13. RenameFileHandler
renameArgs := &RenameFileArgs{
AuthRPCArgs: badAuthRPCArgs,
}
renameReply := &AuthRPCReply{}
err = storageRPC.RenameFileHandler(renameArgs, renameReply)
errorIfInvalidToken(t, err)
}
| apache-2.0 |
iharkhukhrakou/XPagesExtensionLibrary | extlib/lwp/product/runtime/eclipse/plugins/com.ibm.xsp.extlib.domino/resources/web/dwa/common/nls/fi/menu.js | 196 | ({
L_MENU_GRID: "Valikkoruudukko",
L_MENU_ITEM_DISABLED: "%1 ei ole k\u00e4ytett\u00e4viss\u00e4",
L_MENU_ITEM_SUBMENU: "%1 (alivalikko)",
L_MENU_SUBMENU: "alivalikko",
L_MENU_CHECK: "valinta"
})
| apache-2.0 |
google/google-ctf | third_party/edk2/EdkCompatibilityPkg/Foundation/Guid/PrimaryConsoleOutDevice/PrimaryConsoleOutDevice.h | 1047 | /*++
Copyright (c) 2004, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
Module Name:
PrimaryConsoleOutDevice.h
Abstract:
--*/
#ifndef _PRIMARY_CONSOLE_OUT_DEVICE_H_
#define _PRIMARY_CONSOLE_OUT_DEVICE_H_
#define EFI_PRIMARY_CONSOLE_OUT_DEVICE_GUID \
{ 0x62bdf38a, 0xe3d5, 0x492c, {0x95, 0xc, 0x23, 0xa7, 0xf6, 0x6e, 0x67, 0x2e} }
extern EFI_GUID gEfiPrimaryConsoleOutDeviceGuid;
#endif
| apache-2.0 |
HebaKhaled/bposs | src/pt_antlr/antlr/CppCharFormatter.java | 2509 | package antlr;
/* ANTLR Translator Generator
* Project led by Terence Parr at http://www.jGuru.com
* Software rights: http://www.antlr.org/RIGHTS.html
*
* $Id: CppCharFormatter.java,v 1.1 2003/06/04 20:54:22 greg Exp $
*/
// C++ code generator by Pete Wells: [email protected]
class CppCharFormatter implements CharFormatter {
	/** Given a character value, return a string representing the character
	 * that can be embedded inside a string literal or character literal.
	 * This works for Java/C/C++ code-generation and languages with compatible
	 * special-character escaping.
* Code-generators for languages should override this method.
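	 * e.g. escapeChar('\n', true) yields "\\n", and escapeChar(0x12C, false) yields "\\u12c".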
* @param c The character of interest.
* @param forCharLiteral true to escape for char literal, false for string literal
*/
public String escapeChar(int c, boolean forCharLiteral) {
switch (c) {
case '\n' : return "\\n";
case '\t' : return "\\t";
case '\r' : return "\\r";
case '\\' : return "\\\\";
case '\'' : return forCharLiteral ? "\\'" : "'";
case '"' : return forCharLiteral ? "\"" : "\\\"";
default :
if ( c<' '||c>126 ) {
if (c > 255) {
return "\\u" + Integer.toString(c,16);
}
else {
return "\\" + Integer.toString(c,8);
}
}
else {
return String.valueOf((char)c);
}
}
}
/** Converts a String into a representation that can be use as a literal
* when surrounded by double-quotes.
* @param s The String to be changed into a literal
*/
public String escapeString(String s)
{
String retval = new String();
for (int i = 0; i < s.length(); i++)
{
retval += escapeChar(s.charAt(i), false);
}
return retval;
}
/** Given a character value, return a string representing the character
* literal that can be recognized by the target language compiler.
* This works for languages that use single-quotes for character literals.
* Code-generators for languages should override this method.
* @param c The character of interest.
*/
public String literalChar(int c) {
return "static_cast<unsigned char>('" + escapeChar(c, true) + "')";
}
/** Converts a String into a string literal
* This works for languages that use double-quotes for string literals.
* Code-generators for languages should override this method.
* @param s The String to be changed into a literal
*/
public String literalString(String s)
{
return "\"" + escapeString(s) + "\"";
}
}
| apache-2.0 |
apache/incubator-trafodion | wms/src/test/java/org/trafodion/wms/SmallTests.java | 909 | /**
* @@@ START COPYRIGHT @@@
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
* @@@ END COPYRIGHT @@@
**/
package org.trafodion.wms;
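/**
 * Marker interface used to tag small, fast-running unit tests (for use with a
 * JUnit {@code @Category}); this categorization purpose is inferred from
 * convention, not documented upstream.
 */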
public interface SmallTests {
}
| apache-2.0 |
apache/incubator-trafodion | wms/bin/local-servers.sh | 1338 | #!/bin/sh
#/**
# @@@ START COPYRIGHT @@@
#
#Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing,
#software distributed under the License is distributed on an
#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
#KIND, either express or implied. See the License for the
#specific language governing permissions and limitations
#under the License.
#
# @@@ END COPYRIGHT @@@
# */
# This is used for starting multiple servers on the same machine.
# run it from 'bin/wms'
bin=`dirname "${BASH_SOURCE-$0}"`
bin=`cd "$bin" >/dev/null && pwd`
if [ $# -lt 2 ]; then
S=`basename "${BASH_SOURCE-$0}"`
echo "Usage: $S [start|stop] offset(s)"
echo ""
echo " e.g. $S start 1 2"
exit
fi
export WMS_SERVER_OPTS=" "
run_server () {
DN=$2
export WMS_IDENT_STRING="$USER-$DN"
"$bin"/wms-daemon.sh $1 server
}
cmd=$1
shift;
for i in $*
do
run_server $cmd $i
done
| apache-2.0 |
chandresh-pancholi/ignite | modules/hibernate-core/src/main/java/org/apache/ignite/cache/hibernate/HibernateCacheProxy.java | 26223 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.cache.hibernate;
import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.Map;
import java.util.Set;
import java.util.function.Supplier;
import javax.cache.Cache;
import javax.cache.expiry.ExpiryPolicy;
import javax.cache.processor.EntryProcessor;
import javax.cache.processor.EntryProcessorResult;
import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.cache.CacheEntry;
import org.apache.ignite.cache.CacheMetrics;
import org.apache.ignite.cache.CachePeekMode;
import org.apache.ignite.cache.affinity.Affinity;
import org.apache.ignite.cluster.ClusterGroup;
import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.internal.IgniteInternalFuture;
import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
import org.apache.ignite.internal.processors.cache.GridCacheContext;
import org.apache.ignite.internal.processors.cache.IgniteInternalCache;
import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal;
import org.apache.ignite.lang.IgniteBiPredicate;
import org.apache.ignite.mxbean.CacheMetricsMXBean;
import org.apache.ignite.transactions.Transaction;
import org.apache.ignite.transactions.TransactionConcurrency;
import org.apache.ignite.transactions.TransactionIsolation;
import org.jetbrains.annotations.Nullable;
/**
* Hibernate cache proxy used to substitute hibernate keys with ignite keys.
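 * <p>
 * Construction sketch (the supplier and key transformer are assumed to be
 * created elsewhere; names are hypothetical):
 * <pre>
 * HibernateCacheProxy proxy = new HibernateCacheProxy(
 *     "entityCache", cacheSupplier, hibernateKeyTransformer);
 * </pre>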
*/
public class HibernateCacheProxy implements IgniteInternalCache<Object, Object> {
    /** Delegate is lazily loaded, which allows caches to be created after the SPI is bootstrapped. */
private final Supplier<IgniteInternalCache<Object, Object>> delegate;
/** Transformer. */
private final HibernateKeyTransformer keyTransformer;
/** */
private String cacheName;
/**
* @param cacheName Cache name. Should match delegate.get().name(). Needed for lazy loading.
* @param delegate Delegate.
* @param keyTransformer Key keyTransformer.
*/
HibernateCacheProxy(
String cacheName,
Supplier<IgniteInternalCache<Object, Object>> delegate,
HibernateKeyTransformer keyTransformer
) {
assert cacheName != null;
assert delegate != null;
assert keyTransformer != null;
this.cacheName = cacheName;
this.delegate = delegate;
this.keyTransformer = keyTransformer;
}
/**
* @return HibernateKeyTransformer
*/
public HibernateKeyTransformer keyTransformer() {
return keyTransformer;
}
/** {@inheritDoc} */
@Override public String name() {
return cacheName;
}
/** {@inheritDoc} */
@Override public boolean skipStore() {
return delegate.get().skipStore();
}
/** {@inheritDoc} */
@Override public IgniteInternalCache setSkipStore(boolean skipStore) {
return delegate.get().setSkipStore(skipStore);
}
/** {@inheritDoc} */
@Override public boolean isEmpty() {
return delegate.get().isEmpty();
}
/** {@inheritDoc} */
@Override public boolean containsKey(Object key) {
return delegate.get().containsKey(keyTransformer.transform(key));
}
/** {@inheritDoc} */
@Override public IgniteInternalFuture<Boolean> containsKeyAsync(Object key) {
return delegate.get().containsKeyAsync(keyTransformer.transform(key));
}
/** {@inheritDoc} */
@Override public boolean containsKeys(Collection keys) {
return delegate.get().containsKey(transform(keys));
}
/** {@inheritDoc} */
@Override public IgniteInternalFuture<Boolean> containsKeysAsync(Collection keys) {
return delegate.get().containsKeysAsync(transform(keys));
}
/** {@inheritDoc} */
@Nullable @Override public Object localPeek(
Object key,
CachePeekMode[] peekModes
) throws IgniteCheckedException {
return delegate.get().localPeek(keyTransformer.transform(key), peekModes);
}
/** {@inheritDoc} */
@Override public Iterable<Cache.Entry<Object, Object>> localEntries(
CachePeekMode[] peekModes
) throws IgniteCheckedException {
return delegate.get().localEntries(peekModes);
}
/** {@inheritDoc} */
@Nullable @Override public Object get(Object key) throws IgniteCheckedException {
return delegate.get().get(keyTransformer.transform(key));
}
/** {@inheritDoc} */
@Nullable @Override public CacheEntry getEntry(Object key) throws IgniteCheckedException {
return delegate.get().getEntry(keyTransformer.transform(key));
}
/** {@inheritDoc} */
@Override public IgniteInternalFuture getAsync(Object key) {
return delegate.get().getAsync(keyTransformer.transform(key));
}
/** {@inheritDoc} */
@Override public IgniteInternalFuture<CacheEntry<Object, Object>> getEntryAsync(Object key) {
return delegate.get().getEntryAsync(keyTransformer.transform(key));
}
/** {@inheritDoc} */
@Override public Map getAll(@Nullable Collection keys) throws IgniteCheckedException {
return delegate.get().getAll(transform(keys));
}
/** {@inheritDoc} */
@Override public Collection<CacheEntry<Object, Object>> getEntries(
@Nullable Collection keys) throws IgniteCheckedException {
return delegate.get().getEntries(transform(keys));
}
/** {@inheritDoc} */
@Override public IgniteInternalFuture<Map<Object, Object>> getAllAsync(@Nullable Collection keys) {
return delegate.get().getAllAsync(transform(keys));
}
/** {@inheritDoc} */
@Override public IgniteInternalFuture<Collection<CacheEntry<Object, Object>>> getEntriesAsync(
@Nullable Collection keys
) {
return delegate.get().getEntriesAsync(transform(keys));
}
/** {@inheritDoc} */
@Nullable @Override public Object getAndPut(Object key, Object val) throws IgniteCheckedException {
return delegate.get().getAndPut(keyTransformer.transform(key), val);
}
/** {@inheritDoc} */
@Override public IgniteInternalFuture getAndPutAsync(Object key, Object val) {
return delegate.get().getAndPutAsync(keyTransformer.transform(key), val);
}
/** {@inheritDoc} */
@Override public boolean put(Object key, Object val) throws IgniteCheckedException {
return delegate.get().put(keyTransformer.transform(key), val);
}
/** {@inheritDoc} */
@Override public IgniteInternalFuture<Boolean> putAsync(Object key, Object val) {
return delegate.get().putAsync(keyTransformer.transform(key), val);
}
/** {@inheritDoc} */
@Nullable @Override public Object getAndPutIfAbsent(Object key, Object val) throws IgniteCheckedException {
return delegate.get().getAndPutIfAbsent(keyTransformer.transform(key), val);
}
/** {@inheritDoc} */
@Override public IgniteInternalFuture getAndPutIfAbsentAsync(Object key, Object val) {
return delegate.get().getAndPutIfAbsentAsync(keyTransformer.transform(key), val);
}
/** {@inheritDoc} */
@Override public boolean putIfAbsent(Object key, Object val) throws IgniteCheckedException {
return delegate.get().putIfAbsent(keyTransformer.transform(key), val);
}
/** {@inheritDoc} */
@Override public IgniteInternalFuture<Boolean> putIfAbsentAsync(Object key, Object val) {
return delegate.get().putIfAbsentAsync(keyTransformer.transform(key), val);
}
/** {@inheritDoc} */
@Nullable @Override public Object getAndReplace(Object key, Object val) throws IgniteCheckedException {
return delegate.get().getAndReplace(keyTransformer.transform(key), val);
}
/** {@inheritDoc} */
@Override public IgniteInternalFuture getAndReplaceAsync(Object key, Object val) {
return delegate.get().getAndReplaceAsync(keyTransformer.transform(key), val);
}
/** {@inheritDoc} */
@Override public boolean replace(Object key, Object val) throws IgniteCheckedException {
return delegate.get().replace(keyTransformer.transform(key), val);
}
/** {@inheritDoc} */
@Override public IgniteInternalFuture<Boolean> replaceAsync(Object key, Object val) {
return delegate.get().replaceAsync(keyTransformer.transform(key), val);
}
/** {@inheritDoc} */
@Override public boolean replace(Object key, Object oldVal, Object newVal) throws IgniteCheckedException {
return delegate.get().replace(keyTransformer.transform(key), oldVal, newVal);
}
/** {@inheritDoc} */
@Override public IgniteInternalFuture<Boolean> replaceAsync(Object key, Object oldVal, Object newVal) {
return delegate.get().replaceAsync(keyTransformer.transform(key), oldVal, newVal);
}
/** {@inheritDoc} */
@Override public void putAll(@Nullable Map m) throws IgniteCheckedException {
delegate.get().putAll(transform(m));
}
/** {@inheritDoc} */
@Override public IgniteInternalFuture<?> putAllAsync(@Nullable Map m) {
return delegate.get().putAllAsync(transform(m));
}
/** {@inheritDoc} */
@Override public Set keySet() {
return delegate.get().keySet();
}
/** {@inheritDoc} */
@Override public Set<Cache.Entry<Object, Object>> entrySet() {
return delegate.get().entrySet();
}
/** {@inheritDoc} */
@Override public Transaction txStart(
TransactionConcurrency concurrency,
TransactionIsolation isolation
) {
return delegate.get().txStart(concurrency, isolation);
}
/** {@inheritDoc} */
@Override public GridNearTxLocal txStartEx(
TransactionConcurrency concurrency,
TransactionIsolation isolation
) {
return delegate.get().txStartEx(concurrency, isolation);
}
/** {@inheritDoc} */
@Override public Transaction txStart(
TransactionConcurrency concurrency,
TransactionIsolation isolation,
long timeout,
int txSize
) {
return delegate.get().txStart(concurrency, isolation, timeout, txSize);
}
/** {@inheritDoc} */
@Nullable @Override public GridNearTxLocal tx() {
return delegate.get().tx();
}
/** {@inheritDoc} */
@Override public boolean evict(Object key) {
return delegate.get().evict(keyTransformer.transform(key));
}
/** {@inheritDoc} */
@Override public void evictAll(@Nullable Collection keys) {
delegate.get().evictAll(transform(keys));
}
/** {@inheritDoc} */
@Override public void clearLocally(boolean srv, boolean near, boolean readers) {
delegate.get().clearLocally(srv, near, readers);
}
/** {@inheritDoc} */
@Override public boolean clearLocally(Object key) {
return delegate.get().clearLocally(keyTransformer.transform(key));
}
/** {@inheritDoc} */
@Override public void clearLocallyAll(Set keys, boolean srv, boolean near, boolean readers) {
delegate.get().clearLocallyAll((Set<?>)transform(keys), srv, near, readers);
}
/** {@inheritDoc} */
@Override public void clear(Object key) throws IgniteCheckedException {
delegate.get().clear(keyTransformer.transform(key));
}
/** {@inheritDoc} */
@Override public void clearAll(Set keys) throws IgniteCheckedException {
delegate.get().clearAll((Set<?>)transform(keys));
}
/** {@inheritDoc} */
@Override public void clear() throws IgniteCheckedException {
delegate.get().clear();
}
/** {@inheritDoc} */
@Override public IgniteInternalFuture<?> clearAsync() {
return delegate.get().clearAsync();
}
/** {@inheritDoc} */
@Override public IgniteInternalFuture<?> clearAsync(Object key) {
return delegate.get().clearAsync(keyTransformer.transform(key));
}
/** {@inheritDoc} */
@Override public IgniteInternalFuture<?> clearAllAsync(Set keys) {
return delegate.get().clearAllAsync((Set<?>)transform(keys));
}
/** {@inheritDoc} */
@Nullable @Override public Object getAndRemove(Object key) throws IgniteCheckedException {
return delegate.get().getAndRemove(keyTransformer.transform(key));
}
/** {@inheritDoc} */
@Override public IgniteInternalFuture getAndRemoveAsync(Object key) {
return delegate.get().getAndRemoveAsync(keyTransformer.transform(key));
}
/** {@inheritDoc} */
@Override public boolean remove(Object key) throws IgniteCheckedException {
return delegate.get().remove(keyTransformer.transform(key));
}
/** {@inheritDoc} */
@Override public IgniteInternalFuture<Boolean> removeAsync(Object key) {
return delegate.get().removeAsync(keyTransformer.transform(key));
}
/** {@inheritDoc} */
@Override public boolean remove(Object key, Object val) throws IgniteCheckedException {
return delegate.get().remove(keyTransformer.transform(key), val);
}
/** {@inheritDoc} */
@Override public IgniteInternalFuture<Boolean> removeAsync(Object key, Object val) {
return delegate.get().removeAsync(keyTransformer.transform(key), val);
}
/** {@inheritDoc} */
@Override public void removeAll(@Nullable Collection keys) throws IgniteCheckedException {
delegate.get().removeAll(transform(keys));
}
/** {@inheritDoc} */
@Override public IgniteInternalFuture<?> removeAllAsync(@Nullable Collection keys) {
return delegate.get().removeAllAsync(transform(keys));
}
/** {@inheritDoc} */
@Override public void removeAll() throws IgniteCheckedException {
delegate.get().removeAll();
}
/** {@inheritDoc} */
@Override public IgniteInternalFuture<?> removeAllAsync() {
return delegate.get().removeAllAsync();
}
/** {@inheritDoc} */
@Override public boolean lock(Object key, long timeout) throws IgniteCheckedException {
return delegate.get().lock(keyTransformer.transform(key), timeout);
}
/** {@inheritDoc} */
@Override public IgniteInternalFuture<Boolean> lockAsync(Object key, long timeout) {
return delegate.get().lockAsync(keyTransformer.transform(key), timeout);
}
/** {@inheritDoc} */
@Override public boolean lockAll(@Nullable Collection keys, long timeout) throws IgniteCheckedException {
return delegate.get().lockAll(transform(keys), timeout);
}
/** {@inheritDoc} */
@Override public IgniteInternalFuture<Boolean> lockAllAsync(@Nullable Collection keys, long timeout) {
return delegate.get().lockAllAsync(transform(keys), timeout);
}
/** {@inheritDoc} */
@Override public void unlock(Object key) throws IgniteCheckedException {
delegate.get().unlock(keyTransformer.transform(key));
}
/** {@inheritDoc} */
@Override public void unlockAll(@Nullable Collection keys) throws IgniteCheckedException {
delegate.get().unlockAll(transform(keys));
}
/** {@inheritDoc} */
@Override public boolean isLocked(Object key) {
return delegate.get().isLocked(keyTransformer.transform(key));
}
/** {@inheritDoc} */
@Override public boolean isLockedByThread(Object key) {
return delegate.get().isLockedByThread(keyTransformer.transform(key));
}
/** {@inheritDoc} */
@Override public int size() {
return delegate.get().size();
}
/** {@inheritDoc} */
@Override public long sizeLong() {
return delegate.get().sizeLong();
}
/** {@inheritDoc} */
@Override public int localSize(CachePeekMode[] peekModes) throws IgniteCheckedException {
return delegate.get().localSize(peekModes);
}
/** {@inheritDoc} */
@Override public long localSizeLong(CachePeekMode[] peekModes) throws IgniteCheckedException {
return delegate.get().localSizeLong(peekModes);
}
/** {@inheritDoc} */
@Override public long localSizeLong(int partition, CachePeekMode[] peekModes) throws IgniteCheckedException {
return delegate.get().localSizeLong(partition, peekModes);
}
/** {@inheritDoc} */
@Override public int size(CachePeekMode[] peekModes) throws IgniteCheckedException {
return delegate.get().size(peekModes);
}
/** {@inheritDoc} */
@Override public long sizeLong(CachePeekMode[] peekModes) throws IgniteCheckedException {
return delegate.get().sizeLong(peekModes);
}
/** {@inheritDoc} */
@Override public long sizeLong(int partition, CachePeekMode[] peekModes) throws IgniteCheckedException {
return delegate.get().sizeLong(partition, peekModes);
}
/** {@inheritDoc} */
@Override public IgniteInternalFuture<Integer> sizeAsync(CachePeekMode[] peekModes) {
return delegate.get().sizeAsync(peekModes);
}
/** {@inheritDoc} */
@Override public IgniteInternalFuture<Long> sizeLongAsync(CachePeekMode[] peekModes) {
return delegate.get().sizeLongAsync(peekModes);
}
/** {@inheritDoc} */
@Override public IgniteInternalFuture<Long> sizeLongAsync(int partition, CachePeekMode[] peekModes) {
return delegate.get().sizeLongAsync(partition, peekModes);
}
/** {@inheritDoc} */
@Override public int nearSize() {
return delegate.get().nearSize();
}
/** {@inheritDoc} */
@Override public int primarySize() {
return delegate.get().primarySize();
}
/** {@inheritDoc} */
@Override public long primarySizeLong() {
return delegate.get().primarySizeLong();
}
/** {@inheritDoc} */
@Override public CacheConfiguration configuration() {
return delegate.get().configuration();
}
/** {@inheritDoc} */
@Override public Affinity affinity() {
return delegate.get().affinity();
}
/** {@inheritDoc} */
@Override public CacheMetrics clusterMetrics() {
return delegate.get().clusterMetrics();
}
/** {@inheritDoc} */
@Override public CacheMetrics clusterMetrics(ClusterGroup grp) {
return delegate.get().clusterMetrics(grp);
}
/** {@inheritDoc} */
@Override public CacheMetrics localMetrics() {
return delegate.get().localMetrics();
}
/** {@inheritDoc} */
@Override public CacheMetricsMXBean clusterMxBean() {
return delegate.get().clusterMxBean();
}
/** {@inheritDoc} */
@Override public CacheMetricsMXBean localMxBean() {
return delegate.get().localMxBean();
}
/** {@inheritDoc} */
@Override public long offHeapEntriesCount() {
return delegate.get().offHeapEntriesCount();
}
/** {@inheritDoc} */
@Override public long offHeapAllocatedSize() {
return delegate.get().offHeapAllocatedSize();
}
/** {@inheritDoc} */
@Override public IgniteInternalFuture<?> rebalance() {
return delegate.get().rebalance();
}
/** {@inheritDoc} */
@Nullable @Override public Object getForcePrimary(Object key) throws IgniteCheckedException {
return delegate.get().getForcePrimary(keyTransformer.transform(key));
}
/** {@inheritDoc} */
@Override public IgniteInternalFuture getForcePrimaryAsync(Object key) {
return delegate.get().getForcePrimaryAsync(keyTransformer.transform(key));
}
/** {@inheritDoc} */
@Override public Map getAllOutTx(Set keys) throws IgniteCheckedException {
return delegate.get().getAllOutTx((Set<?>)transform(keys));
}
/** {@inheritDoc} */
@Override public IgniteInternalFuture<Map<Object, Object>> getAllOutTxAsync(Set keys) {
return delegate.get().getAllOutTxAsync((Set<?>)transform(keys));
}
/** {@inheritDoc} */
@Nullable @Override public ExpiryPolicy expiry() {
return delegate.get().expiry();
}
/** {@inheritDoc} */
@Override public IgniteInternalCache withExpiryPolicy(ExpiryPolicy plc) {
return delegate.get().withExpiryPolicy(plc);
}
/** {@inheritDoc} */
@Override public IgniteInternalCache withNoRetries() {
return delegate.get().withNoRetries();
}
/** {@inheritDoc} */
@Override public <K1, V1> IgniteInternalCache<K1, V1> withAllowAtomicOpsInTx() {
return delegate.get().withAllowAtomicOpsInTx();
}
/** {@inheritDoc} */
@Override public GridCacheContext context() {
return delegate.get().context();
}
/** {@inheritDoc} */
@Override public void localLoadCache(
@Nullable IgniteBiPredicate p,
@Nullable Object... args
) throws IgniteCheckedException {
delegate.get().localLoadCache(p, args);
}
/** {@inheritDoc} */
@Override public IgniteInternalFuture<?> localLoadCacheAsync(
@Nullable IgniteBiPredicate p,
@Nullable Object... args
) {
return delegate.get().localLoadCacheAsync(p, args);
}
/** {@inheritDoc} */
@Override public Collection<Integer> lostPartitions() {
return delegate.get().lostPartitions();
}
/** {@inheritDoc} */
@Override public void preloadPartition(int part) throws IgniteCheckedException {
delegate.get().preloadPartition(part);
}
/** {@inheritDoc} */
@Override public IgniteInternalFuture<?> preloadPartitionAsync(int part) throws IgniteCheckedException {
return delegate.get().preloadPartitionAsync(part);
}
/** {@inheritDoc} */
@Override public boolean localPreloadPartition(int part) throws IgniteCheckedException {
return delegate.get().localPreloadPartition(part);
}
/** {@inheritDoc} */
@Nullable @Override public EntryProcessorResult invoke(
@Nullable AffinityTopologyVersion topVer,
Object key,
EntryProcessor entryProcessor,
Object... args
) throws IgniteCheckedException {
return delegate.get().invoke(topVer, key, entryProcessor, args);
}
/** {@inheritDoc} */
@Override public IgniteInternalFuture<Map> invokeAllAsync(Map map, Object... args) {
return delegate.get().invokeAllAsync(map, args);
}
/** {@inheritDoc} */
@Override public Map invokeAll(Map map, Object... args) throws IgniteCheckedException {
return delegate.get().invokeAll(map, args);
}
/** {@inheritDoc} */
@Override public IgniteInternalFuture<Map> invokeAllAsync(Set keys, EntryProcessor entryProcessor, Object... args) {
return delegate.get().invokeAllAsync((Set<?>)transform(keys), entryProcessor, args);
}
/** {@inheritDoc} */
@Override public Map invokeAll(Set keys, EntryProcessor entryProcessor, Object... args) throws IgniteCheckedException {
return delegate.get().invokeAll((Set<?>)transform(keys), entryProcessor, args);
}
/** {@inheritDoc} */
@Override public IgniteInternalFuture<EntryProcessorResult> invokeAsync(
Object key,
EntryProcessor entryProcessor,
Object... args
) {
return delegate.get().invokeAsync(keyTransformer.transform(key), entryProcessor, args);
}
/** {@inheritDoc} */
@Nullable @Override public EntryProcessorResult invoke(
Object key,
EntryProcessor entryProcessor,
Object... args
) throws IgniteCheckedException {
return delegate.get().invoke(keyTransformer.transform(key), entryProcessor, args);
}
/** {@inheritDoc} */
@Override public Iterator<Cache.Entry<Object, Object>> scanIterator(
boolean keepBinary,
@Nullable IgniteBiPredicate p
) throws IgniteCheckedException {
return delegate.get().scanIterator(keepBinary, p);
}
/** {@inheritDoc} */
@Override public IgniteInternalFuture<?> removeAllConflictAsync(Map drMap) throws IgniteCheckedException {
return delegate.get().removeAllConflictAsync(drMap);
}
/** {@inheritDoc} */
@Override public void removeAllConflict(Map drMap) throws IgniteCheckedException {
delegate.get().removeAllConflict(drMap);
}
/** {@inheritDoc} */
@Override public IgniteInternalFuture<?> putAllConflictAsync(Map drMap) throws IgniteCheckedException {
return delegate.get().putAllConflictAsync(drMap);
}
/** {@inheritDoc} */
@Override public void putAllConflict(Map drMap) throws IgniteCheckedException {
delegate.get().putAllConflict(drMap);
}
/** {@inheritDoc} */
@Override public IgniteInternalCache keepBinary() {
return delegate.get().keepBinary();
}
/** {@inheritDoc} */
@Override public IgniteInternalCache cache() {
return delegate.get().cache();
}
/** {@inheritDoc} */
@Override public Iterator iterator() {
return delegate.get().iterator();
}
/**
* @param keys Keys.
*/
private Collection<Object> transform(Collection<Object> keys) {
// Use a LinkedHashSet (rather than a LinkedList) so that callers which cast
// the result to Set (clearAll, getAllOutTx, invokeAll, ...) cannot fail
// with a ClassCastException.
Collection<Object> res = new LinkedHashSet<>();
for (Object o : keys)
res.add(keyTransformer.transform(o));
return res;
}
/**
* @param map Map.
*/
private Map<Object, Object> transform(Map<Object, Object> map) {
Map<Object, Object> res = new HashMap<>();
Set<Map.Entry<Object, Object>> ents = map.entrySet();
for (Map.Entry<Object, Object> e : ents)
res.put(keyTransformer.transform(e.getKey()), e.getValue());
return res;
}
}
| apache-2.0 |
innerverse/typedoc | src/td/output/plugins/PrettyPrintPlugin.ts | 5741 | module td.output
{
/**
* List of states the parser of [[PrettyPrintPlugin]] can be in.
*/
enum PrettyPrintState {
/**
* Default state of the parser. Empty lines will be removed and indentation will be adjusted.
*/
Default,
/**
* Comment state, the parser waits for a comment closing tag.
*/
Comment,
/**
* Pre state, the parser waits for the closing tag of the current pre block.
*/
Pre
}
/**
* A plugin that pretty prints the generated html.
*
* This not only makes the generated html source code more readable; by removing
* blank lines and unnecessary whitespace, the size of the documentation is
* reduced without visual impact.
*
* At the time of writing, the docs of TypeDoc took 97.8 MB without this plugin
* and 66.4 MB with it enabled, so it reduced the size to 68% of the original output.
*/
export class PrettyPrintPlugin extends RendererPlugin
{
/**
* Map of all tags that will be ignored.
*/
static IGNORED_TAGS:any = {
area: true,
base: true,
br: true,
wbr: true,
col: true,
command: true,
embed: true,
hr: true,
img: true,
input: true,
link: true,
meta: true,
param: true,
source: true
};
/**
* Map of all tags that prevent this plugin from modifying the following code.
*/
static PRE_TAGS:any = {
pre: true,
code: true,
textarea: true,
script: true,
style: true
};
/**
* Create a new PrettyPrintPlugin instance.
*
* @param renderer The renderer this plugin should be attached to.
*/
constructor(renderer:Renderer) {
super(renderer);
renderer.on(Renderer.EVENT_END_PAGE, this.onRendererEndPage, this, -1024);
}
/**
* Triggered after a document has been rendered, just before it is written to disc.
*
* @param event
*/
onRendererEndPage(event:OutputPageEvent) {
var match, line, lineState, lineDepth, tagName, preName;
var tagExp = /<\s*(\w+)[^>]*>|<\/\s*(\w+)[^>]*>|<!--|-->/g;
var emptyLineExp = /^[\s]*$/;
var minLineDepth = 1;
var state = PrettyPrintState.Default;
var stack = [];
var lines = event.contents.split(/\r\n?|\n/);
var index = 0;
var count = lines.length;
while (index < count) {
line = lines[index];
if (emptyLineExp.test(line)) {
if (state == PrettyPrintState.Default) {
lines.splice(index, 1);
count -= 1;
continue;
}
} else {
lineState = state;
lineDepth = stack.length;
while (match = tagExp.exec(line)) {
if (state == PrettyPrintState.Comment) {
if (match[0] == '-->') {
state = PrettyPrintState.Default;
}
} else if (state == PrettyPrintState.Pre) {
if (match[2] && match[2].toLowerCase() == preName) {
state = PrettyPrintState.Default;
}
} else {
if (match[0] == '<!--') {
state = PrettyPrintState.Comment;
} else if (match[1]) {
tagName = match[1].toLowerCase();
if (tagName in PrettyPrintPlugin.IGNORED_TAGS) continue;
if (tagName in PrettyPrintPlugin.PRE_TAGS) {
state = PrettyPrintState.Pre;
preName = tagName;
} else {
if (tagName == 'body') minLineDepth = 2;
stack.push(tagName);
}
} else if (match[2]) {
tagName = match[2].toLowerCase();
if (tagName in PrettyPrintPlugin.IGNORED_TAGS) continue;
var n = stack.lastIndexOf(tagName);
if (n != -1) {
stack.length = n;
}
}
}
}
if (lineState == PrettyPrintState.Default) {
lineDepth = Math.min(lineDepth, stack.length);
line = line.replace(/^\s+/, '').replace(/\s+$/, '');
if (lineDepth > minLineDepth) {
line = Array(lineDepth - minLineDepth + 1).join('\t') + line;
}
lines[index] = line;
}
}
index++;
}
event.contents = lines.join('\n');
}
}
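/*
 * Minimal before/after sketch (editor's note; assumed input, for illustration only):
 *
 *   before:  "<body>\n\n  <div>\n\n      <p>x</p>\n  </div>\n</body>"
 *   after:   "<body>\n<div>\n\t<p>x</p>\n</div>\n</body>"
 *
 * Blank lines are dropped and indentation is rebuilt from the open-tag stack
 * (one tab per level below <body>), while comments and the contents of
 * PRE_TAGS such as <pre> and <code> pass through untouched.
 */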
/**
* Register this plugin.
*/
Renderer.registerPlugin('prettyPrint', PrettyPrintPlugin);
} | apache-2.0 |
apache/incubator-trafodion | dcs/src/main/java/org/trafodion/dcs/master/listener/ConnectionContext.java | 3102 | /**
* @@@ START COPYRIGHT @@@
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
* @@@ END COPYRIGHT @@@
*/
package org.trafodion.dcs.master.listener;
import java.sql.SQLException;
import java.io.*;
import java.nio.*;
import java.nio.channels.*;
import java.nio.channels.spi.*;
import java.net.*;
import java.util.*;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
class ConnectionContext {
private static final Log LOG = LogFactory.getLog(ConnectionContext.class);
String datasource = "";
String catalog = "";
String schema = "";
String location = "";
String userRole = "";
String connectOptions = "";
short accessMode;
short autoCommit;
int queryTimeoutSec;
int idleTimeoutSec;
int loginTimeoutSec;
short txnIsolationLevel;
short rowSetSize;
int diagnosticFlag;
int processId;
String computerName = "";
String windowText = "";
VersionList clientVersionList = null;
UserDesc user = null;
int ctxACP;
int ctxDataLang;
int ctxErrorLang;
short ctxCtrlInferNXHAR;
short cpuToUse;
short cpuToUseEnd;
int srvrType;
short retryCount;
int optionFlags1;
int optionFlags2;
String vproc;
String client;
ConnectionContext(){
clientVersionList = new VersionList();
user = new UserDesc();
}
void extractFromByteBuffer(ByteBuffer buf) throws java.io.UnsupportedEncodingException {
datasource = Util.extractString(buf);
catalog= Util.extractString(buf);
schema= Util.extractString(buf);
location= Util.extractString(buf);
userRole= Util.extractString(buf);
accessMode=buf.getShort();
autoCommit=buf.getShort();
queryTimeoutSec=buf.getInt();
idleTimeoutSec=buf.getInt();
loginTimeoutSec=buf.getInt();
txnIsolationLevel=buf.getShort();
rowSetSize=buf.getShort();
diagnosticFlag=buf.getInt();
processId=buf.getInt();
computerName=Util.extractString(buf);
windowText=Util.extractString(buf);
ctxACP=buf.getInt();
ctxDataLang=buf.getInt();
ctxErrorLang=buf.getInt();
ctxCtrlInferNXHAR=buf.getShort();
cpuToUse=buf.getShort();
cpuToUseEnd=buf.getShort();
connectOptions=Util.extractString(buf);
clientVersionList.extractFromByteBuffer(buf);
user.extractFromByteBuffer(buf);
srvrType = buf.getInt();
retryCount = buf.getShort();
optionFlags1 = buf.getInt();
optionFlags2 = buf.getInt();
vproc= Util.extractString(buf);
client= Util.extractString(buf);
}
}
| apache-2.0 |
RomanYudintsev/hunter | maintenance/local-upload/qt/win/run-win-vs-12-2013-win64.bat | 486 | REM {
set TOOLCHAIN=vs-12-2013-win64
set PROJECT_DIR=examples/qt-widgets
.\jenkins.py --verbose --clear-except-download || exit /b 1
set PROJECT_DIR=examples/qt-core
.\jenkins.py --verbose --nocreate || exit /b 1
set PROJECT_DIR=examples/qt-qml
.\jenkins.py --verbose --nocreate || exit /b 1
set PROJECT_DIR=examples/qt-location
.\jenkins.py --verbose --nocreate || exit /b 1
set PROJECT_DIR=examples/qt-camera
.\jenkins.py --verbose --upload --nocreate || exit /b 1
REM }
| bsd-2-clause |
pgdoval/pgdeploy | pgdeploy/src/test/resources/installation/include/postgresql/server/catalog/pg_enum.h | 1882 | /*-------------------------------------------------------------------------
*
* pg_enum.h
* definition of the system "enum" relation (pg_enum)
* along with the relation's initial contents.
*
*
* Copyright (c) 2006-2016, PostgreSQL Global Development Group
*
* src/include/catalog/pg_enum.h
*
* NOTES
* the genbki.pl script reads this file and generates .bki
* information from the DATA() statements.
*
* XXX do NOT break up DATA() statements into multiple lines!
* the scripts are not as smart as you might think...
*
*-------------------------------------------------------------------------
*/
#ifndef PG_ENUM_H
#define PG_ENUM_H
#include "catalog/genbki.h"
#include "nodes/pg_list.h"
/* ----------------
* pg_enum definition. cpp turns this into
* typedef struct FormData_pg_enum
* ----------------
*/
#define EnumRelationId 3501
CATALOG(pg_enum,3501)
{
Oid enumtypid; /* OID of owning enum type */
float4 enumsortorder; /* sort position of this enum value */
NameData enumlabel; /* text representation of enum value */
} FormData_pg_enum;
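/* Illustrative example (editor's note, not part of the original header): after
 *   CREATE TYPE mood AS ENUM ('sad', 'happy');
 * pg_enum holds one row per label, e.g.
 *   (enumtypid = <OID of mood>, enumsortorder = 1, enumlabel = 'sad')
 *   (enumtypid = <OID of mood>, enumsortorder = 2, enumlabel = 'happy')
 */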
/* ----------------
* Form_pg_enum corresponds to a pointer to a tuple with
* the format of pg_enum relation.
* ----------------
*/
typedef FormData_pg_enum *Form_pg_enum;
/* ----------------
* compiler constants for pg_enum
* ----------------
*/
#define Natts_pg_enum 3
#define Anum_pg_enum_enumtypid 1
#define Anum_pg_enum_enumsortorder 2
#define Anum_pg_enum_enumlabel 3
/* ----------------
* pg_enum has no initial contents
* ----------------
*/
/*
* prototypes for functions in pg_enum.c
*/
extern void EnumValuesCreate(Oid enumTypeOid, List *vals);
extern void EnumValuesDelete(Oid enumTypeOid);
extern void AddEnumLabel(Oid enumTypeOid, const char *newVal,
const char *neighbor, bool newValIsAfter,
bool skipIfExists);
#endif /* PG_ENUM_H */
| bsd-2-clause |
MariaSolovyeva/watchkeeper | django_project/event_mapper/migrations/0006_auto_20150505_0922.py | 789 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('event_mapper', '0005_user_is_confirmed'),
]
operations = [
migrations.AlterField(
model_name='event',
name='date_time',
field=models.DateTimeField(help_text=b'Date and time when the event happened.', verbose_name=b'Date and Time'),
preserve_default=True,
),
migrations.AlterField(
model_name='event',
name='victim',
field=models.ForeignKey(default=0, verbose_name=b'Victim', to='event_mapper.Victim', help_text=b'The victim of the event.'),
preserve_default=True,
),
]
| bsd-2-clause |
tempbottle/idea-rust | src/java/main/vektah/rust/RustFileType.java | 790 | package vektah.rust;
import com.intellij.openapi.fileTypes.LanguageFileType;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import vektah.rust.i18n.RustBundle;
import javax.swing.*;
public class RustFileType extends LanguageFileType {
public static final RustFileType INSTANCE = new RustFileType();
private RustFileType() {
super(RustLanguage.INSTANCE);
}
@NotNull
@Override
public String getName() {
return RustBundle.message("file.type.name.rust");
}
@NotNull
@Override
public String getDescription() {
return RustBundle.message("file.type.description.rust");
}
@NotNull
@Override
public String getDefaultExtension() {
return "rs";
}
@Nullable
@Override
public Icon getIcon() {
return RustIcons.ICON_RUST_16;
}
}
| bsd-2-clause |
jawshooah/homebrew-cask | Casks/picka.rb | 284 | cask 'picka' do
version '1.0.0'
sha256 '981209f1bd432d99ce082429cbb182b17194063b6b0eb8ae9fa22a0dbe37bca8'
url 'https://getpicka.com/downloads/Picka.zip'
appcast 'https://getpicka.com/appcast-trial.xml'
name 'Picka'
homepage 'https://getpicka.com/'
app 'Picka.app'
end
| bsd-2-clause |
sscotth/homebrew-cask | Casks/watchguard-mobile-vpn-with-ssl.rb | 518 | cask 'watchguard-mobile-vpn-with-ssl' do
version '12.5.3,615421'
sha256 'b8a4f9ce908f19df6122fdf24445fdb233d812f2f6b5f08261ca2e4cca0c3784'
url "http://cdn.watchguard.com/SoftwareCenter/Files/MUVPN_SSL/#{version.before_comma.dots_to_underscores}/WG-MVPN-SSL_#{version.before_comma.dots_to_underscores}.dmg"
name 'WatchGuard Mobile VPN with SSL'
homepage 'https://www.watchguard.com/'
pkg "WatchGuard Mobile VPN with SSL Installer V#{version.after_comma}.mpkg"
uninstall pkgutil: 'com.watchguard.*'
end
| bsd-2-clause |
jokereactive/ORB_Android | suitesparse/SuiteSparse/CXSparse/SourceWrappers/cs_permute_cl.o.c | 106 | #ifndef NCOMPLEX
#define CS_LONG
#define CS_COMPLEX
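/* Explanatory note (added by the editor): defining CS_LONG and CS_COMPLEX
   before including the shared source compiles the SuiteSparse_long +
   complex variant of cs_permute from the single cs_permute.c template. */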
#include <../Source/cs_permute.c>
#endif // NCOMPLEX
| bsd-2-clause |
jokereactive/ORB_Android | suitesparse/SuiteSparse/UMFPACK/Source/umf_internal.h | 25886 | /* ========================================================================== */
/* === umf_internal.h ======================================================= */
/* ========================================================================== */
/* -------------------------------------------------------------------------- */
/* Copyright (c) 2005-2012 by Timothy A. Davis, http://www.suitesparse.com. */
/* All Rights Reserved. See ../Doc/License for License. */
/* -------------------------------------------------------------------------- */
/*
This file is for internal use in UMFPACK itself, and should not be included
in user code. Use umfpack.h instead. User-accessible file names and
routine names all start with the letters "umfpack_". Non-user-accessible
file names and routine names all start with "umf_".
*/
#ifndef _UMF_INTERNAL
#define _UMF_INTERNAL
/* -------------------------------------------------------------------------- */
/* ANSI standard include files */
/* -------------------------------------------------------------------------- */
/* from float.h: DBL_EPSILON */
#include <float.h>
/* from string.h: strcmp */
#include <string.h>
/* when debugging, assert.h and the assert macro are used (see umf_dump.h) */
/* -------------------------------------------------------------------------- */
/* Architecture */
/* -------------------------------------------------------------------------- */
#if defined (__sun) || defined (MSOL2) || defined (ARCH_SOL2)
#define UMF_SOL2
#define UMFPACK_ARCHITECTURE "Sun Solaris"
#elif defined (__sgi) || defined (MSGI) || defined (ARCH_SGI)
#define UMF_SGI
#define UMFPACK_ARCHITECTURE "SGI Irix"
#elif defined (__linux) || defined (MGLNX86) || defined (ARCH_GLNX86)
#define UMF_LINUX
#define UMFPACK_ARCHITECTURE "Linux"
#elif defined (__APPLE__)
#define UMF_MAC
#define UMFPACK_ARCHITECTURE "Mac"
#elif defined (_AIX) || defined (MIBM_RS) || defined (ARCH_IBM_RS)
#define UMF_AIX
#define UMFPACK_ARCHITECTURE "IBM AIX"
#elif defined (__alpha) || defined (MALPHA) || defined (ARCH_ALPHA)
#define UMF_ALPHA
#define UMFPACK_ARCHITECTURE "Compaq Alpha"
#elif defined (_WIN32) || defined (WIN32)
#if defined (__MINGW32__)
#define UMF_MINGW
#elif defined (__CYGWIN32__)
#define UMF_CYGWIN
#else
#define UMF_WINDOWS
#endif
#define UMFPACK_ARCHITECTURE "Microsoft Windows"
#elif defined (__hppa) || defined (__hpux) || defined (MHPUX) || defined (ARCH_HPUX)
#define UMF_HP
#define UMFPACK_ARCHITECTURE "HP Unix"
#elif defined (__hp700) || defined (MHP700) || defined (ARCH_HP700)
#define UMF_HP
#define UMFPACK_ARCHITECTURE "HP 700 Unix"
#else
/* If the architecture is unknown, and you call the BLAS, you may need to */
/* define BLAS_BY_VALUE, BLAS_NO_UNDERSCORE, and/or BLAS_CHAR_ARG yourself. */
#define UMFPACK_ARCHITECTURE "unknown"
#endif
/* -------------------------------------------------------------------------- */
/* basic definitions (see also amd_internal.h) */
/* -------------------------------------------------------------------------- */
#define ONES_COMPLEMENT(r) (-(r)-1)
/* -------------------------------------------------------------------------- */
/* AMD include file */
/* -------------------------------------------------------------------------- */
/* stdio.h, stdlib.h, limits.h, and math.h, NDEBUG definition, assert.h */
#include "amd_internal.h"
/* -------------------------------------------------------------------------- */
/* MATLAB include files */
/* -------------------------------------------------------------------------- */
/* only used when compiling the UMFPACK mexFunction */
#ifdef MATLAB_MEX_FILE
#include "matrix.h"
#include "mex.h"
#endif
/* -------------------------------------------------------------------------- */
/* Real/complex and int/SuiteSparse_long definitions, double relops */
/* -------------------------------------------------------------------------- */
#include "umf_version.h"
/* -------------------------------------------------------------------------- */
/* Compile-time configurations */
/* -------------------------------------------------------------------------- */
#include "umf_config.h"
/* -------------------------------------------------------------------------- */
/* umfpack include file */
/* -------------------------------------------------------------------------- */
#include "umfpack.h"
/* -------------------------------------------------------------------------- */
/* for contents of Info. This must correlate with umfpack.h */
/* -------------------------------------------------------------------------- */
#define ESTIMATE (UMFPACK_NUMERIC_SIZE_ESTIMATE - UMFPACK_NUMERIC_SIZE)
#define ACTUAL 0
/* -------------------------------------------------------------------------- */
/* get a parameter from the Control array */
/* -------------------------------------------------------------------------- */
#define GET_CONTROL(i,default) \
((Control != (double *) NULL) ? \
(SCALAR_IS_NAN (Control [i]) ? default : Control [i]) \
: default)
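/* Usage sketch (illustrative only), e.g. when reading factorization options:
 *   relpt = GET_CONTROL (UMFPACK_PIVOT_TOLERANCE,
 *                        UMFPACK_DEFAULT_PIVOT_TOLERANCE) ;
 * A NULL Control array or a NaN entry falls back to the default value. */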
/* -------------------------------------------------------------------------- */
/* for clearing the external degree counters */
/* -------------------------------------------------------------------------- */
#define MAX_MARK(n) Int_MAX - (2*(n)+1)
/* -------------------------------------------------------------------------- */
/* convert number of Units to MBytes */
/* -------------------------------------------------------------------------- */
#define MBYTES(units) (((units) * sizeof (Unit)) / 1048576.0)
/* -------------------------------------------------------------------------- */
/* dense row/column macro */
/* -------------------------------------------------------------------------- */
/* In order for a row or column to be treated as "dense", it must have more */
/* entries than the value returned by this macro. n is the dimension of the */
/* matrix, and alpha is the dense row/column control parameter. */
/* Note: this is not defined if alpha is NaN or Inf: */
#define UMFPACK_DENSE_DEGREE_THRESHOLD(alpha,n) \
((Int) MAX (16.0, (alpha) * 16.0 * sqrt ((double) (n))))
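/* Worked example (illustrative): with alpha = 0.2 (the documented default
   dense-row parameter) and n = 10000, the threshold is
   MAX (16, 0.2 * 16 * sqrt (10000)) = MAX (16, 320) = 320 entries, so only
   rows/columns with more than 320 entries are treated as "dense". */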
/* -------------------------------------------------------------------------- */
/* PRINTF */
/* -------------------------------------------------------------------------- */
#define PRINTFk(k,params) { if (prl >= (k)) { PRINTF (params) ; } }
#define PRINTF1(params) PRINTFk (1, params)
#define PRINTF2(params) PRINTFk (2, params)
#define PRINTF3(params) PRINTFk (3, params)
#define PRINTF4(params) PRINTFk (4, params)
#define PRINTF5(params) PRINTFk (5, params)
#define PRINTF6(params) PRINTFk (6, params)
/* -------------------------------------------------------------------------- */
/* Fixed control parameters */
/* -------------------------------------------------------------------------- */
/* maximum number of columns to consider at one time, in a single front */
#define MAX_CANDIDATES 128
/* reduce Numeric->Memory request by this ratio, if allocation fails */
#define UMF_REALLOC_REDUCTION (0.95)
/* increase Numeric->Memory request by this ratio, if we need more */
#define UMF_REALLOC_INCREASE (1.2)
/* increase the dimensions of the current frontal matrix by this factor
* when it needs to grow. */
#define UMF_FRONTAL_GROWTH (1.2)
/* largest BLAS block size permitted */
#define MAXNB 64
/* if abs (y) < RECIPROCAL_TOLERANCE, then compute x/y. Otherwise x*(1/y).
* Ignored if NRECIPROCAL is defined */
#define RECIPROCAL_TOLERANCE 1e-12
/* -------------------------------------------------------------------------- */
/* Memory allocator */
/* -------------------------------------------------------------------------- */
/* see SuiteSparse_config */
/* -------------------------------------------------------------------------- */
/* Memory space definitions */
/* -------------------------------------------------------------------------- */
/* for memory alignment - assume double has worst case alignment */
typedef double Align ;
/* get number of bytes required to hold n items of a type: */
/* note that this will not overflow, because sizeof (type) is always */
/* greater than or equal to sizeof (Int) >= 2 */
#define BYTES(type,n) (sizeof (type) * (n))
/* ceiling of (b/u). Assumes b >= 0 and u > 0 */
#define CEILING(b,u) (((b) + (u) - 1) / (u))
/* get number of Units required to hold n items of a type: */
#define UNITS(type,n) (CEILING (BYTES (type, n), sizeof (Unit)))
/* same as UNITS, but use double instead of Int to avoid overflow */
#define DUNITS(type,n) (ceil (BYTES (type, (double) n) / sizeof (Unit)))
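/* Worked example (illustrative, assuming sizeof (Unit) == 8 and
   sizeof (Int) == 4): UNITS (Int, 10) = CEILING (40, 8) = 5, so ten
   integers occupy five Units. DUNITS computes the same quantity in
   double precision so that very large n cannot overflow the Int result. */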
union Unit_union
{ /* memory is allocated in multiples of Unit */
struct
{
Int
size, /* size, in Units, of the block, excl. header block */
/* size >= 0: block is in use */
/* size < 0: block is free, of |size| Units */
prevsize ; /* size, in Units, of preceding block in S->Memory */
/* during garbage_collection, prevsize is set to -e-1 */
/* for element e, or positive (and thus a free block) */
/* otherwise */
} header ; /* block header */
Align xxxxxx ; /* force alignment of blocks (xxxxxx is never used) */
} ;
typedef union Unit_union Unit ;
/* get the size of an allocated block */
#define GET_BLOCK_SIZE(p) (((p)-1)->header.size)
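/* Illustrative picture (editor's note): a 5-Unit free block that follows a
   3-Unit block in use has header.size = -5 and header.prevsize = 3 ; for an
   allocated block, GET_BLOCK_SIZE (p) reads the size field of the header
   Unit that sits immediately before the usable space p points to. */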
/* -------------------------------------------------------------------------- */
/* Numeric */
/* -------------------------------------------------------------------------- */
/*
NUMERIC_VALID and SYMBOLIC_VALID:
    The different values of SYMBOLIC_VALID and NUMERIC_VALID are chosen as a
first defense against corrupted *Symbolic or *Numeric pointers passed to an
UMFPACK routine. They also ensure that the objects are used only by the
same version that created them (umfpack_di_*, umfpack_dl_*, umfpack_zi_*,
or umfpack_zl_*). The values have also been changed since prior releases of
the code to ensure that all routines that operate on the objects are of the
same release. The values themselves are purely arbitrary. The are less
than the ANSI C required minimums of INT_MAX and LONG_MAX, respectively.
*/
#ifdef DINT
#define NUMERIC_VALID 15977
#define SYMBOLIC_VALID 41937
#endif
#ifdef DLONG
#define NUMERIC_VALID 399789720
#define SYMBOLIC_VALID 399192713
#endif
#ifdef ZINT
#define NUMERIC_VALID 17957
#define SYMBOLIC_VALID 40927
#endif
#ifdef ZLONG
#define NUMERIC_VALID 129987754
#define SYMBOLIC_VALID 110291734
#endif
typedef struct /* NumericType */
{
double
flops, /* "true" flop count */
relpt, /* relative pivot tolerance used */
relpt2, /* relative pivot tolerance used for sym. */
droptol,
alloc_init, /* initial allocation of Numeric->memory */
front_alloc_init, /* frontal matrix allocation parameter */
rsmin, /* smallest row sum */
rsmax, /* largest row sum */
min_udiag, /* smallest abs value on diagonal of D */
max_udiag, /* largest abs value on diagonal of D */
rcond ; /* min (D) / max (D) */
Int
scale ;
Int valid ; /* set to NUMERIC_VALID, for validity check */
/* Memory space for A and LU factors */
Unit
*Memory ; /* working memory for A and LU factors */
Int
ihead, /* pointer to tail of LU factors, in Numeric->Memory */
itail, /* pointer to top of elements & tuples, */
/* in Numeric->Memory */
ibig, /* pointer to largest free block seen in tail */
size ; /* size of Memory, in Units */
Int
*Rperm, /* pointer to row perm array, size: n+1 */
/* after UMF_kernel: Rperm [new] = old */
/* during UMF_kernel: Rperm [old] = new */
*Cperm, /* pointer to col perm array, size: n+1 */
/* after UMF_kernel: Cperm [new] = old */
/* during UMF_kernel: Cperm [old] = new */
*Upos, /* see UMFPACK_get_numeric for a description */
*Lpos,
*Lip,
*Lilen,
*Uip,
*Uilen,
*Upattern ; /* pattern of last row of U (if singular) */
Int
ulen, /* length of Upattern */
npiv, /* number of structural pivots found (sprank approx) */
nnzpiv ; /* number of numerical (nonzero) pivots found */
Entry
*D ; /* D [i] is the diagonal entry of U */
Int do_recip ;
double *Rs ; /* scale factors for the rows of A and b */
/* do_recip FALSE: Divide row i by Rs [i] */
/* do_recip TRUE: Multiply row i by Rs [i] */
Int
n_row, n_col, /* A is n_row-by-n_col */
n1 ; /* number of singletons */
/* for information only: */
Int
tail_usage, /* amount of memory allocated in tail */
/* head_usage is Numeric->ihead */
init_usage, /* memory usage just after UMF_kernel_init */
max_usage, /* peak memory usage (excludes internal and external */
/* fragmentation in the tail) */
ngarbage, /* number of garbage collections performed */
nrealloc, /* number of reallocations performed */
ncostly, /* number of costly reallocations performed */
isize, /* size of integer pattern of L and U */
nLentries, /* number of entries in L, excluding diagonal */
nUentries, /* number of entries in U, including diagonal */
/* Some entries may be numerically zero. */
lnz, /* number of nonzero entries in L, excl. diagonal */
all_lnz, /* lnz plus entries dropped from L */
unz, /* number of nonzero entries in U, excl. diagonal */
all_unz, /* unz plus entries dropped from U */
maxfrsize ; /* largest actual front size */
Int maxnrows, maxncols ; /* not the same as Symbolic->maxnrows/maxncols */
} NumericType ;
/* -------------------------------------------------------------------------- */
/* Element tuples for connecting elements together in a matrix */
/* -------------------------------------------------------------------------- */
typedef struct /* Tuple */
{
/* The (e,f) tuples for the element lists */
Int
e, /* element */
f ; /* contribution to the row/col appears at this offset */
} Tuple ;
#define TUPLES(t) MAX (4, (t) + 1)
/* Col_degree is aliased with Cperm, and Row_degree with Rperm */
#define NON_PIVOTAL_COL(col) (Col_degree [col] >= 0)
#define NON_PIVOTAL_ROW(row) (Row_degree [row] >= 0)
/* -------------------------------------------------------------------------- */
/* An element */
/* -------------------------------------------------------------------------- */
typedef struct /* Element */
{
Int
cdeg, /* external column degree + cdeg0 offset */
rdeg, /* external row degree + rdeg0 offset */
nrowsleft, /* number of rows remaining */
ncolsleft, /* number of columns remaining */
nrows, /* number of rows */
ncols, /* number of columns */
next ; /* for list link of sons, used during assembly only */
/* followed in memory by:
Int
col [0..ncols-1], column indices of this element
row [0..nrows-1] ; row indices of this element
Entry (suitably aligned, see macro below)
C [0...nrows-1, 0...ncols-1] ;
size of C is nrows*ncols Entry's
*/
} Element ;
/* macros for computing pointers to row/col indices, and contribution block: */
#define GET_ELEMENT_SIZE(nr,nc) \
(UNITS (Element, 1) + UNITS (Int, (nc) + (nr)) + UNITS (Entry, (nc) * (nr)))
#define DGET_ELEMENT_SIZE(nr,nc) \
(DUNITS (Element, 1) + DUNITS (Int, (nc) + (nr)) + DUNITS (Entry, (nc) * (nr)))
#define GET_ELEMENT_COLS(ep,p,Cols) { \
ASSERT (p != (Unit *) NULL) ; \
ASSERT (p >= Numeric->Memory + Numeric->itail) ; \
ASSERT (p <= Numeric->Memory + Numeric->size) ; \
ep = (Element *) p ; \
p += UNITS (Element, 1) ; \
Cols = (Int *) p ; \
}
#define GET_ELEMENT_PATTERN(ep,p,Cols,Rows,ncm) { \
GET_ELEMENT_COLS (ep, p, Cols) ; \
ncm = ep->ncols ; \
Rows = Cols + ncm ; \
}
#define GET_ELEMENT(ep,p,Cols,Rows,ncm,nrm,C) { \
GET_ELEMENT_PATTERN (ep, p, Cols, Rows, ncm) ; \
nrm = ep->nrows ; \
p += UNITS (Int, ncm + nrm) ; \
C = (Entry *) p ; \
}
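/* Layout sketch (illustrative): an element with nrows = 2 and ncols = 3 is
   stored contiguously as [Element header | 3 column indices | 2 row indices
   | 6 Entry values], and GET_ELEMENT recovers each part by advancing the
   Unit pointer p past the preceding parts. */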
/* -------------------------------------------------------------------------- */
/* Work data structure */
/* -------------------------------------------------------------------------- */
/*
This data structure holds items needed only during factorization.
All of this is freed when UMFPACK_numeric completes. Note that some of
it is stored in the tail end of Numeric->S (namely, the Tuples and the
Elements).
*/
typedef struct /* WorkType */
{
/* ---------------------------------------------------------------------- */
/* information about each row and col of A */
/* ---------------------------------------------------------------------- */
/*
Row_tuples: pointer to tuple list (alias with Numeric->Uip)
Row_tlen: number of tuples (alias with Numeric->Uilen)
Col_tuples: pointer to tuple list (alias with Numeric->Lip)
Col_tlen: number of tuples (alias with Numeric->Lilen)
Row_degree: degree of the row or column (alias Numeric->Rperm)
Col_degree: degree of the row or column (alias Numeric->Cperm)
The Row_degree and Col_degree are MATLAB-style colmmd approximations,
are equal to the sum of the sizes of the elements (contribution blocks)
in each row and column. They are maintained when elements are created
and assembled. They are used only during the pivot row and column
search. They are not needed to represent the pattern of the remaining
matrix.
*/
/* ---------------------------------------------------------------------- */
/* information about each element */
/* ---------------------------------------------------------------------- */
Int *E ; /* E [0 .. Work->elen-1] element "pointers" */
/* (offsets in Numeric->Memory) */
/* ---------------------------------------------------------------------- */
/* generic workspace */
/* ---------------------------------------------------------------------- */
Entry *Wx, *Wy ; /* each of size maxnrows+1 */
Int /* Sizes: nn = MAX (n_row, n_col) */
*Wp, /* nn+1 */
*Wrp, /* n_col+1 */
*Wm, /* maxnrows+1 */
*Wio, /* maxncols+1 */
*Woi, /* maxncols+1 */
*Woo, /* MAX (maxnrows,maxncols)+1 */
*Wrow, /* pointer to Fcols, Wio, or Woi */
*NewRows, /* list of rows to scan */
*NewCols ; /* list of cols to scan */
/* ---------------------------------------------------------------------- */
Int
*Lpattern, /* pattern of column of L, for one Lchain */
*Upattern, /* pattern of row of U, for one Uchain */
ulen, llen ; /* length of Upattern and Lpattern */
Int
*Diagonal_map, /* used for symmetric pivoting, of size nn+1 */
*Diagonal_imap ;/* used for symmetric pivoting, of size nn+1 */
/* ---------------------------------------------------------------------- */
Int
n_row, n_col, /* matrix is n_row-by-n_col */
nz, /* nonzeros in the elements for this matrix */
n1, /* number of row and col singletons */
elen, /* max possible number of elements */
npiv, /* number of pivot rows and columns so far */
ndiscard, /* number of discarded pivot columns */
Wrpflag,
nel, /* elements in use are in the range 1..nel */
noff_diagonal,
prior_element,
rdeg0, cdeg0,
rrdeg, ccdeg,
Candidates [MAX_CANDIDATES], /* current candidate pivot columns */
nCandidates, /* number of candidates in Candidate set */
ksuper,
firstsuper,
jsuper,
ncand, /* number of candidates (some not in Candidates[ ]) */
nextcand, /* next candidate to place in Candidate search set */
lo,
hi,
pivrow, /* current pivot row */
pivcol, /* current pivot column */
do_extend, /* true if the next pivot extends the current front */
do_update, /* true if update should be applied */
nforced, /* number of forced updates because of frontal growth */
any_skip,
do_scan2row,
do_scan2col,
do_grow,
pivot_case,
frontid, /* id of current frontal matrix */
nfr ; /* number of frontal matrices */
/* ---------------------------------------------------------------------- */
/* For row-merge tree */
/* ---------------------------------------------------------------------- */
Int
*Front_new1strow ;
/* ---------------------------------------------------------------------- */
/* current frontal matrix, F */
/* ---------------------------------------------------------------------- */
Int Pivrow [MAXNB],
Pivcol [MAXNB] ;
Entry
*Flublock, /* LU block, nb-by-nb */
*Flblock, /* L block, fnr_curr-by-nb */
*Fublock, /* U block, nb-by-fnc_curr, or U' fnc_curr-by-nb */
*Fcblock ; /* C block, fnr_curr-by-fnc_curr */
Int
*Frows, /* Frows [0.. ]: row indices of F */
*Fcols, /* Fcols [0.. ]: column indices of F */
*Frpos, /* position of row indices in F, or -1 if not present */
/* if Frows[i] == row, then Frpos[row] == i */
*Fcpos, /* position of col indices in F, or -1 if not present */
/* if Fcols[j] == col, then */
/* Fcpos[col] == j*Work->fnr_curr */
fnrows, /* number of rows in contribution block in F */
fncols, /* number of columns in contribution block in F */
fnr_curr, /* maximum # of rows in F (leading dimension) */
fnc_curr, /* maximum # of columns in F */
fcurr_size, /* current size of F */
fnrows_max, /* max possible column-dimension (max # of rows) of F */
fncols_max, /* max possible row-dimension (max # of columns) of F */
nb,
fnpiv, /* number of pivots in F */
fnzeros, /* number of explicit zero entries in LU block */
fscan_row, /* where to start scanning rows of F in UMF_assemble */
fscan_col, /* where to start scanning cols of F in UMF_assemble */
fnrows_new, /* number of new row indices in F after pivot added */
fncols_new, /* number of new col indices in F after pivot added */
pivrow_in_front, /* true if current pivot row in Frows */
pivcol_in_front ; /* true if current pivot column in Fcols */
/* ----------------------------------------------------------------------
* Current frontal matrix
* ----------------------------------------------------------------------
* The current frontal matrix is held as a single block of memory allocated
* from the "tail" end of Numeric->Memory. It is subdivided into four
* parts: an LU block, an L block, a U block, and a C block.
*
* Let k = fnpiv, r = fnrows, and c = fncols for the following discussion.
* Let dr = fnr_curr and dc = fnc_curr. Note that r <= dr and c <= dc.
*
* The LU block is of dimension nb-by-nb. The first k-by-k part holds the
* "diagonal" part of the LU factors for these k pivot rows and columns.
* The k pivot row and column indices in this part are Pivrow [0..k-1] and
* Pivcol [0..k-1], respectively.
*
* The L block is of dimension dr-by-nb. It holds the k pivot columns,
* except for the leading k-by-k part in the LU block. Only the leading
* r-by-k part is in use.
*
* The U block is of dimension dc-by-nb. It holds the k pivot rows,
* except for the leading k-by-k part in the LU block. It is stored in
* row-oriented form. Only the leading c-by-k part is in use.
*
* The C block is of dimension dr-by-dc. It holds the current contribution
* block. Only the leading r-by-c part is in use. The column indices in
* the C block are Fcols [0..c-1], and the row indices are Frows [0..r-1].
*
* dr is always odd, to avoid bad cache behavior.
*/
} WorkType ;
/* -------------------------------------------------------------------------- */
/* Symbolic */
/* -------------------------------------------------------------------------- */
/*
    This is constructed by UMFPACK_symbolic, and is needed by UMFPACK_numeric
to factor the matrix.
*/
typedef struct /* SymbolicType */
{
double
num_mem_usage_est, /* estimated max Numeric->Memory size */
num_mem_size_est, /* estimated final Numeric->Memory size */
peak_sym_usage, /* peak Symbolic and SymbolicWork usage */
sym, /* symmetry of pattern */
dnum_mem_init_usage, /* min Numeric->Memory for UMF_kernel_init */
amd_lunz, /* nz in LU for AMD, with symmetric pivoting */
lunz_bound ; /* max nz in LU, for arbitrary row pivoting */
Int valid, /* set to SYMBOLIC_VALID, for validity check */
max_nchains,
nchains,
*Chain_start,
*Chain_maxrows,
*Chain_maxcols,
maxnrows, /* largest number of rows in any front */
maxncols, /* largest number of columns in any front */
*Front_npivcol, /* Front_npivcol [j] = size of jth supercolumn*/
*Front_1strow, /* first row in front j */
*Front_leftmostdesc, /* leftmost desc of front j */
*Front_parent, /* super-column elimination tree */
*Cperm_init, /* initial column ordering */
*Rperm_init, /* initial row ordering */
*Cdeg, *Rdeg,
*Esize,
dense_row_threshold,
n1, /* number of singletons */
nempty, /* MIN (nempty_row, nempty_col) */
*Diagonal_map, /* initial "diagonal" */
esize, /* size of Esize array */
nfr,
n_row, n_col, /* matrix A is n_row-by-n_col */
nz, /* nz of original matrix */
nb, /* block size for BLAS 3 */
num_mem_init_usage, /* min Numeric->Memory for UMF_kernel_init */
nempty_row, nempty_col,
strategy,
ordering,
fixQ,
prefer_diagonal,
nzaat,
nzdiag,
amd_dmax ;
} SymbolicType ;
/* -------------------------------------------------------------------------- */
/* for debugging only: */
/* -------------------------------------------------------------------------- */
#include "umf_dump.h"
/* -------------------------------------------------------------------------- */
/* for statement coverage testing only: */
/* -------------------------------------------------------------------------- */
#ifdef TESTING
/* for testing integer overflow: */
#ifdef TEST_FOR_INTEGER_OVERFLOW
#undef MAX_MARK
#define MAX_MARK(n) (3*(n))
#endif
/* for testing out-of-memory conditions: */
#define UMF_TCOV_TEST
#ifndef EXTERN
#define EXTERN extern
#endif
GLOBAL EXTERN int umf_fail, umf_fail_lo, umf_fail_hi ;
GLOBAL EXTERN int umf_realloc_fail, umf_realloc_lo, umf_realloc_hi ;
/* for testing malloc count: */
#define UMF_MALLOC_COUNT
#endif
#endif
| bsd-2-clause |
Alloyed/Play- | tools/McServTest/AppConfig.cpp | 568 | #include "AppConfig.h"
#include "PathUtils.h"
#define BASE_DATA_PATH (L"McServTest Data Files")
#define CONFIG_FILENAME (L"config.xml")
CAppConfig::CAppConfig()
: CConfig(BuildConfigPath())
{
}
CAppConfig::~CAppConfig()
{
}
Framework::CConfig::PathType CAppConfig::GetBasePath()
{
auto result = Framework::PathUtils::GetPersonalDataPath() / BASE_DATA_PATH;
return result;
}
Framework::CConfig::PathType CAppConfig::BuildConfigPath()
{
auto userPath(GetBasePath());
Framework::PathUtils::EnsurePathExists(userPath);
return userPath / CONFIG_FILENAME;
}
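// Illustrative result (editor's note; actual path names depend on the
// platform): on Windows this typically yields something like
// "<Documents>/McServTest Data Files/config.xml", with the directory
// created on first use via EnsurePathExists.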
| bsd-2-clause |
lxp/sulong | tests/sulong/c/truffle-c/bitFields/testOverflow2.c | 103 | struct test {
unsigned int val : 3;
};
int main() {
struct test t;
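/* 9 (binary 1001) does not fit in the unsigned 3-bit field: the value is
   truncated modulo 8, so t.val becomes 1, which main() returns. */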
t.val = 9;
return t.val;
}
| bsd-3-clause |
chromium/chromium | chrome/browser/resources/print_preview/ui/advanced_options_settings.ts | 1620 | // Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
import 'chrome://resources/cr_elements/cr_button/cr_button.m.js';
import './advanced_settings_dialog.js';
import './print_preview_shared_css.js';
import './settings_section.js';
import {CrButtonElement} from 'chrome://resources/cr_elements/cr_button/cr_button.m.js';
import {PolymerElement} from 'chrome://resources/polymer/v3_0/polymer/polymer_bundled.min.js';
import {Destination} from '../data/destination.js';
import {Settings} from '../data/model.js';
import {getTemplate} from './advanced_options_settings.html.js';
interface PrintPreviewAdvancedOptionsSettingsElement {
$: {
button: CrButtonElement,
}
}
class PrintPreviewAdvancedOptionsSettingsElement extends PolymerElement {
static get is() {
return 'print-preview-advanced-options-settings';
}
static get template() {
return getTemplate();
}
static get properties() {
return {
disabled: Boolean,
destination: Object,
settings: Object,
showAdvancedDialog_: {
type: Boolean,
value: false,
},
};
}
disabled: boolean;
destination: Destination;
settings: Settings;
private showAdvancedDialog_: boolean;
private onButtonClick_() {
this.showAdvancedDialog_ = true;
}
private onDialogClose_() {
this.showAdvancedDialog_ = false;
this.$.button.focus();
}
}
customElements.define(
PrintPreviewAdvancedOptionsSettingsElement.is,
PrintPreviewAdvancedOptionsSettingsElement);
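// Illustrative usage in a parent Polymer template (the bindings shown are
// hypothetical, not taken from the real sidebar markup):
//
//   <print-preview-advanced-options-settings
//       settings="{{settings}}" destination="[[destination]]"
//       disabled="[[controlsDisabled_]]">
//   </print-preview-advanced-options-settings>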
| bsd-3-clause |
faclib/ezcomponents | Webdav/tests/client_ie6_auth_test.php | 748 | <?php
/**
* Client test for InternetExplorer 6 (auth).
*
* @package Webdav
* @subpackage Tests
* @version 1.1.4
* @copyright Copyright (C) 2005-2010 eZ Systems AS. All rights reserved.
* @license http://ez.no/licenses/new_bsd New BSD License
*/
require_once 'client_test_suite.php';
require_once 'client_test_continuous_ie_auth_setup.php';
/**
* Client test for InternetExplorer 6 (auth).
*
* @package Webdav
* @subpackage Tests
*/
class ezcWebdavIe6AuthClientTest extends ezcTestCase
{
public static function suite()
{
return new ezcWebdavClientTestSuite(
'InternetExplorer 6 (auth)',
'clients/ie6_auth.php',
new ezcWebdavClientTestContinuousIeAuthSetup()
);
}
}
?>
| bsd-3-clause |
scaryml1000/ZendSkeleton | vendor/squizlabs/php_codesniffer/CodeSniffer/Standards/PSR1/Sniffs/Methods/CamelCapsMethodNameSniff.php | 2984 | <?php
/**
* PSR1_Sniffs_Methods_CamelCapsMethodNameSniff.
*
* PHP version 5
*
* @category PHP
* @package PHP_CodeSniffer
* @author Greg Sherwood <[email protected]>
* @copyright 2006-2012 Squiz Pty Ltd (ABN 77 084 670 600)
* @license https://github.com/squizlabs/PHP_CodeSniffer/blob/master/licence.txt BSD Licence
* @link http://pear.php.net/package/PHP_CodeSniffer
*/
if (class_exists('PHP_CodeSniffer_Standards_AbstractScopeSniff', true) === false) {
throw new PHP_CodeSniffer_Exception('Class PHP_CodeSniffer_Standards_AbstractScopeSniff not found');
}
/**
* PSR1_Sniffs_Methods_CamelCapsMethodNameSniff.
*
* Ensures method names are defined using camel case.
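 *
 * An illustrative (hypothetical) snippet showing what the sniff flags:
 *
 * <code>
 * class Foo
 * {
 *     public function getValue() {}  // OK: camel caps.
 *     public function get_value() {} // Error: NotCamelCaps.
 * }
 * </code>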
*
* @category PHP
* @package PHP_CodeSniffer
* @author Greg Sherwood <[email protected]>
* @copyright 2006-2012 Squiz Pty Ltd (ABN 77 084 670 600)
* @license https://github.com/squizlabs/PHP_CodeSniffer/blob/master/licence.txt BSD Licence
* @version Release: @package_version@
* @link http://pear.php.net/package/PHP_CodeSniffer
*/
class PSR1_Sniffs_Methods_CamelCapsMethodNameSniff extends PHP_CodeSniffer_Standards_AbstractScopeSniff
{
/**
* Constructs a PSR1_Sniffs_Methods_CamelCapsMethodNameSniff.
*/
public function __construct()
{
parent::__construct(array(T_CLASS, T_INTERFACE, T_TRAIT), array(T_FUNCTION), true);
}//end __construct()
/**
* Processes the tokens within the scope.
*
* @param PHP_CodeSniffer_File $phpcsFile The file being processed.
* @param int $stackPtr The position where this token was
* found.
* @param int $currScope The position of the current scope.
*
* @return void
*/
protected function processTokenWithinScope(PHP_CodeSniffer_File $phpcsFile, $stackPtr, $currScope)
{
$methodName = $phpcsFile->getDeclarationName($stackPtr);
if ($methodName === null) {
// Ignore closures.
return;
}
$testName = ltrim($methodName, '_');
if (PHP_CodeSniffer::isCamelCaps($testName, false, true, false) === false) {
$error = 'Method name "%s" is not in camel caps format';
$className = $phpcsFile->getDeclarationName($currScope);
$errorData = array($className.'::'.$methodName);
$phpcsFile->addError($error, $stackPtr, 'NotCamelCaps', $errorData);
}
}//end processTokenWithinScope()
/**
* Processes the tokens outside the scope.
*
* @param PHP_CodeSniffer_File $phpcsFile The file being processed.
* @param int $stackPtr The position where this token was
* found.
*
* @return void
*/
protected function processTokenOutsideScope(PHP_CodeSniffer_File $phpcsFile, $stackPtr)
{
}//end processTokenOutsideScope()
}//end class
?>
| bsd-3-clause |
SaschaMester/delicium | net/base/net_error_list.h | 28541 | // Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file intentionally does not have header guards; it's included
// inside a macro to generate enum values.
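//
// A hedged sketch of how such an X-macro list is typically consumed (the
// exact includer lives elsewhere in net/base and may differ):
//
//   enum Error {
//     OK = 0,
//   #define NET_ERROR(label, value) ERR_##label = value,
//   #include "net/base/net_error_list.h"
//   #undef NET_ERROR
//   };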
// This file contains the list of network errors.
//
// Ranges:
// 0- 99 System related errors
// 100-199 Connection related errors
// 200-299 Certificate errors
// 300-399 HTTP errors
// 400-499 Cache errors
// 500-599 ?
// 600-699 FTP errors
// 700-799 Certificate manager errors
// 800-899 DNS resolver errors
// An asynchronous IO operation is not yet complete. This usually does not
// indicate a fatal error. Typically this error will be generated as a
// notification to wait for some external notification that the IO operation
// finally completed.
NET_ERROR(IO_PENDING, -1)
// A generic failure occurred.
NET_ERROR(FAILED, -2)
// An operation was aborted (due to user action).
NET_ERROR(ABORTED, -3)
// An argument to the function is incorrect.
NET_ERROR(INVALID_ARGUMENT, -4)
// The handle or file descriptor is invalid.
NET_ERROR(INVALID_HANDLE, -5)
// The file or directory cannot be found.
NET_ERROR(FILE_NOT_FOUND, -6)
// An operation timed out.
NET_ERROR(TIMED_OUT, -7)
// The file is too large.
NET_ERROR(FILE_TOO_BIG, -8)
// An unexpected error. This may be caused by a programming mistake or an
// invalid assumption.
NET_ERROR(UNEXPECTED, -9)
// Permission to access a resource, other than the network, was denied.
NET_ERROR(ACCESS_DENIED, -10)
// The operation failed because of unimplemented functionality.
NET_ERROR(NOT_IMPLEMENTED, -11)
// There were not enough resources to complete the operation.
NET_ERROR(INSUFFICIENT_RESOURCES, -12)
// Memory allocation failed.
NET_ERROR(OUT_OF_MEMORY, -13)
// The file upload failed because the file's modification time was different
// from the expectation.
NET_ERROR(UPLOAD_FILE_CHANGED, -14)
// The socket is not connected.
NET_ERROR(SOCKET_NOT_CONNECTED, -15)
// The file already exists.
NET_ERROR(FILE_EXISTS, -16)
// The path or file name is too long.
NET_ERROR(FILE_PATH_TOO_LONG, -17)
// Not enough room left on the disk.
NET_ERROR(FILE_NO_SPACE, -18)
// The file has a virus.
NET_ERROR(FILE_VIRUS_INFECTED, -19)
// The client chose to block the request.
NET_ERROR(BLOCKED_BY_CLIENT, -20)
// The network changed.
NET_ERROR(NETWORK_CHANGED, -21)
// The request was blocked by the URL blacklist configured by the domain
// administrator.
NET_ERROR(BLOCKED_BY_ADMINISTRATOR, -22)
// The socket is already connected.
NET_ERROR(SOCKET_IS_CONNECTED, -23)
// The request was blocked because the forced reenrollment check is still
// pending. This error can only occur on ChromeOS.
// The error can be emitted by code in chrome/browser/policy/policy_helpers.cc.
NET_ERROR(BLOCKED_ENROLLMENT_CHECK_PENDING, -24)
// The upload failed because the upload stream needed to be re-read, due to a
// retry or a redirect, but the upload stream doesn't support that operation.
NET_ERROR(UPLOAD_STREAM_REWIND_NOT_SUPPORTED, -25)
// The request failed because the URLRequestContext is shutting down, or has
// been shut down.
NET_ERROR(CONTEXT_SHUT_DOWN, -26)
// A connection was closed (corresponding to a TCP FIN).
NET_ERROR(CONNECTION_CLOSED, -100)
// A connection was reset (corresponding to a TCP RST).
NET_ERROR(CONNECTION_RESET, -101)
// A connection attempt was refused.
NET_ERROR(CONNECTION_REFUSED, -102)
// A connection timed out as a result of not receiving an ACK for data sent.
// This can include a FIN packet that did not get ACK'd.
NET_ERROR(CONNECTION_ABORTED, -103)
// A connection attempt failed.
NET_ERROR(CONNECTION_FAILED, -104)
// The host name could not be resolved.
NET_ERROR(NAME_NOT_RESOLVED, -105)
// The Internet connection has been lost.
NET_ERROR(INTERNET_DISCONNECTED, -106)
// An SSL protocol error occurred.
NET_ERROR(SSL_PROTOCOL_ERROR, -107)
// The IP address or port number is invalid (e.g., cannot connect to the IP
// address 0 or the port 0).
NET_ERROR(ADDRESS_INVALID, -108)
// The IP address is unreachable. This usually means that there is no route to
// the specified host or network.
NET_ERROR(ADDRESS_UNREACHABLE, -109)
// The server requested a client certificate for SSL client authentication.
NET_ERROR(SSL_CLIENT_AUTH_CERT_NEEDED, -110)
// A tunnel connection through the proxy could not be established.
NET_ERROR(TUNNEL_CONNECTION_FAILED, -111)
// No SSL protocol versions are enabled.
NET_ERROR(NO_SSL_VERSIONS_ENABLED, -112)
// The client and server don't support a common SSL protocol version or
// cipher suite.
NET_ERROR(SSL_VERSION_OR_CIPHER_MISMATCH, -113)
// The server requested a renegotiation (rehandshake).
NET_ERROR(SSL_RENEGOTIATION_REQUESTED, -114)
// The proxy requested authentication (for tunnel establishment) with an
// unsupported method.
NET_ERROR(PROXY_AUTH_UNSUPPORTED, -115)
// During SSL renegotiation (rehandshake), the server sent a certificate with
// an error.
//
// Note: this error is not in the -2xx range so that it won't be handled as a
// certificate error.
NET_ERROR(CERT_ERROR_IN_SSL_RENEGOTIATION, -116)
// The SSL handshake failed because of a bad or missing client certificate.
NET_ERROR(BAD_SSL_CLIENT_AUTH_CERT, -117)
// A connection attempt timed out.
NET_ERROR(CONNECTION_TIMED_OUT, -118)
// There are too many pending DNS resolves, so a request in the queue was
// aborted.
NET_ERROR(HOST_RESOLVER_QUEUE_TOO_LARGE, -119)
// Failed establishing a connection to the SOCKS proxy server for a target host.
NET_ERROR(SOCKS_CONNECTION_FAILED, -120)
// The SOCKS proxy server failed establishing connection to the target host
// because that host is unreachable.
NET_ERROR(SOCKS_CONNECTION_HOST_UNREACHABLE, -121)
// The request to negotiate an alternate protocol failed.
NET_ERROR(NPN_NEGOTIATION_FAILED, -122)
// The peer sent an SSL no_renegotiation alert message.
NET_ERROR(SSL_NO_RENEGOTIATION, -123)
// Winsock sometimes reports more data written than passed. This is probably
// due to a broken LSP.
NET_ERROR(WINSOCK_UNEXPECTED_WRITTEN_BYTES, -124)
// An SSL peer sent us a fatal decompression_failure alert. This typically
// occurs when a peer selects DEFLATE compression in the mistaken belief that
// it supports it.
NET_ERROR(SSL_DECOMPRESSION_FAILURE_ALERT, -125)
// An SSL peer sent us a fatal bad_record_mac alert. This has been observed
// from servers with buggy DEFLATE support.
NET_ERROR(SSL_BAD_RECORD_MAC_ALERT, -126)
// The proxy requested authentication (for tunnel establishment).
NET_ERROR(PROXY_AUTH_REQUESTED, -127)
// The SSL server attempted to use a weak ephemeral Diffie-Hellman key.
NET_ERROR(SSL_WEAK_SERVER_EPHEMERAL_DH_KEY, -129)
// Could not create a connection to the proxy server. An error occurred
// either in resolving its name, or in connecting a socket to it.
// Note that this does NOT include failures during the actual "CONNECT" method
// of an HTTP proxy.
NET_ERROR(PROXY_CONNECTION_FAILED, -130)
// A mandatory proxy configuration could not be used. Currently this means
// that a mandatory PAC script could not be fetched, parsed or executed.
NET_ERROR(MANDATORY_PROXY_CONFIGURATION_FAILED, -131)
// -132 was formerly ERR_ESET_ANTI_VIRUS_SSL_INTERCEPTION
// We've hit the max socket limit for the socket pool while preconnecting. We
// don't bother trying to preconnect more sockets.
NET_ERROR(PRECONNECT_MAX_SOCKET_LIMIT, -133)
// The permission to use the SSL client certificate's private key was denied.
NET_ERROR(SSL_CLIENT_AUTH_PRIVATE_KEY_ACCESS_DENIED, -134)
// The SSL client certificate has no private key.
NET_ERROR(SSL_CLIENT_AUTH_CERT_NO_PRIVATE_KEY, -135)
// The certificate presented by the HTTPS Proxy was invalid.
NET_ERROR(PROXY_CERTIFICATE_INVALID, -136)
// An error occurred when trying to do a name resolution (DNS).
NET_ERROR(NAME_RESOLUTION_FAILED, -137)
// Permission to access the network was denied. This is used to distinguish
// errors that were most likely caused by a firewall from other access denied
// errors. See also ERR_ACCESS_DENIED.
NET_ERROR(NETWORK_ACCESS_DENIED, -138)
// The request throttler module cancelled this request to avoid DDOS.
NET_ERROR(TEMPORARILY_THROTTLED, -139)
// A request to create an SSL tunnel connection through the HTTPS proxy
// received a non-200 (OK) and non-407 (Proxy Auth) response. The response
// body might include a description of why the request failed.
NET_ERROR(HTTPS_PROXY_TUNNEL_RESPONSE, -140)
// We were unable to sign the CertificateVerify data of an SSL client auth
// handshake with the client certificate's private key.
//
// Possible causes for this include the user implicitly or explicitly
// denying access to the private key, the private key may not be valid for
// signing, the key may be relying on a cached handle which is no longer
// valid, or the CSP won't allow arbitrary data to be signed.
NET_ERROR(SSL_CLIENT_AUTH_SIGNATURE_FAILED, -141)
// The message was too large for the transport (for example, a UDP message
// which exceeds the size threshold).
NET_ERROR(MSG_TOO_BIG, -142)
// A SPDY session already exists, and should be used instead of this connection.
NET_ERROR(SPDY_SESSION_ALREADY_EXISTS, -143)
// Error -144 was removed (LIMIT_VIOLATION).
// Websocket protocol error. Indicates that we are terminating the connection
// due to a malformed frame or other protocol violation.
NET_ERROR(WS_PROTOCOL_ERROR, -145)
// Error -146 was removed (PROTOCOL_SWITCHED)
// Returned when attempting to bind an address that is already in use.
NET_ERROR(ADDRESS_IN_USE, -147)
// An operation failed because the SSL handshake has not completed.
NET_ERROR(SSL_HANDSHAKE_NOT_COMPLETED, -148)
// SSL peer's public key is invalid.
NET_ERROR(SSL_BAD_PEER_PUBLIC_KEY, -149)
// The certificate didn't match the built-in public key pins for the host name.
// The pins are set in net/http/transport_security_state.cc and require that
// one of a set of public keys exist on the path from the leaf to the root.
NET_ERROR(SSL_PINNED_KEY_NOT_IN_CERT_CHAIN, -150)
// Server request for client certificate did not contain any types we support.
NET_ERROR(CLIENT_AUTH_CERT_TYPE_UNSUPPORTED, -151)
// Server requested one type of cert, then requested a different type while the
// first was still being generated.
NET_ERROR(ORIGIN_BOUND_CERT_GENERATION_TYPE_MISMATCH, -152)
// An SSL peer sent us a fatal decrypt_error alert. This typically occurs when
// a peer could not correctly verify a signature (in CertificateVerify or
// ServerKeyExchange) or validate a Finished message.
NET_ERROR(SSL_DECRYPT_ERROR_ALERT, -153)
// There are too many pending WebSocketJob instances, so the new job was not
// pushed to the queue.
NET_ERROR(WS_THROTTLE_QUEUE_TOO_LARGE, -154)
// Error -155 was removed (TOO_MANY_SOCKET_STREAMS)
// The SSL server certificate changed in a renegotiation.
NET_ERROR(SSL_SERVER_CERT_CHANGED, -156)
// The SSL server indicated that an unnecessary TLS version fallback was
// performed.
NET_ERROR(SSL_INAPPROPRIATE_FALLBACK, -157)
// Certificate Transparency: All Signed Certificate Timestamps failed to verify.
NET_ERROR(CT_NO_SCTS_VERIFIED_OK, -158)
// The SSL server sent us a fatal unrecognized_name alert.
NET_ERROR(SSL_UNRECOGNIZED_NAME_ALERT, -159)
// Failed to set the socket's receive buffer size as requested.
NET_ERROR(SOCKET_SET_RECEIVE_BUFFER_SIZE_ERROR, -160)
// Failed to set the socket's send buffer size as requested.
NET_ERROR(SOCKET_SET_SEND_BUFFER_SIZE_ERROR, -161)
// Failed to set the socket's receive buffer size as requested, despite success
// return code from setsockopt.
NET_ERROR(SOCKET_RECEIVE_BUFFER_SIZE_UNCHANGEABLE, -162)
// Failed to set the socket's send buffer size as requested, despite success
// return code from setsockopt.
NET_ERROR(SOCKET_SEND_BUFFER_SIZE_UNCHANGEABLE, -163)
// Failed to import a client certificate from the platform store into the SSL
// library.
NET_ERROR(SSL_CLIENT_AUTH_CERT_BAD_FORMAT, -164)
// The SSL server requires falling back to a version older than the configured
// minimum fallback version, and thus fallback failed.
NET_ERROR(SSL_FALLBACK_BEYOND_MINIMUM_VERSION, -165)
// Resolving a hostname to an IP address list included the IPv4 address
// "127.0.53.53". This is a special IP address which ICANN has recommended to
// indicate there was a name collision, and alert admins to a potential
// problem.
NET_ERROR(ICANN_NAME_COLLISION, -166)
// Certificate error codes
//
// The values of certificate error codes must be consecutive.
// The server responded with a certificate whose common name did not match
// the host name. This could mean:
//
// 1. An attacker has redirected our traffic to their server and is
// presenting a certificate for which they know the private key.
//
// 2. The server is misconfigured and responding with the wrong cert.
//
// 3. The user is on a wireless network and is being redirected to the
// network's login page.
//
// 4. The OS has used a DNS search suffix and the server doesn't have
// a certificate for the abbreviated name in the address bar.
//
NET_ERROR(CERT_COMMON_NAME_INVALID, -200)
// The server responded with a certificate that, by our clock, appears to
// either not yet be valid or to have expired. This could mean:
//
// 1. An attacker is presenting an old certificate for which they have
// managed to obtain the private key.
//
// 2. The server is misconfigured and is not presenting a valid cert.
//
// 3. Our clock is wrong.
//
NET_ERROR(CERT_DATE_INVALID, -201)
// The server responded with a certificate that is signed by an authority
// we don't trust. This could mean:
//
// 1. An attacker has substituted the real certificate for a cert that
// contains their public key and is signed by their cousin.
//
// 2. The server operator has a legitimate certificate from a CA we don't
// know about, but should trust.
//
// 3. The server is presenting a self-signed certificate, providing no
// defense against active attackers (but foiling passive attackers).
//
NET_ERROR(CERT_AUTHORITY_INVALID, -202)
// The server responded with a certificate that contains errors.
// This error is not recoverable.
//
// MSDN describes this error as follows:
// "The SSL certificate contains errors."
// NOTE: It's unclear how this differs from ERR_CERT_INVALID. For consistency,
// use that code instead of this one from now on.
//
NET_ERROR(CERT_CONTAINS_ERRORS, -203)
// The certificate has no mechanism for determining if it is revoked. In
// effect, this certificate cannot be revoked.
NET_ERROR(CERT_NO_REVOCATION_MECHANISM, -204)
// Revocation information for the security certificate for this site is not
// available. This could mean:
//
// 1. An attacker has compromised the private key in the certificate and is
// blocking our attempt to find out that the cert was revoked.
//
// 2. The certificate is unrevoked, but the revocation server is busy or
// unavailable.
//
NET_ERROR(CERT_UNABLE_TO_CHECK_REVOCATION, -205)
// The server responded with a certificate that has been revoked.
// We have the capability to ignore this error, but it is probably not the
// right thing to do.
NET_ERROR(CERT_REVOKED, -206)
// The server responded with a certificate that is invalid.
// This error is not recoverable.
//
// MSDN describes this error as follows:
// "The SSL certificate is invalid."
//
NET_ERROR(CERT_INVALID, -207)
// The server responded with a certificate that is signed using a weak
// signature algorithm.
NET_ERROR(CERT_WEAK_SIGNATURE_ALGORITHM, -208)
// -209 is available: was CERT_NOT_IN_DNS.
// The host name specified in the certificate is not unique.
NET_ERROR(CERT_NON_UNIQUE_NAME, -210)
// The server responded with a certificate that contains a weak key (e.g.
// a too-small RSA key).
NET_ERROR(CERT_WEAK_KEY, -211)
// The certificate claimed DNS names that are in violation of name constraints.
NET_ERROR(CERT_NAME_CONSTRAINT_VIOLATION, -212)
// The certificate's validity period is too long.
NET_ERROR(CERT_VALIDITY_TOO_LONG, -213)
// Add new certificate error codes here.
//
// Update the value of CERT_END whenever you add a new certificate error
// code.
// The value immediately past the last certificate error code.
NET_ERROR(CERT_END, -214)
// The URL is invalid.
NET_ERROR(INVALID_URL, -300)
// The scheme of the URL is disallowed.
NET_ERROR(DISALLOWED_URL_SCHEME, -301)
// The scheme of the URL is unknown.
NET_ERROR(UNKNOWN_URL_SCHEME, -302)
// Attempting to load a URL resulted in too many redirects.
NET_ERROR(TOO_MANY_REDIRECTS, -310)
// Attempting to load a URL resulted in an unsafe redirect (e.g., a redirect
// to file:// is considered unsafe).
NET_ERROR(UNSAFE_REDIRECT, -311)
// Attempting to load a URL with an unsafe port number. These are port
// numbers that correspond to services, which are not robust to spurious input
// that may be constructed as a result of an allowed web construct (e.g., HTTP
// looks a lot like SMTP, so form submission to port 25 is denied).
NET_ERROR(UNSAFE_PORT, -312)
// The server's response was invalid.
NET_ERROR(INVALID_RESPONSE, -320)
// Error in chunked transfer encoding.
NET_ERROR(INVALID_CHUNKED_ENCODING, -321)
// The server did not support the request method.
NET_ERROR(METHOD_NOT_SUPPORTED, -322)
// The response was 407 (Proxy Authentication Required), yet we did not send
// the request to a proxy.
NET_ERROR(UNEXPECTED_PROXY_AUTH, -323)
// The server closed the connection without sending any data.
NET_ERROR(EMPTY_RESPONSE, -324)
// The headers section of the response is too large.
NET_ERROR(RESPONSE_HEADERS_TOO_BIG, -325)
// The PAC requested by HTTP did not have a valid status code (non-200).
NET_ERROR(PAC_STATUS_NOT_OK, -326)
// The evaluation of the PAC script failed.
NET_ERROR(PAC_SCRIPT_FAILED, -327)
// The response was 416 (Requested range not satisfiable) and the server cannot
// satisfy the range requested.
NET_ERROR(REQUEST_RANGE_NOT_SATISFIABLE, -328)
// The identity used for authentication is invalid.
NET_ERROR(MALFORMED_IDENTITY, -329)
// Content decoding of the response body failed.
NET_ERROR(CONTENT_DECODING_FAILED, -330)
// An operation could not be completed because all network IO
// is suspended.
NET_ERROR(NETWORK_IO_SUSPENDED, -331)
// FLIP data received without receiving a SYN_REPLY on the stream.
NET_ERROR(SYN_REPLY_NOT_RECEIVED, -332)
// Converting the response to target encoding failed.
NET_ERROR(ENCODING_CONVERSION_FAILED, -333)
// The server sent an FTP directory listing in a format we do not understand.
NET_ERROR(UNRECOGNIZED_FTP_DIRECTORY_LISTING_FORMAT, -334)
// Attempted use of an unknown SPDY stream id.
NET_ERROR(INVALID_SPDY_STREAM, -335)
// There are no supported proxies in the provided list.
NET_ERROR(NO_SUPPORTED_PROXIES, -336)
// There is a SPDY protocol error.
NET_ERROR(SPDY_PROTOCOL_ERROR, -337)
// Credentials could not be established during HTTP Authentication.
NET_ERROR(INVALID_AUTH_CREDENTIALS, -338)
// An HTTP Authentication scheme was tried which is not supported on this
// machine.
NET_ERROR(UNSUPPORTED_AUTH_SCHEME, -339)
// Detecting the encoding of the response failed.
NET_ERROR(ENCODING_DETECTION_FAILED, -340)
// (GSSAPI) No Kerberos credentials were available during HTTP Authentication.
NET_ERROR(MISSING_AUTH_CREDENTIALS, -341)
// An unexpected, but documented, SSPI or GSSAPI status code was returned.
NET_ERROR(UNEXPECTED_SECURITY_LIBRARY_STATUS, -342)
// The environment was not set up correctly for authentication (for
// example, no KDC could be found or the principal is unknown.
NET_ERROR(MISCONFIGURED_AUTH_ENVIRONMENT, -343)
// An undocumented SSPI or GSSAPI status code was returned.
NET_ERROR(UNDOCUMENTED_SECURITY_LIBRARY_STATUS, -344)
// The HTTP response was too big to drain.
NET_ERROR(RESPONSE_BODY_TOO_BIG_TO_DRAIN, -345)
// The HTTP response contained multiple distinct Content-Length headers.
NET_ERROR(RESPONSE_HEADERS_MULTIPLE_CONTENT_LENGTH, -346)
// SPDY Headers have been received, but not all of them - status or version
// headers are missing, so we're expecting additional frames to complete them.
NET_ERROR(INCOMPLETE_SPDY_HEADERS, -347)
// No PAC URL configuration could be retrieved from DHCP. This can indicate
// either a failure to retrieve the DHCP configuration, or that there was no
// PAC URL configured in DHCP.
NET_ERROR(PAC_NOT_IN_DHCP, -348)
// The HTTP response contained multiple Content-Disposition headers.
NET_ERROR(RESPONSE_HEADERS_MULTIPLE_CONTENT_DISPOSITION, -349)
// The HTTP response contained multiple Location headers.
NET_ERROR(RESPONSE_HEADERS_MULTIPLE_LOCATION, -350)
// SPDY server refused the stream. Client should retry. This should never be a
// user-visible error.
NET_ERROR(SPDY_SERVER_REFUSED_STREAM, -351)
// SPDY server didn't respond to the PING message.
NET_ERROR(SPDY_PING_FAILED, -352)
// Obsolete. Kept here to avoid reuse, as the old error can still appear on
// histograms.
// NET_ERROR(PIPELINE_EVICTION, -353)
// The HTTP response body transferred fewer bytes than were advertised by the
// Content-Length header when the connection is closed.
NET_ERROR(CONTENT_LENGTH_MISMATCH, -354)
// The HTTP response body is transferred with Chunked-Encoding, but the
// terminating zero-length chunk was never sent when the connection is closed.
NET_ERROR(INCOMPLETE_CHUNKED_ENCODING, -355)
// There is a QUIC protocol error.
NET_ERROR(QUIC_PROTOCOL_ERROR, -356)
// The HTTP headers were truncated by an EOF.
NET_ERROR(RESPONSE_HEADERS_TRUNCATED, -357)
// The QUIC crypto handshake failed. This means that the server was unable
// to read any requests sent, so they may be resent.
NET_ERROR(QUIC_HANDSHAKE_FAILED, -358)
// An https resource was requested over an insecure QUIC connection.
NET_ERROR(REQUEST_FOR_SECURE_RESOURCE_OVER_INSECURE_QUIC, -359)
// Transport security is inadequate for the SPDY version.
NET_ERROR(SPDY_INADEQUATE_TRANSPORT_SECURITY, -360)
// The peer violated SPDY flow control.
NET_ERROR(SPDY_FLOW_CONTROL_ERROR, -361)
// The peer sent an improperly sized SPDY frame.
NET_ERROR(SPDY_FRAME_SIZE_ERROR, -362)
// Decoding or encoding of compressed SPDY headers failed.
NET_ERROR(SPDY_COMPRESSION_ERROR, -363)
// Proxy Auth Requested without a valid Client Socket Handle.
NET_ERROR(PROXY_AUTH_REQUESTED_WITH_NO_CONNECTION, -364)
// HTTP_1_1_REQUIRED error code received on HTTP/2 session.
NET_ERROR(HTTP_1_1_REQUIRED, -365)
// HTTP_1_1_REQUIRED error code received on HTTP/2 session to proxy.
NET_ERROR(PROXY_HTTP_1_1_REQUIRED, -366)
// The PAC script terminated fatally and must be reloaded.
NET_ERROR(PAC_SCRIPT_TERMINATED, -367)
// The certificate offered by the alternative server is not valid for the
// origin, a violation of HTTP Alternative Services specification Section 2.1,
// https://tools.ietf.org/id/draft-ietf-httpbis-alt-svc-06.html#host_auth.
NET_ERROR(ALTERNATIVE_CERT_NOT_VALID_FOR_ORIGIN, -368)
// The cache does not have the requested entry.
NET_ERROR(CACHE_MISS, -400)
// Unable to read from the disk cache.
NET_ERROR(CACHE_READ_FAILURE, -401)
// Unable to write to the disk cache.
NET_ERROR(CACHE_WRITE_FAILURE, -402)
// The operation is not supported for this entry.
NET_ERROR(CACHE_OPERATION_NOT_SUPPORTED, -403)
// The disk cache is unable to open this entry.
NET_ERROR(CACHE_OPEN_FAILURE, -404)
// The disk cache is unable to create this entry.
NET_ERROR(CACHE_CREATE_FAILURE, -405)
// Multiple transactions are racing to create disk cache entries. This is an
// internal error returned from the HttpCache to the HttpCacheTransaction that
// tells the transaction to restart the entry-creation logic because the state
// of the cache has changed.
NET_ERROR(CACHE_RACE, -406)
// The cache was unable to read a checksum record on an entry. This can be
// returned from attempts to read from the cache. It is an internal error,
// returned by the SimpleCache backend, but not by any URLRequest methods
// or members.
NET_ERROR(CACHE_CHECKSUM_READ_FAILURE, -407)
// The cache found an entry with an invalid checksum. This can be returned from
// attempts to read from the cache. It is an internal error, returned by the
// SimpleCache backend, but not by any URLRequest methods or members.
NET_ERROR(CACHE_CHECKSUM_MISMATCH, -408)
// Internal error code for the HTTP cache. The cache lock timeout has fired.
NET_ERROR(CACHE_LOCK_TIMEOUT, -409)
// Received a challenge after the transaction has read some data, and the
// credentials aren't available. There isn't a way to get them at that point.
NET_ERROR(CACHE_AUTH_FAILURE_AFTER_READ, -410)
// The server's response was insecure (e.g. there was a cert error).
NET_ERROR(INSECURE_RESPONSE, -501)
// The server responded to a <keygen> with a generated client cert that we
// don't have the matching private key for.
NET_ERROR(NO_PRIVATE_KEY_FOR_CERT, -502)
// An error adding to the OS certificate database (e.g. OS X Keychain).
NET_ERROR(ADD_USER_CERT_FAILED, -503)
// *** Code -600 is reserved (was FTP_PASV_COMMAND_FAILED). ***
// A generic error for failed FTP control connection command.
// If possible, please use or add a more specific error code.
NET_ERROR(FTP_FAILED, -601)
// The server cannot fulfill the request at this point. This is a temporary
// error.
// FTP response code 421.
NET_ERROR(FTP_SERVICE_UNAVAILABLE, -602)
// The server has aborted the transfer.
// FTP response code 426.
NET_ERROR(FTP_TRANSFER_ABORTED, -603)
// The file is busy, or some other temporary error condition on opening
// the file.
// FTP response code 450.
NET_ERROR(FTP_FILE_BUSY, -604)
// Server rejected our command because of syntax errors.
// FTP response codes 500, 501.
NET_ERROR(FTP_SYNTAX_ERROR, -605)
// Server does not support the command we issued.
// FTP response codes 502, 504.
NET_ERROR(FTP_COMMAND_NOT_SUPPORTED, -606)
// Server rejected our command because we didn't issue the commands in the
// right order.
// FTP response code 503.
NET_ERROR(FTP_BAD_COMMAND_SEQUENCE, -607)
// PKCS #12 import failed due to incorrect password.
NET_ERROR(PKCS12_IMPORT_BAD_PASSWORD, -701)
// PKCS #12 import failed due to other error.
NET_ERROR(PKCS12_IMPORT_FAILED, -702)
// CA import failed - not a CA cert.
NET_ERROR(IMPORT_CA_CERT_NOT_CA, -703)
// Import failed - certificate already exists in database.
// Note it's a little weird that this is an error while reimporting a PKCS #12
// is OK (a no-op). That's how Mozilla does it, though.
NET_ERROR(IMPORT_CERT_ALREADY_EXISTS, -704)
// CA import failed due to some other error.
NET_ERROR(IMPORT_CA_CERT_FAILED, -705)
// Server certificate import failed due to some internal error.
NET_ERROR(IMPORT_SERVER_CERT_FAILED, -706)
// PKCS #12 import failed due to invalid MAC.
NET_ERROR(PKCS12_IMPORT_INVALID_MAC, -707)
// PKCS #12 import failed due to invalid/corrupt file.
NET_ERROR(PKCS12_IMPORT_INVALID_FILE, -708)
// PKCS #12 import failed due to unsupported features.
NET_ERROR(PKCS12_IMPORT_UNSUPPORTED, -709)
// Key generation failed.
NET_ERROR(KEY_GENERATION_FAILED, -710)
// Error -711 was removed (ORIGIN_BOUND_CERT_GENERATION_FAILED)
// Failure to export private key.
NET_ERROR(PRIVATE_KEY_EXPORT_FAILED, -712)
// Self-signed certificate generation failed.
NET_ERROR(SELF_SIGNED_CERT_GENERATION_FAILED, -713)
// The certificate database changed in some way.
NET_ERROR(CERT_DATABASE_CHANGED, -714)
// Failure to import Channel ID.
NET_ERROR(CHANNEL_ID_IMPORT_FAILED, -715)
// DNS error codes.
// DNS resolver received a malformed response.
NET_ERROR(DNS_MALFORMED_RESPONSE, -800)
// DNS server requires TCP
NET_ERROR(DNS_SERVER_REQUIRES_TCP, -801)
// DNS server failed. This error is returned for all of the following
// error conditions:
// 1 - Format error - The name server was unable to interpret the query.
// 2 - Server failure - The name server was unable to process this query
// due to a problem with the name server.
// 4 - Not Implemented - The name server does not support the requested
// kind of query.
// 5 - Refused - The name server refuses to perform the specified
// operation for policy reasons.
NET_ERROR(DNS_SERVER_FAILED, -802)
// DNS transaction timed out.
NET_ERROR(DNS_TIMED_OUT, -803)
// The entry was not found in cache, for cache-only lookups.
NET_ERROR(DNS_CACHE_MISS, -804)
// Suffix search list rules prevent resolution of the given host name.
NET_ERROR(DNS_SEARCH_EMPTY, -805)
// Failed to sort addresses according to RFC3484.
NET_ERROR(DNS_SORT_ERROR, -806)
| bsd-3-clause |
ryanrhymes/openblas | lib/OpenBLAS-0.2.19/lapack-netlib/LAPACKE/src/lapacke_zgetri.c | 3393 | /*****************************************************************************
Copyright (c) 2014, Intel Corp.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************
* Contents: Native high-level C interface to LAPACK function zgetri
* Author: Intel Corporation
* Generated November 2015
*****************************************************************************/
#include "lapacke_utils.h"
lapack_int LAPACKE_zgetri( int matrix_layout, lapack_int n,
lapack_complex_double* a, lapack_int lda,
const lapack_int* ipiv )
{
lapack_int info = 0;
lapack_int lwork = -1;
lapack_complex_double* work = NULL;
lapack_complex_double work_query;
if( matrix_layout != LAPACK_COL_MAJOR && matrix_layout != LAPACK_ROW_MAJOR ) {
LAPACKE_xerbla( "LAPACKE_zgetri", -1 );
return -1;
}
#ifndef LAPACK_DISABLE_NAN_CHECK
/* Optionally check input matrices for NaNs */
if( LAPACKE_zge_nancheck( matrix_layout, n, n, a, lda ) ) {
return -3;
}
#endif
/* Query optimal working array(s) size */
info = LAPACKE_zgetri_work( matrix_layout, n, a, lda, ipiv, &work_query,
lwork );
if( info != 0 ) {
goto exit_level_0;
}
lwork = LAPACK_Z2INT( work_query );
/* Allocate memory for work arrays */
work = (lapack_complex_double*)
LAPACKE_malloc( sizeof(lapack_complex_double) * lwork );
if( work == NULL ) {
info = LAPACK_WORK_MEMORY_ERROR;
goto exit_level_0;
}
/* Call middle-level interface */
info = LAPACKE_zgetri_work( matrix_layout, n, a, lda, ipiv, work, lwork );
/* Release memory and exit */
LAPACKE_free( work );
exit_level_0:
if( info == LAPACK_WORK_MEMORY_ERROR ) {
LAPACKE_xerbla( "LAPACKE_zgetri", info );
}
return info;
}
| bsd-3-clause |
Firehed/phpunit | tests/_files/Metadata/Attribute/tests/DoesNotPerformAssertionsTest.php | 566 | <?php declare(strict_types=1);
/*
* This file is part of PHPUnit.
*
* (c) Sebastian Bergmann <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace PHPUnit\TestFixture\Metadata\Attribute;
use PHPUnit\Framework\Attributes\DoesNotPerformAssertions;
use PHPUnit\Framework\TestCase;
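// Fixture note: the attribute below (repeated on the test method) marks
// tests that intentionally make no assertions, so PHPUnit should not
// report them as risky.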
#[DoesNotPerformAssertions]
final class DoesNotPerformAssertionsTest extends TestCase
{
#[DoesNotPerformAssertions]
public function testOne(): void
{
}
}
| bsd-3-clause |
cetium/evproto | evproto/evproto.cc | 2066 | // Copyright 2010, Shuo Chen. All rights reserved.
// http://code.google.com/p/evproto/
//
// Use of this source code is governed by a BSD-style license
// that can be found in the License file.
// Author: Shuo Chen (chenshuo at chenshuo dot com)
//
#include "evproto/evproto.h"
#include <gflags/gflags.h>
#include <glog/logging.h>
#include <google/protobuf/message.h>
#include <event2/event.h>
#include <event2/thread.h>
#if !defined(LIBEVENT_VERSION_NUMBER) || LIBEVENT_VERSION_NUMBER < 0x02000400
#error "This version of Libevent is not supported; Get 2.0.4-alpha or later."
#endif
namespace evproto
{
namespace internal
{
void eventLogToGlog(int severity, const char *msg)
{
switch (severity) {
case _EVENT_LOG_DEBUG:
VLOG(1) << msg;
break;
case _EVENT_LOG_MSG:
LOG(INFO) << msg;
break;
case _EVENT_LOG_WARN:
LOG(WARNING) << msg;
break;
case _EVENT_LOG_ERR:
LOG(ERROR) << msg;
break;
default:
LOG(ERROR) << msg;
break;
}
}
void protobufLogHandler(google::protobuf::LogLevel level, const char* filename, int line,
const std::string& message)
{
google::LogMessage(filename, line, level).stream() << message;
}
void eventFatal(int err)
{
LOG(FATAL) << "libevent2 fatal " << err;
}
} // namespace internal
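// Illustrative entry-point usage (a hypothetical main(), not from the
// original sources): initialize() should run once, before any libevent
// objects are created, so logging and threading support are wired up first.
//
//   int main(int argc, char* argv[])
//   {
//     evproto::initialize(argc, argv);
//     // ... create event_base and the rest of the program ...
//   }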
// TODO: pass back modified argc and argv.
void initialize(int argc, char* argv[])
{
google::InitGoogleLogging(argv[0]);
::event_set_log_callback(internal::eventLogToGlog);
google::protobuf::SetLogHandler(internal::protobufLogHandler);
#if EVTHREAD_USE_WINDOWS_THREADS_IMPLEMENTED
CHECK_EQ(::evthread_use_windows_threads(), 0);
#elif EVTHREAD_USE_PTHREADS_IMPLEMENTED
CHECK_EQ(::evthread_use_pthreads(), 0);
#endif
#ifndef NDEBUG
// ::evthread_enable_lock_debuging();
// ::event_enable_debug_mode();
#endif
CHECK_EQ(LIBEVENT_VERSION_NUMBER, ::event_get_version_number())
<< "libevent2 version number mismatch";
google::ParseCommandLineFlags(&argc, &argv, true);
LOG(INFO) << argv[0] << " initialized";
}
}
| bsd-3-clause |