max_stars_count
int64 301
224k
| text
stringlengths 6
1.05M
| token_count
int64 3
727k
|
---|---|---|
340 | # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""Test cases for the packages module"""
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import unittest
from helpers import centos, docker, invalid_distro, ubuntu
from hpccm.building_blocks.packages import packages
class Test_packages(unittest.TestCase):
    """Unit tests for the ``packages`` building block."""

    def setUp(self):
        """Disable logging output messages"""
        # The building block logs errors for invalid configurations; silence
        # them so expected-failure tests don't pollute the test output.
        logging.disable(logging.ERROR)

    @ubuntu
    @docker
    def test_basic_ubuntu(self):
        """Basic packages"""
        # Note the expected output lists packages sorted (g++, gcc, gfortran)
        # even though the input order differs, and removes the apt cache.
        p = packages(ospackages=['gcc', 'g++', 'gfortran'])
        self.assertEqual(str(p),
r'''RUN apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
g++ \
gcc \
gfortran && \
rm -rf /var/lib/apt/lists/*''')

    @centos
    @docker
    def test_basic_centos(self):
        """Basic packages"""
        # yum-based distributions install via `yum install` and clear
        # /var/cache/yum afterwards.
        p = packages(ospackages=['gcc', 'gcc-c++', 'gcc-fortran'])
        self.assertEqual(str(p),
r'''RUN yum install -y \
gcc \
gcc-c++ \
gcc-fortran && \
rm -rf /var/cache/yum/*''')

    @invalid_distro
    def test_invalid_distro(self):
        """Invalid package type specified"""
        # An unrecognised Linux distribution must raise RuntimeError.
        with self.assertRaises(RuntimeError):
            packages(ospackages=['gcc', 'g++', 'gfortran'])
| 747 |
9,491 | /*
+----------------------------------------------------------------------+
| HipHop for PHP |
+----------------------------------------------------------------------+
| Copyright (c) 2010-present Facebook, Inc. (http://www.facebook.com) |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| <EMAIL> so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
*/
#include "hphp/util/stack-trace.h"
#include <gtest/gtest.h>
namespace HPHP {
// Three-way comparison of two ranges using the PerfMap comparator:
// negative when a orders before b, positive when after, zero when the
// comparator considers them equivalent (i.e. they overlap).
static int rangeCmp(StackTrace::PerfMap::Range a,
                    StackTrace::PerfMap::Range b) {
  StackTrace::PerfMap::Range::Cmp less;
  const int aBeforeB = less(a, b) ? 1 : 0;
  const int bBeforeA = less(b, a) ? 1 : 0;
  return bBeforeA - aBeforeB;
}
// True when a orders strictly before b.
static bool rangeLt(StackTrace::PerfMap::Range a,
                    StackTrace::PerfMap::Range b) {
  const int order = rangeCmp(a, b);
  return order < 0;
}
// True when the comparator treats a and b as equivalent.
static bool rangeEq(StackTrace::PerfMap::Range a,
                    StackTrace::PerfMap::Range b) {
  const int order = rangeCmp(a, b);
  return order == 0;
}
// True when a orders strictly after b.
static bool rangeGt(StackTrace::PerfMap::Range a,
                    StackTrace::PerfMap::Range b) {
  const int order = rangeCmp(a, b);
  return order > 0;
}
// Exhaustive check of the PerfMap::Range ordering. Range a is always [3,8).
// The ASCII diagrams show a against each b on a 0..11 number line: two
// ranges compare "equal" when they overlap/touch as drawn, and order
// before/after only when one lies entirely on one side of the other.
TEST(StackTraceTest, Cmp) {
  // 1 1
  // 0 1 234 5 6 789 0 1
  // a: [----------)
  // b: |
  EXPECT_TRUE(rangeGt({3,8}, {0,0}));
  // a: [----------)
  // b: [--)
  EXPECT_TRUE(rangeGt({3,8}, {0,1}));
  // a: [----------)
  // b: [-----)
  EXPECT_TRUE(rangeGt({3,8}, {0,2}));
  // a: [----------)
  // b: [------)
  EXPECT_TRUE(rangeGt({3,8}, {0,3}));
  // a: [----------)
  // b: [-------)
  EXPECT_TRUE(rangeEq({3,8}, {0,4}));
  // a: [----------)
  // b: [-------)
  EXPECT_TRUE(rangeEq({3,8}, {0,5}));
  // a: [----------)
  // b: [----------------)
  EXPECT_TRUE(rangeEq({3,8}, {0,7}));
  // a: [----------)
  // b: [-----------------)
  EXPECT_TRUE(rangeEq({3,8}, {0,8}));
  // a: [----------)
  // b: [------------------)
  EXPECT_TRUE(rangeEq({3,8}, {0,9}));
  // a: [----------)
  // b: [---------------------)
  EXPECT_TRUE(rangeEq({3,8}, {0,10}));
  // b ranges starting at 2: still entirely before a only while ending <= 3.
  // 1 1
  // 0 1 234 5 6 789 0 1
  // a: [----------)
  // b: |
  EXPECT_TRUE(rangeGt({3,8}, {2,2}));
  // a: [----------)
  // b: [)
  EXPECT_TRUE(rangeGt({3,8}, {2,3}));
  // a: [----------)
  // b: [-)
  EXPECT_TRUE(rangeEq({3,8}, {2,4}));
  // a: [----------)
  // b: [----)
  EXPECT_TRUE(rangeEq({3,8}, {2,5}));
  // a: [----------)
  // b: [----------)
  EXPECT_TRUE(rangeEq({3,8}, {2,7}));
  // a: [----------)
  // b: [-----------)
  EXPECT_TRUE(rangeEq({3,8}, {2,8}));
  // a: [----------)
  // b: [------------)
  EXPECT_TRUE(rangeEq({3,8}, {2,9}));
  // a: [----------)
  // b: [---------------)
  EXPECT_TRUE(rangeEq({3,8}, {2,10}));
  // b ranges starting exactly at a's start.
  // 1 1
  // 0 1 234 5 6 789 0 1
  // a: [----------)
  // b: |
  EXPECT_TRUE(rangeGt({3,8}, {3,3}));
  // a: [----------)
  // b: [)
  EXPECT_TRUE(rangeEq({3,8}, {3,4}));
  // a: [----------)
  // b: [---)
  EXPECT_TRUE(rangeEq({3,8}, {3,5}));
  // a: [----------)
  // b: [---------)
  EXPECT_TRUE(rangeEq({3,8}, {3,7}));
  // a: [----------)
  // b: [----------)
  EXPECT_TRUE(rangeEq({3,8}, {3,8}));
  // a: [----------)
  // b: [-----------)
  EXPECT_TRUE(rangeEq({3,8}, {3,9}));
  // a: [----------)
  // b: [--------------)
  EXPECT_TRUE(rangeEq({3,8}, {3,10}));
  // b ranges starting strictly inside a (even empty ones compare equal).
  // 1 1
  // 0 1 234 5 6 789 0 1
  // a: [----------)
  // b: |
  EXPECT_TRUE(rangeEq({3,8}, {4,4}));
  // a: [----------)
  // b: [--)
  EXPECT_TRUE(rangeEq({3,8}, {4,5}));
  // a: [----------)
  // b: [--------)
  EXPECT_TRUE(rangeEq({3,8}, {4,7}));
  // a: [----------)
  // b: [---------)
  EXPECT_TRUE(rangeEq({3,8}, {4,8}));
  // a: [----------)
  // b: [----------)
  EXPECT_TRUE(rangeEq({3,8}, {4,9}));
  // a: [----------)
  // b: [-------------)
  EXPECT_TRUE(rangeEq({3,8}, {4,10}));
  // 1 1
  // 0 1 234 5 6 789 0 1
  // a: [----------)
  // b: |
  EXPECT_TRUE(rangeEq({3,8}, {5,5}));
  // a: [----------)
  // b: [--)
  EXPECT_TRUE(rangeEq({3,8}, {5,6}));
  // a: [----------)
  // b: [-----)
  EXPECT_TRUE(rangeEq({3,8}, {5,7}));
  // a: [----------)
  // b: [------)
  EXPECT_TRUE(rangeEq({3,8}, {5,8}));
  // a: [----------)
  // b: [-------)
  EXPECT_TRUE(rangeEq({3,8}, {5,9}));
  // a: [----------)
  // b: [----------)
  EXPECT_TRUE(rangeEq({3,8}, {5,10}));
  // 1 1
  // 0 1 234 5 6 789 0 1
  // a: [----------)
  // b: |
  EXPECT_TRUE(rangeEq({3,8}, {7,7}));
  // a: [----------)
  // b: [)
  EXPECT_TRUE(rangeEq({3,8}, {7,8}));
  // a: [----------)
  // b: [-)
  EXPECT_TRUE(rangeEq({3,8}, {7,9}));
  // a: [----------)
  // b: [----)
  EXPECT_TRUE(rangeEq({3,8}, {7,10}));
  // b ranges starting at or past a's end order after a.
  // 1 1
  // 0 1 234 5 6 789 0 1
  // a: [----------)
  // b: |
  EXPECT_TRUE(rangeLt({3,8}, {8,8}));
  // a: [----------)
  // b: [)
  EXPECT_TRUE(rangeLt({3,8}, {8,9}));
  // a: [----------)
  // b: [---)
  EXPECT_TRUE(rangeLt({3,8}, {8,10}));
  // 1 1
  // 0 1 234 5 6 789 0 1
  // a: [----------)
  // b: |
  EXPECT_TRUE(rangeLt({3,8}, {9,9}));
  // a: [----------)
  // b: [--)
  EXPECT_TRUE(rangeLt({3,8}, {9,10}));
  // 1 1
  // 0 1 234 5 6 789 0 1
  // a: [----------)
  // b: |
  EXPECT_TRUE(rangeLt({3,8}, {10,10}));
  // a: [----------)
  // b: [--)
  EXPECT_TRUE(rangeLt({3,8}, {10,11}));
  // Degenerate (empty) ranges still order by position.
  // 0 1 2
  // a: |
  // b: |
  EXPECT_TRUE(rangeGt({1,1}, {0,0}));
  // a: |
  // b: |
  EXPECT_TRUE(rangeEq({1,1}, {1,1}));
  // a: |
  // b: |
  EXPECT_TRUE(rangeLt({1,1}, {2,2}));
}
}
| 3,841 |
1,408 | /*
* Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef SCU_H
#define SCU_H

/* Disable the SCU for the cluster identified by `mpidr` (an MPIDR value). */
void disable_scu(unsigned long mpidr);
/* Enable the SCU for the cluster identified by `mpidr` (an MPIDR value). */
void enable_scu(unsigned long mpidr);

#endif /* SCU_H */
| 95 |
1,091 | /*
* Copyright 2018-present Open Networking Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.onosproject.odtn.utils.openconfig;
import java.util.List;
import org.onosproject.yang.gen.v1.openconfigplatform.rev20161222.openconfigplatform.platformcomponenttop.DefaultComponents;
import org.onosproject.yang.gen.v1.openconfigplatform.rev20161222.openconfigplatform.platformcomponenttop.components.Component;
import org.onosproject.yang.gen.v1.openconfigplatformtransceiver.rev20170708.openconfigplatformtransceiver.components.component.DefaultAugmentedOcPlatformComponent;
import org.onosproject.yang.gen.v1.openconfigplatformtransceiver.rev20170708.openconfigplatformtransceiver.porttransceivertop.DefaultTransceiver;
import org.onosproject.yang.gen.v1.openconfigplatformtransceiver.rev20170708.openconfigplatformtransceiver.porttransceivertop.transceiver.Config;
import org.onosproject.yang.gen.v1.openconfigplatformtransceiver.rev20170708.openconfigplatformtransceiver.porttransceivertop.transceiver.DefaultConfig;
import org.onosproject.yang.gen.v1.openconfigtransporttypes.rev20170816.openconfigtransporttypes.Eth100GbaseLr4;
import org.onosproject.yang.gen.v1.openconfigtransporttypes.rev20170816.openconfigtransporttypes.Qsfp28;
import org.onosproject.yang.model.DataNode;
import com.google.common.annotations.Beta;
import com.google.common.collect.ImmutableList;
import static org.onosproject.odtn.utils.YangToolUtil.toDataNode;
/**
* Utility methods dealing with OpenConfig transceiver.
* <p>
* Split into classes for the purpose of avoiding "Config" class collisions.
*/
@Beta
public abstract class Transceiver {

    /**
     * Builds the preconfiguration subtree for a transceiver component.
     * <p>
     * The component named {@code componentName} is augmented with a
     * transceiver whose form factor is QSFP28 and whose Ethernet PMD is
     * 100GBASE-LR4.
     *
     * @param componentName name of the platform component to preconfigure
     * @return immutable single-element list holding the components DataNode
     */
    public static List<DataNode> preconf(String componentName) {
        DefaultComponents root = new DefaultComponents();
        Component component = PlainPlatform.componentWithName(componentName);
        root.addToComponent(component);

        // Shim carrying the transceiver augmentation of 'component'.
        DefaultAugmentedOcPlatformComponent augmented =
            new DefaultAugmentedOcPlatformComponent();
        DefaultTransceiver transceiver = new DefaultTransceiver();

        Config transceiverConfig = new DefaultConfig();
        // TODO make these configurable
        transceiverConfig.formFactorPreconf(Qsfp28.class);
        transceiverConfig.ethernetPmdPreconf(Eth100GbaseLr4.class);

        transceiver.config(transceiverConfig);
        augmented.transceiver(transceiver);
        component.addAugmentation(augmented);

        return ImmutableList.of(toDataNode(root));
    }
}
| 915 |
852 | #!/usr/bin/env python3
"""
_trackingOnlyEra_Run2_2018
Scenario supporting proton collisions and tracking only reconstruction for HP beamspot
"""
import os
import sys
from Configuration.DataProcessing.Impl.trackingOnly import trackingOnly
import FWCore.ParameterSet.Config as cms
from Configuration.Eras.Era_Run2_2018_cff import Run2_2018
from Configuration.DataProcessing.Impl.pp import pp
class trackingOnlyEra_Run2_2018(trackingOnly):
    """Data-processing scenario: Run2 2018 era, tracking-only reconstruction.

    Supports proton collisions; used to run the high-performance beamspot
    (HP BS) workflow at the PCL.
    """

    def __init__(self):
        trackingOnly.__init__(self)
        # tracking only RECO is sufficient, to run high performance BS at PCL;
        # some dedicated customization are required, though: customisePostEra_Run2_2018_trackingOnly
        self.recoSeq=':reconstruction_trackingOnly'  # restrict RECO to the tracking-only sequence
        self.cbSc='pp'    # proton-proton collision scenario base
        self.addEI=False  # no event-interpretation step
        self.eras=Run2_2018
        # era-specific customisation functions, per processing tier
        self.promptCustoms += [ 'Configuration/DataProcessing/RecoTLR.customisePostEra_Run2_2018' ]
        self.expressCustoms += [ 'Configuration/DataProcessing/RecoTLR.customisePostEra_Run2_2018_express_trackingOnly' ]
        self.visCustoms += [ 'Configuration/DataProcessing/RecoTLR.customisePostEra_Run2_2018' ]
"""
_trackingOnlyEra_Run2_2018
Implement configuration building for data processing for proton
collision data taking for Run2, 2018 high performance beamspot
"""
| 464 |
561 | <reponame>ddesmond/gaffer<filename>src/GafferScene/CopyPrimitiveVariables.cpp
//////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2019, <NAME>. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above
// copyright notice, this list of conditions and the following
// disclaimer.
//
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided with
// the distribution.
//
// * Neither the name of <NAME> nor the names of
// any other contributors to this software may be used to endorse or
// promote products derived from this software without specific prior
// written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
//////////////////////////////////////////////////////////////////////////
#include "GafferScene/CopyPrimitiveVariables.h"
#include "GafferScene/SceneAlgo.h"
#include "Gaffer/ArrayPlug.h"
#include "IECoreScene/Primitive.h"
using namespace std;
using namespace IECore;
using namespace IECoreScene;
using namespace Gaffer;
using namespace GafferScene;
GAFFER_NODE_DEFINE_TYPE( CopyPrimitiveVariables );
size_t CopyPrimitiveVariables::g_firstPlugIndex = 0;
CopyPrimitiveVariables::CopyPrimitiveVariables( const std::string &name )
	:	Deformer( name )
{
	// Record where our plugs begin so the accessors can look them up by index.
	storeIndexOfNextChild( g_firstPlugIndex );
	addChild( new ScenePlug( "source" ) );                            // scene to copy variables from
	addChild( new StringPlug( "primitiveVariables", Plug::In, "" ) ); // match patterns; "" copies nothing
	addChild( new StringPlug( "sourceLocation" ) );                   // optional source location path
}
CopyPrimitiveVariables::~CopyPrimitiveVariables()
{
	// No explicit cleanup required.
}
// Plug accessors. Children were added by the constructor in a fixed order,
// so each is retrieved by index relative to g_firstPlugIndex.

GafferScene::ScenePlug *CopyPrimitiveVariables::sourcePlug()
{
	return getChild<ScenePlug>( g_firstPlugIndex );
}

const GafferScene::ScenePlug *CopyPrimitiveVariables::sourcePlug() const
{
	return getChild<ScenePlug>( g_firstPlugIndex );
}

// Match patterns selecting which variables to copy.
Gaffer::StringPlug *CopyPrimitiveVariables::primitiveVariablesPlug()
{
	return getChild<StringPlug>( g_firstPlugIndex + 1 );
}

const Gaffer::StringPlug *CopyPrimitiveVariables::primitiveVariablesPlug() const
{
	return getChild<StringPlug>( g_firstPlugIndex + 1 );
}

// Optional location to copy from; empty means "same location as destination".
Gaffer::StringPlug *CopyPrimitiveVariables::sourceLocationPlug()
{
	return getChild<StringPlug>( g_firstPlugIndex + 2 );
}

const Gaffer::StringPlug *CopyPrimitiveVariables::sourceLocationPlug() const
{
	return getChild<StringPlug>( g_firstPlugIndex + 2 );
}
bool CopyPrimitiveVariables::affectsProcessedObject( const Gaffer::Plug *input ) const
{
	// Dirty whenever the base class says so, or when any input read by
	// hashProcessedObject()/computeProcessedObject() changes.
	if( Deformer::affectsProcessedObject( input ) )
	{
		return true;
	}
	return
		input == sourcePlug()->objectPlug() ||
		input == sourcePlug()->existsPlug() ||
		input == primitiveVariablesPlug() ||
		input == sourceLocationPlug()
	;
}
void CopyPrimitiveVariables::hashProcessedObject( const ScenePath &path, const Gaffer::Context *context, IECore::MurmurHash &h ) const
{
	// Mirrors computeProcessedObject() : the hash must account for exactly
	// the inputs that compute reads.
	Deformer::hashProcessedObject( path, context, h );
	primitiveVariablesPlug()->hash( h );

	boost::optional<ScenePath> sourceLocationPath;
	const string sourceLocation = sourceLocationPlug()->getValue();
	if( !sourceLocation.empty() )
	{
		/// \todo When we can use `std::optional` from C++17, `emplace()`
		/// will return a reference, allowing us to call
		/// `stringToPath( sourceLocation, sourceLocationPath.emplace() )`.
		sourceLocationPath.emplace();
		ScenePlug::stringToPath( sourceLocation, *sourceLocationPath );
	}

	// If the source location doesn't exist, compute passes the input object
	// through unchanged, so use its hash directly.
	if( !sourcePlug()->exists( sourceLocationPath ? *sourceLocationPath : path ) )
	{
		h = inPlug()->objectPlug()->hash();
		return;
	}

	if( sourceLocationPath )
	{
		h.append( sourcePlug()->objectHash( *sourceLocationPath ) );
	}
	else
	{
		sourcePlug()->objectPlug()->hash( h );
	}
}
IECore::ConstObjectPtr CopyPrimitiveVariables::computeProcessedObject( const ScenePath &path, const Gaffer::Context *context, const IECore::Object *inputObject ) const
{
	// Copies matching primitive variables from the source location onto the
	// input primitive. Returns the input unchanged whenever there is nothing
	// to do : non-primitive input, empty match pattern, missing source
	// location, or a non-primitive source object.
	auto primitive = runTimeCast<const Primitive>( inputObject );
	if( !primitive )
	{
		return inputObject;
	}

	const string primitiveVariables = primitiveVariablesPlug()->getValue();
	if( primitiveVariables.empty() )
	{
		return inputObject;
	}

	boost::optional<ScenePath> sourceLocationPath;
	const string sourceLocation = sourceLocationPlug()->getValue();
	if( !sourceLocation.empty() )
	{
		/// \todo When we can use `std::optional` from C++17, `emplace()`
		/// will return a reference, allowing us to call
		/// `stringToPath( sourceLocation, sourceLocationPath.emplace() )`.
		sourceLocationPath.emplace();
		ScenePlug::stringToPath( sourceLocation, *sourceLocationPath );
	}

	// Fall back to the destination path when no explicit source location is given.
	if( !sourcePlug()->exists( sourceLocationPath ? *sourceLocationPath : path ) )
	{
		return inputObject;
	}

	ConstObjectPtr sourceObject;
	if( sourceLocationPath )
	{
		sourceObject = sourcePlug()->object( *sourceLocationPath );
	}
	else
	{
		sourceObject = sourcePlug()->objectPlug()->getValue();
	}

	auto sourcePrimitive = runTimeCast<const Primitive>( sourceObject.get() );
	if( !sourcePrimitive )
	{
		return inputObject;
	}

	PrimitivePtr result = primitive->copy();
	for( auto &variable : sourcePrimitive->variables )
	{
		if( !StringAlgo::matchMultiple( variable.first, primitiveVariables ) )
		{
			continue;
		}
		if( !result->isPrimitiveVariableValid( variable.second ) )
		{
			string destinationPath; ScenePlug::pathToString( path, destinationPath );
			const string &sourcePath = sourceLocation.size() ? sourceLocation : destinationPath;
			// Fixed : the source path belongs in the "from" slot and the
			// destination path in the "to" slot (they were swapped before).
			throw IECore::Exception( boost::str(
				boost::format( "Cannot copy \"%1%\" from \"%2%\" to \"%3%\" because source and destination primitives have different topology" )
				% variable.first % sourcePath % destinationPath
			) );
		}
		result->variables[variable.first] = variable.second;
	}

	return result;
}
bool CopyPrimitiveVariables::adjustBounds() const
{
	// Bounds only need adjusting when the base class wants it and "P" (the
	// position variable) matches the copy pattern.
	return
		Deformer::adjustBounds() &&
		StringAlgo::matchMultiple( "P", primitiveVariablesPlug()->getValue() )
	;
}
| 2,162 |
5,460 | from .models import instance_metadata_backend
# Instance metadata is not region-scoped, so a single "global" backend is used.
instance_metadata_backends = {"global": instance_metadata_backend}
| 30 |
328 | <reponame>YangLingWHU/gAnswer<filename>genrate_fragments/step2_dedubplicate.py
# encoding=utf-8
'''
Step2: remove the dubplicate triples.
'''
# Collect every unique triple line across the input files; the set removes
# duplicates automatically.
triples = set()


def _load_into(triple_set, path, file_index):
    """Read ``path`` line by line into ``triple_set``, logging progress.

    Prints ``"<file_index>:<line_number>"`` every 100,000 lines so long runs
    show liveness. The file is closed deterministically via ``with``.
    """
    line_number = 1
    with open(path, 'r') as f:
        for line in f:
            triple_set.add(line)
            if line_number % 100000 == 0:
                print("%d:%d" % (file_index, line_number))
            line_number += 1


# Previously this was two copy-pasted read loops; factored into a helper.
_load_into(triples, './pkubase/pkubase-triples.txt', 1)
_load_into(triples, './pkubase/pkubase-types.txt', 2)

print(len(triples))

# Write the de-duplicated triples back out (the old code never closed wf).
with open('./pkubase/pkubase_clean.txt', 'w') as wf:
    for item in triples:
        wf.write(item)
| 442 |
708 | <gh_stars>100-1000
package com.ccj.tabview.mypoptabview.myloader;
import com.ccj.poptabview.base.BaseFilterTabBean;
import java.util.List;
/**
 * Filter tab bean for the pop-tab demo loader.
 * <p>
 * Maps the abstract tab name of {@link BaseFilterTabBean} onto the
 * {@code show_name} field and carries a list of child filter tabs.
 * <p>
 * Created by chenchangjun on 17/7/26.
 */
public class MyFilterTabBean extends BaseFilterTabBean<MyFilterTabBean.MyChildFilterBean> {

    /* Case 1 --- e.g. the fields you may need */
    protected String show_name; // display field; backs getTab_name()
    protected String channel_name;
    protected String category_ids;
    protected String tag_ids;
    protected String mall_ids;
    protected List<MyChildFilterBean> tabs;

    @Override
    public String getTab_name() {
        return show_name;
    }

    @Override
    public void setTab_name(String tab_name) {
        this.show_name=tab_name;
    }

    @Override
    public List<MyChildFilterBean> getTabs() {
        return tabs;
    }

    @Override
    public void setTabs(List<MyChildFilterBean> tabs) {
        this.tabs=tabs;
    }

    /**
     * Child (second-level) filter tab. Has no nested tabs of its own:
     * {@code getTabs()} returns {@code null} and {@code setTabs} is a no-op.
     */
    public static class MyChildFilterBean extends BaseFilterTabBean {

        /* Case 1 --- e.g. the fields you may need */
        protected String show_name; // display field; backs getTab_name()
        protected String channel_name;
        protected String category_ids;
        protected String tag_ids;
        protected String mall_ids;

        @Override
        public String getTab_name() {
            return show_name;
        }

        @Override
        public void setTab_name(String tab_name) {
            this.show_name=tab_name;
        }

        @Override
        public List getTabs() {
            return null;
        }

        @Override
        public void setTabs(List tabs) {
            // intentionally empty: a child tab carries no nested tabs
        }

        public String getChannel_name() {
            return channel_name;
        }

        public void setChannel_name(String channel_name) {
            this.channel_name = channel_name;
        }

        public String getCategory_ids() {
            return category_ids;
        }

        public void setCategory_ids(String category_ids) {
            this.category_ids = category_ids;
        }

        public String getTag_ids() {
            return tag_ids;
        }

        public void setTag_ids(String tag_ids) {
            this.tag_ids = tag_ids;
        }

        public String getMall_ids() {
            return mall_ids;
        }

        public void setMall_ids(String mall_ids) {
            this.mall_ids = mall_ids;
        }
    }

    public String getChannel_name() {
        return channel_name;
    }

    public void setChannel_name(String channel_name) {
        this.channel_name = channel_name;
    }

    public String getCategory_ids() {
        return category_ids;
    }

    public void setCategory_ids(String category_ids) {
        this.category_ids = category_ids;
    }

    public String getTag_ids() {
        return tag_ids;
    }

    public void setTag_ids(String tag_ids) {
        this.tag_ids = tag_ids;
    }

    public String getMall_ids() {
        return mall_ids;
    }

    public void setMall_ids(String mall_ids) {
        this.mall_ids = mall_ids;
    }
}
| 1,402 |
778 | <gh_stars>100-1000
package org.aion.zero.impl.sync.handler;
import static org.aion.p2p.V1Constants.TRIE_DATA_REQUEST_MAXIMUM_BATCH_SIZE;
import java.util.Arrays;
import java.util.Collections;
import java.util.Map;
import org.aion.p2p.Ctrl;
import org.aion.p2p.Handler;
import org.aion.p2p.IP2pMgr;
import org.aion.p2p.Ver;
import org.aion.util.types.ByteArrayWrapper;
import org.aion.zero.impl.blockchain.IAionBlockchain;
import org.aion.zero.impl.sync.Act;
import org.aion.zero.impl.sync.DatabaseType;
import org.aion.zero.impl.sync.msg.RequestTrieData;
import org.aion.zero.impl.sync.msg.ResponseTrieData;
import org.slf4j.Logger;
/**
 * Handler for trie node requests from the network.
 *
 * @author <NAME>
 */
public final class RequestTrieDataHandler extends Handler {

    private final Logger log;

    private final IAionBlockchain chain;

    private final IP2pMgr p2p;

    /**
     * Constructor.
     *
     * @param log logger for reporting execution information
     * @param chain the blockchain used by the application
     * @param p2p peer manager used to submit messages
     */
    public RequestTrieDataHandler(
            final Logger log, final IAionBlockchain chain, final IP2pMgr p2p) {
        super(Ver.V1, Ctrl.SYNC, Act.REQUEST_TRIE_DATA);
        this.log = log;
        this.chain = chain;
        this.p2p = p2p;
    }

    /**
     * Decodes a {@link RequestTrieData} message, looks up the requested trie
     * node (plus, when the limit allows, nodes it references) and replies with
     * a {@link ResponseTrieData}. Empty or undecodable messages are logged and
     * dropped; no response is sent when the key cannot be resolved.
     */
    @Override
    public void receive(int peerId, String displayId, final byte[] message) {
        if (message == null || message.length == 0) {
            this.log.debug("<req-trie empty message from peer={}>", displayId);
            return;
        }

        RequestTrieData request = RequestTrieData.decode(message);

        if (request != null) {
            DatabaseType dbType = request.getDbType();
            ByteArrayWrapper key = ByteArrayWrapper.wrap(request.getNodeKey());
            int limit = request.getLimit();

            if (log.isDebugEnabled()) {
                this.log.debug("<req-trie from-db={} key={} peer={}>", dbType, key, displayId);
            }

            byte[] value = null;
            try {
                // retrieve from blockchain depending on db type
                value = chain.getTrieNode(key.toBytes(), dbType);
            } catch (Exception e) {
                this.log.error("<req-trie value retrieval failed>", e);
            }

            if (value != null) {
                ResponseTrieData response;
                if (limit == 1) {
                    // generate response without referenced nodes
                    response = new ResponseTrieData(key, value, dbType);
                } else {
                    // check for internal limit on the request
                    if (limit == 0) {
                        // limit == 0 means "unbounded": apply the internal cap
                        limit = TRIE_DATA_REQUEST_MAXIMUM_BATCH_SIZE;
                    } else {
                        // the first value counts towards the limit
                        limit = Math.min(limit - 1, TRIE_DATA_REQUEST_MAXIMUM_BATCH_SIZE);
                    }

                    Map<ByteArrayWrapper, byte[]> referencedNodes = Collections.emptyMap();
                    try {
                        // determine if the node can be expanded
                        referencedNodes = chain.getReferencedTrieNodes(value, limit, dbType);
                    } catch (Exception e) {
                        this.log.error("<req-trie reference retrieval failed>", e);
                    }

                    // generate response with referenced nodes
                    response = new ResponseTrieData(key, value, referencedNodes, dbType);
                }

                // reply to request
                this.p2p.send(peerId, displayId, response);
            }
        } else {
            this.log.error(
                    "<req-trie decode-error msg-bytes={} peer={}>", message.length, displayId);

            if (log.isTraceEnabled()) {
                this.log.trace(
                        "<req-trie decode-error for msg={} peer={}>",
                        Arrays.toString(message),
                        displayId);
            }
        }
    }
}
| 1,982 |
984 | <reponame>forksnd/win32metadata<filename>generation/WinSDK/RecompiledIdlHeaders/shared/ndis/ndisport.h
// Copyright (C) Microsoft Corporation. All rights reserved.
//
// Definitions for NDIS PORTs
//
#pragma once
#pragma region App, Games, or System family
#include <winapifamily.h>
#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP | WINAPI_PARTITION_SYSTEM | WINAPI_PARTITION_GAMES)
#include <ndis/version.h>
EXTERN_C_START
typedef ULONG NDIS_PORT_NUMBER, *PNDIS_PORT_NUMBER;
#define NDIS_DEFAULT_PORT_NUMBER ((NDIS_PORT_NUMBER)0)
#define NDIS_MAXIMUM_PORTS 0x1000000
//
// NDIS_PORT_TYPE defines the application of a port
//
typedef enum _NDIS_PORT_TYPE
{
    NdisPortTypeUndefined,       // port application not specified
    NdisPortTypeBridge,          // bridge port
    NdisPortTypeRasConnection,   // RAS connection port
    NdisPortType8021xSupplicant, // 802.1x supplicant port
#if NDIS_SUPPORT_NDIS630
    NdisPortTypeNdisImPlatform,  // NDIS IM platform port (NDIS >= 6.30 only)
#endif // NDIS_SUPPORT_NDIS630
    NdisPortTypeMax,             // sentinel; not a valid port type
} NDIS_PORT_TYPE, *PNDIS_PORT_TYPE;
EXTERN_C_END
#endif // WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP | WINAPI_PARTITION_SYSTEM | WINAPI_PARTITION_GAMES)
#pragma endregion
| 428 |
1,575 | // Copyright 2021 DeepMind Technologies Limited
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef MUJOCO_SRC_ENGINE_ENGINE_MACRO_H_
#define MUJOCO_SRC_ENGINE_ENGINE_MACRO_H_
#include "engine/engine_callback.h"
//-------------------------------- utility macros --------------------------------------------------

// mark and free stack
// NOTE: these expect a variable named `d` (with a `pstack` member) in scope;
// mjMARKSTACK saves the stack pointer, mjFREESTACK restores it.
#define mjMARKSTACK int _mark = d->pstack;
#define mjFREESTACK d->pstack = _mark;

// check bitflag
// NOTE: these expect a variable named `m` (with an `opt` member) in scope.
#define mjDISABLED(x) (m->opt.disableflags & (x))
#define mjENABLED(x) (m->opt.enableflags & (x))

// max and min macros
// Arguments are evaluated twice; avoid side effects in a/b.
#define mjMAX(a,b) (((a) > (b)) ? (a) : (b))
#define mjMIN(a,b) (((a) < (b)) ? (a) : (b))

//-------------------------- timer macros ----------------------------------------------------------
// Timing uses the user-installable mjcb_time callback, falling back to 0 when
// no callback is set. TM_END accumulates the elapsed time into d->timer[i].
// The *_1 variants allow a second, independently-tracked interval in the same scope.

#define TM_START mjtNum _tm = (mjcb_time ? mjcb_time() : 0);
#define TM_RESTART _tm = (mjcb_time ? mjcb_time() : 0);
#define TM_END(i) {d->timer[i].duration += ((mjcb_time ? mjcb_time() : 0) - _tm); d->timer[i].number++;}
#define TM_START1 mjtNum _tm1 = (mjcb_time ? mjcb_time() : 0);
#define TM_END1(i) {d->timer[i].duration += ((mjcb_time ? mjcb_time() : 0) - _tm1); d->timer[i].number++;}
#endif // MUJOCO_SRC_ENGINE_ENGINE_MACRO_H_
| 600 |
7,091 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import copy
import json
import argparse
from functools import partial
from collections import defaultdict
import paddle
from paddlenlp.data import Pad, Stack, Tuple
from paddlenlp.datasets import load_dataset, MapDataset
from paddlenlp.transformers import SkepTokenizer, SkepForTokenClassification, SkepForSequenceClassification
from utils import decoding, load_dict, read_test_file
from extraction.data import convert_example_to_feature as convert_example_to_feature_ext
from classification.data import convert_example_to_feature as convert_example_to_feature_cls
def concate_aspect_and_opinion(text, aspect, opinions):
    """Pair an aspect with each opinion, ordered by first occurrence in text.

    For every opinion, the aspect comes first when it appears at or before
    the opinion within ``text``; otherwise the opinion comes first. The
    resulting pairs are joined with commas (empty string for no opinions).
    """
    pairs = []
    for opinion in opinions:
        if text.find(aspect) <= text.find(opinion):
            pairs.append(aspect + opinion)
        else:
            pairs.append(opinion + aspect)
    return ",".join(pairs)
def predict_ext(ext_model_path, ext_label_path, test_path, max_seq_len):
    """Run the extraction model over the test set.

    Args:
        ext_model_path: path of the trained extraction model state dict.
        ext_label_path: path of the extraction label dictionary.
        test_path: path of the test set to predict.
        max_seq_len: maximum total input sequence length after tokenization.

    Returns:
        A list of dicts, one per extracted aspect, with keys
        "id", "aspect", "opinions", "text" and "aspect_text".
    """
    # load dict
    model_name = "skep_ernie_1.0_large_ch"
    # Fixed: read the function parameters instead of the global `args`
    # (previously the parameters were silently ignored).
    ext_label2id, ext_id2label = load_dict(ext_label_path)
    tokenizer = SkepTokenizer.from_pretrained(model_name)
    ori_test_ds = load_dataset(read_test_file, data_path=test_path, lazy=False)
    trans_func = partial(
        convert_example_to_feature_ext,
        tokenizer=tokenizer,
        label2id=ext_label2id,
        max_seq_len=max_seq_len,
        is_test=True)
    test_ds = copy.copy(ori_test_ds).map(trans_func, lazy=False)
    batchify_fn = lambda samples, fn=Tuple(
        Pad(axis=0, pad_val=tokenizer.pad_token_id),
        Pad(axis=0, pad_val=tokenizer.pad_token_type_id),
        Stack(dtype="int64"), ): fn(samples)
    # batch_size has no function parameter, so it still comes from CLI args.
    test_batch_sampler = paddle.io.BatchSampler(
        test_ds, batch_size=args.batch_size, shuffle=False)
    test_loader = paddle.io.DataLoader(
        test_ds, batch_sampler=test_batch_sampler, collate_fn=batchify_fn)
    print("test data loaded.")

    # load ext model
    ext_state_dict = paddle.load(ext_model_path)
    ext_model = SkepForTokenClassification.from_pretrained(
        model_name, num_classes=len(ext_label2id))
    ext_model.load_dict(ext_state_dict)
    print("extraction model loaded.")

    ext_model.eval()
    results = []
    for bid, batch_data in enumerate(test_loader):
        input_ids, token_type_ids, seq_lens = batch_data
        logits = ext_model(input_ids, token_type_ids=token_type_ids)
        predictions = logits.argmax(axis=2).numpy()
        for eid, (seq_len, prediction) in enumerate(zip(seq_lens, predictions)):
            idx = bid * args.batch_size + eid
            # strip the [CLS]/[SEP] positions before decoding the tag sequence
            tag_seq = [ext_id2label[tag_id] for tag_id in prediction[:seq_len][1:-1]]
            text = ori_test_ds[idx]["text"]
            aps = decoding(text[:max_seq_len - 2], tag_seq)
            for aid, ap in enumerate(aps):
                aspect, opinions = ap[0], list(set(ap[1:]))
                aspect_text = concate_aspect_and_opinion(text, aspect, opinions)
                results.append({
                    "id": str(idx) + "_" + str(aid),
                    "aspect": aspect,
                    "opinions": opinions,
                    "text": text,
                    "aspect_text": aspect_text
                })
    return results
def predict_cls(cls_model_path, cls_label_path, ext_results):
    """Classify sentiment polarity for each extracted aspect record.

    Args:
        cls_model_path: path of the trained classification model state dict.
        cls_label_path: path of the classification label dictionary.
        ext_results: list of extraction records produced by ``predict_ext``.

    Returns:
        A list of polarity label strings aligned with ``ext_results``.
    """
    # load dict
    model_name = "skep_ernie_1.0_large_ch"
    # Fixed: read the function parameters instead of the global `args`
    # (previously the parameters were silently ignored).
    cls_label2id, cls_id2label = load_dict(cls_label_path)
    tokenizer = SkepTokenizer.from_pretrained(model_name)

    test_ds = MapDataset(ext_results)
    trans_func = partial(
        convert_example_to_feature_cls,
        tokenizer=tokenizer,
        label2id=cls_label2id,
        max_seq_len=args.max_seq_len,
        is_test=True)
    test_ds = test_ds.map(trans_func, lazy=False)
    batchify_fn = lambda samples, fn=Tuple(
        Pad(axis=0, pad_val=tokenizer.pad_token_id),
        Pad(axis=0, pad_val=tokenizer.pad_token_type_id),
        Stack(dtype="int64")): fn(samples)
    # set shuffle is False, so predictions stay aligned with ext_results
    test_batch_sampler = paddle.io.BatchSampler(
        test_ds, batch_size=args.batch_size, shuffle=False)
    test_loader = paddle.io.DataLoader(
        test_ds, batch_sampler=test_batch_sampler, collate_fn=batchify_fn)
    print("test data loaded.")

    # load cls model
    cls_state_dict = paddle.load(cls_model_path)
    cls_model = SkepForSequenceClassification.from_pretrained(
        model_name, num_classes=len(cls_label2id))
    cls_model.load_dict(cls_state_dict)
    print("classification model loaded.")

    cls_model.eval()
    results = []
    for bid, batch_data in enumerate(test_loader):
        input_ids, token_type_ids, seq_lens = batch_data
        logits = cls_model(input_ids, token_type_ids=token_type_ids)
        predictions = logits.argmax(axis=1).numpy().tolist()
        results.extend(predictions)

    # map label ids back to their string names
    results = [cls_id2label[pred_id] for pred_id in results]
    return results
def post_process(ext_results, cls_results, save_path=None):
    """Merge extraction and classification predictions and save as JSONL.

    Aspect-opinion pairs that share the same example id (the prefix of the
    "id" field, e.g. "3" in "3_0") are grouped back into one record.

    Args:
        ext_results: list of dicts with keys "id", "text", "aspect",
            "opinions" (and others), produced by the extraction step.
        cls_results: list of sentiment labels aligned with ``ext_results``.
        save_path: output file path; defaults to ``args.save_path`` for
            backward compatibility with the original call site.
    """
    assert len(ext_results) == len(cls_results)

    if save_path is None:
        # Preserve the original behavior of writing to the CLI-provided path.
        save_path = args.save_path

    # Attach the predicted polarity to each aspect and bucket by example id.
    collect_dict = defaultdict(list)
    for ext_result, cls_result in zip(ext_results, cls_results):
        ext_result["sentiment_polarity"] = cls_result
        eid, _ = ext_result["id"].split("_")
        collect_dict[eid].append(ext_result)

    sentiment_results = []
    for eid in collect_dict.keys():
        sentiment_result = {}
        ap_list = []
        for idx, single_ap in enumerate(collect_dict[eid]):
            if idx == 0:
                # All entries of one example share the same source text.
                sentiment_result["text"] = single_ap["text"]
            ap_list.append({
                "aspect": single_ap["aspect"],
                "opinions": single_ap["opinions"],
                "sentiment_polarity": single_ap["sentiment_polarity"]
            })
        sentiment_result["ap_list"] = ap_list
        sentiment_results.append(sentiment_result)

    # One JSON record per line; keep non-ASCII text readable.
    with open(save_path, "w", encoding="utf-8") as f:
        for sentiment_result in sentiment_results:
            f.write(json.dumps(sentiment_result, ensure_ascii=False) + "\n")
if __name__ == "__main__":
    # yapf: disable
    parser = argparse.ArgumentParser()
    parser.add_argument("--ext_model_path", type=str, default=None, help="The path of extraction model path that you want to load.")
    parser.add_argument("--cls_model_path", type=str, default=None, help="The path of classification model path that you want to load.")
    parser.add_argument("--ext_label_path", type=str, default=None, help="The path of extraction label dict.")
    parser.add_argument("--cls_label_path", type=str, default=None, help="The path of classification label dict.")
    parser.add_argument('--test_path', type=str, default=None, help="The path of test set that you want to predict.")
    parser.add_argument('--save_path', type=str, required=True, default=None, help="The saving path of predict results.")
    parser.add_argument("--batch_size", type=int, default=16, help="Batch size per GPU/CPU for training.")
    parser.add_argument("--max_seq_len", type=int, default=512, help="The maximum total input sequence length after tokenization.")
    args = parser.parse_args()
    # yapf: enable

    # Stage 1: extract aspect/opinion spans with the extraction model.
    ext_results = predict_ext(args.ext_model_path, args.ext_label_path, args.test_path, args.max_seq_len)
    print("predicting with extraction model done!")

    # Stage 2: classify sentiment polarity of every extracted pair.
    cls_results = predict_cls(args.cls_model_path, args.cls_label_path, ext_results)
    print("predicting with classification model done!")

    # Stage 3: merge both stages and write JSONL results to args.save_path.
    post_process(ext_results, cls_results)
    print(f"sentiment analysis results has been saved to path: {args.save_path}")
| 3,351 |
746 | <reponame>kmdkuk/AndroidTraining
package jp.mixi.practice.intent.med;
import android.app.Activity;
import android.os.Bundle;
import android.view.View;
public class MainActivity extends Activity {
public static final String ACTION_FIRST = "jp.mixi.practice.intent.med.android.intent.action.FIRST";
public static final String ACTION_SECOND = "jp.mixi.practice.intent.med.android.intent.action.SECOND";
public static final String ACTION_THIRD = "jp.mixi.practice.intent.med.android.intent.action.THIRD";
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
View button1 = findViewById(R.id.CallAction1);
View button2 = findViewById(R.id.CallAction2);
View button3 = findViewById(R.id.CallAction3);
// TODO それぞれ、Broadcast を受け取ったら Log.v(String, String) を利用して、ログ出力にどの Action を受信したかを表示する処理を書くこと
button1.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
// TODO ここに、ACTION_FIRST を呼び出す処理を書く
}
});
button2.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
// TODO ここに、ACTION_SECOND を呼び出す処理を書く
}
});
button3.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
// TODO ここに、ACTION_THIRD を呼び出す処理を書く
}
});
}
} | 822 |
1,694 | <filename>stagemonitor-alerting/src/test/java/org/stagemonitor/alerting/incident/IncidentRepositoryTest.java
package org.stagemonitor.alerting.incident;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.stagemonitor.alerting.ThresholdMonitoringReporterTest;
import org.stagemonitor.alerting.check.CheckResult;
import org.stagemonitor.core.MeasurementSession;
import org.stagemonitor.AbstractElasticsearchTest;
import org.stagemonitor.core.util.JsonUtils;
import java.util.Arrays;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
@RunWith(Parameterized.class)
public class IncidentRepositoryTest<T extends IncidentRepository> extends AbstractElasticsearchTest {

    // Repository implementation under test; one instance per parameterized run.
    private final T incidentRepository;

    public IncidentRepositoryTest(T incidentRepository, Class<T> clazz) {
        this.incidentRepository = incidentRepository;
        // The parameter instances are created before the embedded
        // Elasticsearch infrastructure is fully ready, so the client is
        // (re-)injected here for the Elasticsearch-backed implementation.
        if (incidentRepository instanceof ElasticsearchIncidentRepository) {
            final ElasticsearchIncidentRepository elasticsearchIncidentRepository = (ElasticsearchIncidentRepository) incidentRepository;
            elasticsearchIncidentRepository.setElasticsearchClient(elasticsearchClient);
        }
    }

    @Before
    public void setUp() throws Exception {
        // Start every test from an empty repository.
        incidentRepository.clear();
    }

    // Runs the whole suite once per repository implementation.
    @Parameterized.Parameters(name = "{index}: {1}")
    public static Iterable<Object[]> data() {
        return Arrays.asList(new Object[][]{
                {new ElasticsearchIncidentRepository(elasticsearchClient), ElasticsearchIncidentRepository.class},
                {new ConcurrentMapIncidentRepository(), ConcurrentMapIncidentRepository.class}
        });
    }

    @Test
    public void testSaveAndGet() throws Exception {
        Incident incident = createIncidentWithVersion("id1", 1);
        assertTrue(incidentRepository.createIncident(incident));
        refresh();
        assertIncidentEquals(incidentRepository.getIncidentByCheckId(incident.getCheckId()), incident);
        assertIncidentEquals(incidentRepository.getAllIncidents().iterator().next(), incident);
    }

    @Test
    public void testGetNotPresent() throws Exception {
        assertNull(incidentRepository.getIncidentByCheckId("testGetNotPresent"));
        assertTrue(incidentRepository.getAllIncidents().isEmpty());
    }

    @Test
    public void testAlreadyCreated() {
        // Creating the same incident twice must fail the second time.
        assertTrue(incidentRepository.createIncident(createIncidentWithVersion("id1", 1)));
        assertFalse(incidentRepository.createIncident(createIncidentWithVersion("id1", 1)));
    }

    @Test
    public void testWrongVersion() {
        // Optimistic locking: an update must carry a newer version number.
        assertTrue(incidentRepository.createIncident(createIncidentWithVersion("id1", 1)));
        assertFalse(incidentRepository.updateIncident(createIncidentWithVersion("id1", 1)));
        assertTrue(incidentRepository.updateIncident(createIncidentWithVersion("id1", 2)));
    }

    @Test
    public void testDelete() throws Exception {
        assertTrue(incidentRepository.createIncident(createIncidentWithVersion("id1", 1)));
        assertTrue(incidentRepository.deleteIncident(createIncidentWithVersion("id1", 2)));
        assertNull(incidentRepository.getIncidentByCheckId("id1"));
        assertTrue(incidentRepository.getAllIncidents().isEmpty());
    }

    @Test
    public void testDeleteWrongVersion() throws Exception {
        // Deletes with a stale or equal version must be rejected.
        assertTrue(incidentRepository.createIncident(createIncidentWithVersion("id1", 1)));
        assertFalse(incidentRepository.deleteIncident(createIncidentWithVersion("id1", 1)));
        assertFalse(incidentRepository.deleteIncident(createIncidentWithVersion("id1", 0)));
    }

    // Compares incidents via their JSON serialization instead of equals().
    private void assertIncidentEquals(Incident expected, Incident actual) {
        assertEquals(JsonUtils.toJson(expected), JsonUtils.toJson(actual));
    }

    // Builds an incident with CRITICAL and ERROR check results for `checkId`
    // carrying the given optimistic-locking version.
    public static Incident createIncidentWithVersion(String checkId, int version) {
        Incident incident = new Incident(ThresholdMonitoringReporterTest.createCheckCheckingMean(1, 5),
                new MeasurementSession("testApp", "testHost2", "testInstance"),
                Arrays.asList(new CheckResult("test", 10, CheckResult.Status.CRITICAL),
                        new CheckResult("test", 10, CheckResult.Status.ERROR)));
        incident.setVersion(version);
        incident.setCheckId(checkId);
        return incident;
    }
}
| 1,305 |
22,688 | /******************************************************************************
* Copyright 2017 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
#include "cyber/common/log.h"
#include "cyber/common/macros.h"
#include "cyber/cyber.h"
#include "cyber/init.h"
#include "cyber/time/time.h"
#include "modules/canbus/proto/chassis.pb.h"
#include "modules/control/proto/pad_msg.pb.h"
#include "cyber/time/clock.h"
#include "modules/common/adapters/adapter_gflags.h"
#include "modules/common/util/message_util.h"
#include "modules/control/common/control_gflags.h"
namespace {
using apollo::canbus::Chassis;
using apollo::control::DrivingAction;
using apollo::control::PadMessage;
using apollo::cyber::Clock;
using apollo::cyber::CreateNode;
using apollo::cyber::Node;
using apollo::cyber::Reader;
using apollo::cyber::Writer;
class PadTerminal {
public:
PadTerminal() : node_(CreateNode("pad_terminal")) {}
void init() {
chassis_reader_ = node_->CreateReader<Chassis>(
FLAGS_chassis_topic, [this](const std::shared_ptr<Chassis> &chassis) {
on_chassis(*chassis);
});
pad_writer_ = node_->CreateWriter<PadMessage>(FLAGS_pad_topic);
terminal_thread_.reset(new std::thread([this] { terminal_thread_func(); }));
}
void help() {
AINFO << "COMMAND:\n";
AINFO << "\t0: reset to manual drive mode.";
AINFO << "\t1: auto drive mode.";
AINFO << "\tctrl + c: exit.";
AINFO << "\tother: print help.";
}
void send(int cmd_type) {
PadMessage pad;
if (cmd_type == RESET_COMMAND) {
pad.set_action(DrivingAction::RESET);
AINFO << "sending reset action command.";
} else if (cmd_type == AUTO_DRIVE_COMMAND) {
pad.set_action(DrivingAction::START);
AINFO << "sending start action command.";
}
apollo::common::util::FillHeader("terminal", &pad);
pad_writer_->Write(pad);
AINFO << "send pad_message OK";
}
void on_chassis(const Chassis &chassis) {
static bool is_first_emergency_mode = true;
static int64_t count_start = 0;
static bool waiting_reset = false;
// check if chassis enter security mode, if enter, after 10s should reset to
// manual
if (chassis.driving_mode() == Chassis::EMERGENCY_MODE) {
if (is_first_emergency_mode) {
count_start = Clock::Now().ToNanosecond() / 1e3;
is_first_emergency_mode = false;
AINFO << "detect emergency mode.";
} else {
int64_t diff =
Clock::Now().ToNanosecond() / 1e3 - count_start;
if (diff > EMERGENCY_MODE_HOLD_TIME) {
count_start = Clock::Now().ToNanosecond() / 1e3;
waiting_reset = true;
// send a reset command to control
send(RESET_COMMAND);
AINFO << "trigger to reset emergency mode to manual mode.";
} else {
// nothing to do
}
}
} else if (chassis.driving_mode() == Chassis::COMPLETE_MANUAL) {
if (waiting_reset) {
is_first_emergency_mode = true;
waiting_reset = false;
AINFO << "emergency mode reset to manual ok.";
}
}
}
void terminal_thread_func() {
int mode = 0;
bool should_exit = false;
while (std::cin >> mode) {
switch (mode) {
case 0:
send(RESET_COMMAND);
break;
case 1:
send(AUTO_DRIVE_COMMAND);
break;
case 9:
should_exit = true;
break;
default:
help();
break;
}
if (should_exit) {
break;
}
}
}
void stop() { terminal_thread_->join(); }
private:
std::unique_ptr<std::thread> terminal_thread_;
const int ROS_QUEUE_SIZE = 1;
const int RESET_COMMAND = 1;
const int AUTO_DRIVE_COMMAND = 2;
const int EMERGENCY_MODE_HOLD_TIME = 4 * 1000000;
std::shared_ptr<Reader<Chassis>> chassis_reader_;
std::shared_ptr<Writer<PadMessage>> pad_writer_;
std::shared_ptr<Node> node_;
};
} // namespace
// Entry point: boots the cyber runtime, starts the pad terminal and blocks
// until shutdown (e.g. Ctrl+C), then joins the stdin thread.
int main(int argc, char **argv) {
  apollo::cyber::Init("pad_terminal");
  FLAGS_alsologtostderr = true;  // mirror glog output to stderr
  FLAGS_v = 3;                   // verbose logging level
  google::ParseCommandLineFlags(&argc, &argv, true);
  PadTerminal pad_terminal;
  pad_terminal.init();
  pad_terminal.help();
  apollo::cyber::WaitForShutdown();
  pad_terminal.stop();
  return 0;
}
| 1,931 |
313 | //
// YBLShopCarModel.h
// YBL365
//
// Created by 乔同新 on 16/12/20.
// Copyright © 2016年 乔同新. All rights reserved.
//
#import <Foundation/Foundation.h>
// Singleton model holding the state of the shopping cart.
@interface YBLShopCarModel : NSObject

// Number of items that have been added to the cart.
@property (nonatomic, assign) NSInteger carNumber;

// Entries currently in the shopping cart.
@property (nonatomic, strong) NSMutableArray *carGoodArray;

// Temporary goods data.
@property (nonatomic, strong) NSArray *tempGoods;

// Shared singleton instance.
+ (instancetype)shareInstance;

// Adds a good to the shopping cart.
- (void)addGoodToCar:(id)good;

// Removes (decrements) a good from the shopping cart.
- (void)subtractGoodToCar:(id)good;

@end
| 282 |
375 | <filename>device/cpu/core/layer_arithmetic_operation.h
/*
Copyright (c) 2014, Intel Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once
#include "device/cpu/api_internal/nn_device_interface_0_internal.h"
#include "device/api/nn_primitives_api_0.h"
#include "helper_zxyn_f32.h"
namespace layer {
/* Examples of operation performed by arithmetic primitive
(alpha, beta, gamma are scalars, imput_A and input_B are of workload_data type):
(1) output = gamma * ( input_A {ARITHMETIC_FUNCTION} input_B )
Where {ARITHMETIC_FUNCTION} is addition, subtraction, multiplication or division
(2) output = alpha * input_A {ARITHMETIC_FUNCTION} beta * input_B
Where {ARITHMETIC_FUNCTION} is addition or subtraction
*/
// Selects which scalar coefficient(s) are applied in addition to the
// element-wise arithmetic function (see formulas (1) and (2) above).
enum class scalar_op_type {
    NO_SCALAR_OPERATION,
    MUL_BY_GAMMA,
    MUL_BY_ALPHA,
    MUL_BY_BETA,
    MUL_BY_ALPHA_AND_BETA
};
// Element-wise arithmetic primitive on f32 data, computing one of the
// formulas documented above (optionally scaled by alpha/beta/gamma).
class arithmetic_f32 : public helper_zxyn_f32::primitive_zxyn_f32_base {
  public:
    // Constructor without scalar coefficients (alpha = beta = gamma default).
    arithmetic_f32(size_t image_size_x,
                   size_t image_size_y,
                   size_t image_size_z,
                   NN_ARITHMETIC_FUNCTION arithmetic_function,
                   size_t batch_size,
                   nn_device_internal *device);

    // Constructor with explicit alpha/beta/gamma scalar coefficients.
    arithmetic_f32(size_t image_size_x,
                   size_t image_size_y,
                   size_t image_size_z,
                   NN_ARITHMETIC_FUNCTION arithmetic_function,
                   size_t batch_size,
                   float alpha,
                   float beta,
                   float gamma,
                   nn_device_internal *device);

    virtual std::vector<nn_workload_data_t *> create_parameters(bool allocate_delta = false) override;

    bool validate_input(size_t index, nn_workload_data_t *data) override;

    void forward(const std::vector<const nn_workload_data_t *> &inputs,
                 const std::vector<const nn_workload_data_t *> &parameters,
                 const std::vector<nn_workload_data_t *> &outputs) override;

    // Precomputes the multithreaded job description for the given buffers
    // (cached in `job` / `prepared_for`).
    void prepare_forward(const std::vector<const nn_workload_data_t *> &inputs,
                         const std::vector<const nn_workload_data_t *> &parameters,
                         const std::vector<nn_workload_data_t *> &outputs) override;

    std::vector<float> get_input_feat_periodic(const std::vector<const nn_workload_data_t *> &parameters) const;

    // True for addition/subtraction, i.e. when the op is linear in its inputs.
    // (Note: `or` is the standard C++ alternative token for ||.)
    bool is_linear() const { return (arithmetic_function == NN_ARITHMETIC_FUNCTION_SUBTRACTION)
                                    or (arithmetic_function == NN_ARITHMETIC_FUNCTION_ADDITION); }

  protected:
    const NN_ARITHMETIC_FUNCTION arithmetic_function;
    float alpha, beta, gamma;

    // Cached per-thread work items built by prepare_forward().
    std::vector<nn_multithreaded_request> job;
    // Buffer pair the cached `job` was prepared for (input, output).
    std::tuple<float*, float*> prepared_for;

    virtual size_t get_required_input_w() override;
    virtual size_t get_required_input_h() override;

  private:
    void forward(const nn::workload_data<nn::layout_f32> *input,
                 const nn::workload_data<nn::layout_f32> *factor,
                 nn::workload_data<nn::layout_f32> *output);

    void prepare_forward(const nn::workload_data<nn::layout_f32> *input,
                         const nn::workload_data<nn::layout_f32> *factor,
                         nn::workload_data<nn::layout_f32> *output);

    // Dispatches to the correct template instantiation below.
    void run_arithmetic_operation_work_item(const nn::workload_data<nn::layout_f32> *input,
                                            const nn::workload_data<nn::layout_f32> *factor,
                                            nn::workload_data<nn::layout_f32> *output);

    template <NN_ARITHMETIC_FUNCTION T_function, scalar_op_type scalar_op>
    void process_arithmetic_operation(const nn::workload_data<nn::layout_f32> *input,
                                      const nn::workload_data<nn::layout_f32> *factor,
                                      nn::workload_data<nn::layout_f32> *output);

    friend void unpack_arithmetic_callback_handle(void *void_handle);
};
| 2,269 |
2,728 | <reponame>vincenttran-msft/azure-sdk-for-python
# coding=utf-8
# --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
import logging
import os
import sys

from azure.messaging.webpubsubservice import WebPubSubServiceClient
from azure.identity import DefaultAzureCredential

logging.basicConfig(level=logging.DEBUG)
LOG = logging.getLogger()

# Set the values of the client ID, tenant ID, and client secret of the AAD application as environment variables:
# AZURE_CLIENT_ID, AZURE_TENANT_ID, AZURE_CLIENT_SECRET, WEBPUBSUB_ENDPOINT, WEBPUBSUB_CONNECTION_STRING
try:
    endpoint = os.environ["WEBPUBSUB_ENDPOINT"]
    connection_string = os.environ['WEBPUBSUB_CONNECTION_STRING']
except KeyError:
    # Fixed message typo ("set if" -> "set it"); use sys.exit(1) so the
    # process exits non-zero on this error path (bare exit() returned 0 and
    # relies on the site module being loaded).
    LOG.error("Missing environment variable 'WEBPUBSUB_ENDPOINT' or 'WEBPUBSUB_CONNECTION_STRING' - please set it before running the example")
    sys.exit(1)

# Build a client through AAD
client_aad = WebPubSubServiceClient(endpoint=endpoint, hub='hub', credential=DefaultAzureCredential())

# Build authentication token
token_aad = client_aad.get_client_access_token()
print('token by AAD: {}'.format(token_aad))

# Build a client through connection string
client_key = WebPubSubServiceClient.from_connection_string(connection_string, hub='hub')

# Build authentication token
token_key = client_key.get_client_access_token()
print('token by access key: {}'.format(token_key))
| 742 |
1,334 | <gh_stars>1000+
# Generated by Django 3.2 on 2021-09-22 12:31
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: renames the `company` field of the
    # `lead` model to `org`.

    dependencies = [
        ('leads', '0015_auto_20210913_1918'),
    ]

    operations = [
        migrations.RenameField(
            model_name='lead',
            old_name='company',
            new_name='org',
        ),
    ]
| 175 |
649 | <reponame>xjc90s/serenity-core
package net.serenitybdd.screenplay;
/**
* Created by john on 16/08/2015.
*/
public class Uninstrumented {

    /**
     * Returns the original class behind a CGLIB-instrumented (enhanced)
     * question class, or the class itself when it was not instrumented.
     */
    @SuppressWarnings("unchecked")
    public static <T> Class<T> versionOf(Class<T> questionClass) {
        if (questionClass.getName().contains("EnhancerByCGLIB")) {
            return (Class<T>) questionClass.getSuperclass();
        }
        return questionClass;
    }
}
| 141 |
1,511 | <gh_stars>1000+
/* vim:set expandtab ts=4 shiftwidth=4: */
/* GIO - GLib Input, Output and Streaming Library
*
* Copyright (C) 2006-2007 Red Hat, Inc.
* Copyright (C) 2007 <NAME>.
* Copyright (C) 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
* | 92 |
777 | <reponame>google-ar/chromium
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "ui/ozone/platform/drm/gpu/gbm_device.h"
#include <gbm.h>
#include <utility>
namespace ui {
// Thin wrapper that owns a gbm_device created on top of the DRM fd.
GbmDevice::GbmDevice(const base::FilePath& device_path,
                     base::File file,
                     bool is_primary_device)
    : DrmDevice(device_path, std::move(file), is_primary_device) {}

GbmDevice::~GbmDevice() {
  // device_ stays null if Initialize() failed or was never called.
  if (device_)
    gbm_device_destroy(device_);
}

bool GbmDevice::Initialize(bool use_atomic) {
  // Initialize the underlying DRM device first; GBM wraps its fd.
  if (!DrmDevice::Initialize(use_atomic))
    return false;

  device_ = gbm_create_device(get_fd());
  if (!device_) {
    PLOG(ERROR) << "Unable to initialize GBM for " << device_path().value();
    return false;
  }

  return true;
}
} // namespace ui
| 348 |
1,677 | // Copyright (c) .NET Foundation and contributors. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
#pragma once

#ifndef WIN32
#include <cstdlib>
#include "pal_mstypes.h"
#include "pal.h"
#include "ntimage.h"
#include "corhdr.h"

// On non-Windows builds, map the COM task allocator onto plain malloc/free.
#define CoTaskMemAlloc(cb) malloc(cb)
#define CoTaskMemFree(cb) free(cb)

// printf length/format specifier for printing a UINT_PTR in hex.
// NOTE(review): "lx" assumes LP64 (UINT_PTR == unsigned long) — verify for
// 32-bit non-Windows targets.
#define UINT_PTR_FORMAT "lx"
#else
// On Windows (64-bit), UINT_PTR is unsigned __int64, hence "llx".
#define UINT_PTR_FORMAT "llx"
#endif
652 | //
// WLMInvoiceDetailVC.h
// WLMElectronicInvoice
//
// Created by 刘光强 on 2018/5/4.
// Copyright © 2018年 quangqiang. All rights reserved.
//
#import <UIKit/UIKit.h>
// Invoice detail screen.
// NOTE(review): WLBaseViewController is not declared by the UIKit import
// above — presumably provided via a prefix header; verify in the project.
@interface WLMInvoiceDetailVC : WLBaseViewController

@end
| 97 |
11,356 | // Copyright 2010, <NAME>.
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// Test program for the boost::value_initialized<T> workaround.
//
// 17 June 2010 (Created) <NAME>
// Switch the workaround off, before inluding "value_init.hpp".
#define BOOST_DETAIL_VALUE_INIT_WORKAROUND 0
#include <boost/utility/value_init.hpp>
#include <iostream> // For cout.
#include <cstdlib> // For EXIT_SUCCESS and EXIT_FAILURE.
namespace
{
  struct empty_struct
  {
  };

  // A POD aggregate struct derived from an empty struct.
  // Similar to struct Foo1 from Microsoft Visual C++ bug report 484295,
  // "VC++ does not value-initialize members of derived classes without
  // user-declared constructor", reported in 2009 by <NAME>:
  // https://connect.microsoft.com/VisualStudio/feedback/details/484295
  struct derived_struct: empty_struct
  {
    int data;
  };

  // True when `arg` was value-initialized (its only data member is zero).
  bool is_value_initialized(const derived_struct& arg)
  {
    return arg.data == 0;
  }

  // A class with a vtable (virtual destructor); some compilers failed to
  // zero its data members during value-initialization.
  class virtual_destructor_holder
  {
  public:
    int i;
    virtual ~virtual_destructor_holder()
    {
    }
  };

  bool is_value_initialized(const virtual_destructor_holder& arg)
  {
    return arg.i == 0;
  }

  // Equivalent to the Stats class from GCC Bug 33916,
  // "Default constructor fails to initialize array members", reported in 2007 by
  // <NAME>: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=33916
  // and fixed for GCC 4.2.4.
  class private_int_array_pair
  {
    friend bool is_value_initialized(const private_int_array_pair& arg);
  private:
    int first[12];
    int second[12];
  };

  // Checks every element of both private arrays.
  bool is_value_initialized(const private_int_array_pair& arg)
  {
    for ( unsigned i = 0; i < 12; ++i)
    {
      if ( (arg.first[i] != 0) || (arg.second[i] != 0) )
      {
        return false;
      }
    }
    return true;
  }

  struct int_pair_struct
  {
    int first;
    int second;
  };

  typedef int int_pair_struct::*ptr_to_member_type;

  struct ptr_to_member_struct
  {
    ptr_to_member_type data;
  };

  // A value-initialized pointer-to-member compares equal to null.
  bool is_value_initialized(const ptr_to_member_struct& arg)
  {
    return arg.data == 0;
  }

  // Array-of-two overload: both elements must be value-initialized.
  template <typename T>
  bool is_value_initialized(const T(& arg)[2])
  {
    return
      is_value_initialized(arg[0]) &&
      is_value_initialized(arg[1]);
  }

  template <typename T>
  bool is_value_initialized(const boost::value_initialized<T>& arg)
  {
    return is_value_initialized(arg.data());
  }

  // Returns zero when the specified object is value-initialized, and one otherwise.
  // Prints a message to standard output if the value-initialization has failed.
  template <class T>
  unsigned failed_to_value_initialized(const T& object, const char *const object_name)
  {
    if ( is_value_initialized(object) )
    {
      return 0u;
    }
    else
    {
      std::cout << "Note: Failed to value-initialize " << object_name << '.' << std::endl;
      return 1u;
    }
  }

  // A macro that passes both the name and the value of the specified object to
  // the function above here.
#define FAILED_TO_VALUE_INITIALIZE(value) failed_to_value_initialized(value, #value)

  // Equivalent to the dirty_stack() function from GCC Bug 33916,
  // "Default constructor fails to initialize array members", reported in 2007 by
  // <NAME>: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=33916
  // Writes a nonzero pattern over 4 KiB of stack so that a later failure to
  // value-initialize is observable (fresh stack memory is often zero anyway).
  void dirty_stack()
  {
    unsigned char array_on_stack[4096];
    for (unsigned i = 0; i < sizeof(array_on_stack); ++i)
    {
      array_on_stack[i] = 0x11;
    }
  }
}
int main()
{
  // Pollute the stack first so uninitialized members cannot accidentally
  // read as zero (see dirty_stack() above).
  dirty_stack();

  // TODO More types may be added later.
  const unsigned num_failures =
    FAILED_TO_VALUE_INITIALIZE(boost::value_initialized<derived_struct>()) +
    FAILED_TO_VALUE_INITIALIZE(boost::value_initialized<virtual_destructor_holder[2]>()) +
    FAILED_TO_VALUE_INITIALIZE(boost::value_initialized<private_int_array_pair>()) +
    FAILED_TO_VALUE_INITIALIZE(boost::value_initialized<ptr_to_member_struct>());

#ifdef BOOST_DETAIL_VALUE_INIT_WORKAROUND_SUGGESTED
  // One or more failures are expected: the workaround was switched off at
  // the top of this file, and this compiler is known to need it.
  return num_failures > 0 ? EXIT_SUCCESS : EXIT_FAILURE;
#else
  // No failures are expected.
  return num_failures == 0 ? EXIT_SUCCESS : EXIT_FAILURE;
#endif
}
| 1,555 |
634 | /*
* Copyright 2000-2017 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.build.events.impl;
import com.intellij.build.BuildDescriptor;
import com.intellij.build.BuildViewSettingsProvider;
import com.intellij.build.DefaultBuildDescriptor;
import com.intellij.build.events.BuildEventsNls;
import com.intellij.build.events.StartBuildEvent;
import com.intellij.build.process.BuildProcessHandler;
import com.intellij.execution.filters.Filter;
import com.intellij.execution.ui.ConsoleView;
import com.intellij.execution.ui.RunContentDescriptor;
import com.intellij.openapi.actionSystem.AnAction;
import com.intellij.util.Consumer;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.util.Arrays;
import java.util.function.Supplier;
/**
* @author Vladislav.Soroka
*/
public class StartBuildEventImpl extends StartEventImpl implements StartBuildEvent {

  // Descriptor of the build being started; always normalized to
  // DefaultBuildDescriptor in the constructor.
  private final @Nonnull
  DefaultBuildDescriptor myBuildDescriptor;
  // Optional build-view settings; null until set via
  // withBuildViewSettingsProvider().
  private @Nullable
  BuildViewSettingsProvider myBuildViewSettingsProvider;

  /**
   * @param descriptor descriptor of the build (id, start time, ...)
   * @param message    presentable message for this start event
   */
  public StartBuildEventImpl(@Nonnull BuildDescriptor descriptor, @Nonnull @BuildEventsNls.Message String message) {
    super(descriptor.getId(), null, descriptor.getStartTime(), message);
    // Wrap plain descriptors so the delegating with* helpers below always
    // have a DefaultBuildDescriptor to forward to.
    myBuildDescriptor =
      descriptor instanceof DefaultBuildDescriptor ? (DefaultBuildDescriptor)descriptor : new DefaultBuildDescriptor(descriptor);
  }

  //@ApiStatus.Experimental
  @Nonnull
  @Override
  public DefaultBuildDescriptor getBuildDescriptor() {
    return myBuildDescriptor;
  }

  /**
   * @deprecated use {@link DefaultBuildDescriptor#withProcessHandler}
   */
  @Deprecated
  public StartBuildEventImpl withProcessHandler(@Nullable BuildProcessHandler processHandler,
                                                @Nullable Consumer<? super ConsoleView> attachedConsoleConsumer) {
    myBuildDescriptor.withProcessHandler(processHandler, attachedConsoleConsumer);
    return this;
  }

  /**
   * @deprecated use {@link DefaultBuildDescriptor#withRestartAction}
   */
  @Deprecated
  public StartBuildEventImpl withRestartAction(@Nonnull AnAction anAction) {
    myBuildDescriptor.withRestartAction(anAction);
    return this;
  }

  /**
   * @deprecated use {@link DefaultBuildDescriptor#withRestartAction}
   */
  @Deprecated
  public StartBuildEventImpl withRestartActions(AnAction... actions) {
    Arrays.stream(actions).forEach(myBuildDescriptor::withRestartAction);
    return this;
  }

  /**
   * @deprecated use {@link DefaultBuildDescriptor#withContentDescriptor}
   */
  @Deprecated
  public StartBuildEventImpl withContentDescriptorSupplier(Supplier<? extends RunContentDescriptor> contentDescriptorSupplier) {
    myBuildDescriptor.withContentDescriptor(contentDescriptorSupplier);
    return this;
  }

  /**
   * @deprecated use {@link DefaultBuildDescriptor#withExecutionFilter}
   */
  @Deprecated
  public StartBuildEventImpl withExecutionFilter(@Nonnull Filter filter) {
    myBuildDescriptor.withExecutionFilter(filter);
    return this;
  }

  /**
   * @deprecated use {@link DefaultBuildDescriptor#withExecutionFilter}
   */
  @Deprecated
  public StartBuildEventImpl withExecutionFilters(Filter... filters) {
    Arrays.stream(filters).forEach(myBuildDescriptor::withExecutionFilter);
    return this;
  }

  @Nullable
  //@ApiStatus.Experimental
  public BuildViewSettingsProvider getBuildViewSettingsProvider() {
    return myBuildViewSettingsProvider;
  }

  //@ApiStatus.Experimental
  public StartBuildEventImpl withBuildViewSettingsProvider(@Nullable BuildViewSettingsProvider viewSettingsProvider) {
    myBuildViewSettingsProvider = viewSettingsProvider;
    return this;
  }
}
| 1,299 |
381 | import gc
import time
import thread
import os
import errno
from pypy.interpreter.gateway import interp2app, unwrap_spec
from rpython.rlib import rgil
NORMAL_TIMEOUT = 300.0  # 5 minutes

def waitfor(space, w_condition, delay=1):
    # Poll the app-level callable w_condition until it returns a true value,
    # or until delay * NORMAL_TIMEOUT seconds have elapsed.  The GIL is
    # released around each sleep so other interpreter threads can progress;
    # the back-off grows 5% per iteration.  (Python 2 / RPython test helper.)
    adaptivedelay = 0.04
    limit = time.time() + delay * NORMAL_TIMEOUT
    while time.time() <= limit:
        rgil.release()
        time.sleep(adaptivedelay)
        rgil.acquire()
        gc.collect()
        if space.is_true(space.call_function(w_condition)):
            return
        adaptivedelay *= 1.05
    print '*** timed out ***'
def timeout_killer(pid, delay):
    # Arrange for process `pid` to receive SIGKILL after roughly `delay`
    # seconds unless it exits first.  Runs in a background thread so the
    # caller is not blocked.
    def kill():
        for x in range(delay * 10):
            time.sleep(0.1)
            try:
                # Signal 0 only probes whether the process still exists.
                os.kill(pid, 0)
            except OSError as e:
                if e.errno == errno.ESRCH: # no such process
                    return
                raise
        os.kill(pid, 9)
        print "process %s killed!" % (pid,)
    thread.start_new_thread(kill, ())
class GenericTestThread:
    # Base class for app-level threading tests.  Exposes to app-level code:
    #   self.waitfor(condition, delay=1) - poll until condition() is true
    #   self.timeout_killer(pid, delay)  - SIGKILL pid after ~delay seconds
    #   self.busywait                    - plain time.sleep
    # When running app-direct the helpers are plain Python functions;
    # otherwise they are wrapped as interp-level gateways via interp2app.

    spaceconfig = dict(usemodules=('thread', 'time', 'signal'))

    def setup_class(cls):
        cls.w_runappdirect = cls.space.wrap(cls.runappdirect)
        if cls.runappdirect:
            # App-direct: no object space involved, use host-level helpers.
            def plain_waitfor(self, condition, delay=1):
                adaptivedelay = 0.04
                limit = time.time() + NORMAL_TIMEOUT * delay
                while time.time() <= limit:
                    time.sleep(adaptivedelay)
                    gc.collect()
                    if condition():
                        return
                    adaptivedelay *= 1.05
                print '*** timed out ***'

            cls.w_waitfor = plain_waitfor

            def py_timeout_killer(self, *args, **kwargs):
                timeout_killer(*args, **kwargs)

            cls.w_timeout_killer = cls.space.wrap(py_timeout_killer)
        else:
            # Wrap the module-level helpers as interp-level gateways.
            @unwrap_spec(delay=int)
            def py_waitfor(space, w_condition, delay=1):
                waitfor(space, w_condition, delay)

            cls.w_waitfor = cls.space.wrap(interp2app(py_waitfor))

            def py_timeout_killer(space, __args__):
                # Unwrap app-level args/kwargs before calling the helper.
                args_w, kwargs_w = __args__.unpack()
                args = map(space.unwrap, args_w)
                kwargs = dict([
                    (k, space.unwrap(v))
                    for k, v in kwargs_w.iteritems()
                ])
                timeout_killer(*args, **kwargs)

            cls.w_timeout_killer = cls.space.wrap(interp2app(py_timeout_killer))

        cls.w_busywait = cls.space.appexec([], """():
            import time
            return time.sleep
        """)
| 1,389 |
14,425 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;
@XmlRootElement(name="NewApplication")
@XmlAccessorType(XmlAccessType.FIELD)
public class NewApplication {

  /** Identifier of the newly created application (serialized as "application-id"). */
  @XmlElement(name="application-id")
  String applicationId;

  /** Maximum resource capability reported for the application. */
  @XmlElement(name="maximum-resource-capability")
  ResourceInfo maximumResourceCapability;

  /** JAXB requires a no-arg constructor; initializes both fields to empty defaults. */
  public NewApplication() {
    this.applicationId = "";
    this.maximumResourceCapability = new ResourceInfo();
  }

  public NewApplication(String appId, ResourceInfo maxResources) {
    this.applicationId = appId;
    this.maximumResourceCapability = maxResources;
  }

  public String getApplicationId() {
    return this.applicationId;
  }

  public ResourceInfo getMaximumResourceCapability() {
    return this.maximumResourceCapability;
  }
}
| 492 |
1,422 | <gh_stars>1000+
/*
* Copyright (C) 2012 The Flogger Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.flogger.backend;
import java.util.logging.Level;
/**
 * Interface for all logger backends.
 * <p>
 * <h2>Implementation Notes:</h2>
 * Often each {@link com.google.common.flogger.AbstractLogger} instance will be instantiated with a
 * new logger backend (to permit per-class logging behavior). Because of this it is important that
 * LoggerBackends have as little per-instance state as possible.
 */
public abstract class LoggerBackend {
  /**
   * Returns the logger name (which is usually a canonicalized class name) or {@code null} if not
   * given.
   *
   * @return the logger name, or {@code null}.
   */
  public abstract String getLoggerName();
  /**
   * Returns whether logging is enabled for the given level for this backend. Different backends may
   * return different values depending on the class with which they are associated.
   *
   * @param lvl the level to test.
   * @return true if a log statement at {@code lvl} would be emitted by this backend.
   */
  public abstract boolean isLoggable(Level lvl);
  /**
   * Outputs the log statement represented by the given {@link LogData} instance.
   *
   * @param data user and logger supplied data to be rendered in a backend specific way.
   */
  public abstract void log(LogData data);
  /**
   * Handles an error in a log statement. Errors passed into this method are expected to have only
   * three distinct causes:
   * <ol>
   *   <li>Bad format strings in log messages (e.g. {@code "foo=%Q"}. These will always be
   *       instances of {@link com.google.common.flogger.parser.ParseException ParseException} and
   *       contain human readable error messages describing the problem.
   *   <li>A backend optionally choosing not to handle errors from user code during formatting.
   *       This is not recommended (see below) but may be useful in testing or debugging.
   *   <li>Runtime errors in the backend itself.
   * </ol>
   *
   * <p>It is recommended that backend implementations avoid propagating exceptions in user code
   * (e.g. calls to {@code toString()}), as the nature of logging means that log statements are
   * often only enabled when debugging. If errors were propagated up into user code, enabling
   * logging to look for the cause of one issue could trigger previously unknown bugs, which could
   * then seriously hinder debugging the original issue.
   *
   * <p>Typically a backend would handle an error by logging an alternative representation of the
   * "bad" log data, being careful not to allow any more exceptions to occur. If a backend chooses
   * to propagate an error (e.g. when testing or debugging) it must wrap it in
   * {@link LoggingException} to avoid it being re-caught.
   *
   * @param error the exception throw when {@code badData} was initially logged.
   * @param badData the original {@code LogData} instance which caused an error. It is not expected
   *     that simply trying to log this again will succeed and error handlers must be careful in
   *     how they handle this instance, it's arguments and metadata.
   * @throws LoggingException to indicate an error which should be propagated into user code.
   */
  public abstract void handleError(RuntimeException error, LogData badData);
}
| 1,000 |
777 | <reponame>google-ar/chromium<filename>third_party/WebKit/Source/core/layout/svg/LayoutSVGResourceRadialGradient.cpp<gh_stars>100-1000
/*
* Copyright (C) 2006 <NAME> <<EMAIL>>
* Copyright (C) Research In Motion Limited 2010. All rights reserved.
* Copyright (C) 2012 Adobe Systems Incorporated. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public License
* along with this library; see the file COPYING.LIB. If not, write to
* the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301, USA.
*/
#include "core/layout/svg/LayoutSVGResourceRadialGradient.h"
#include "core/svg/SVGRadialGradientElement.h"
namespace blink {

// Layout resource backing an SVG <radialGradient>; turns the element's
// attributes into a platform Gradient object for painting.
LayoutSVGResourceRadialGradient::LayoutSVGResourceRadialGradient(
    SVGRadialGradientElement* node)
    : LayoutSVGResourceGradient(node),
      m_attributesWrapper(RadialGradientAttributesWrapper::create()) {}

LayoutSVGResourceRadialGradient::~LayoutSVGResourceRadialGradient() {}

// Re-collects attribute values from the DOM element into a fresh
// RadialGradientAttributes stored in the wrapper.
bool LayoutSVGResourceRadialGradient::collectGradientAttributes(
    SVGGradientElement* gradientElement) {
  m_attributesWrapper->set(RadialGradientAttributes());
  return toSVGRadialGradientElement(gradientElement)
      ->collectGradientAttributes(mutableAttributes());
}

// Resolves (cx, cy) against the element's gradient units.
FloatPoint LayoutSVGResourceRadialGradient::centerPoint(
    const RadialGradientAttributes& attributes) const {
  return SVGLengthContext::resolvePoint(element(), attributes.gradientUnits(),
                                        *attributes.cx(), *attributes.cy());
}

// Resolves the focal point (fx, fy) against the element's gradient units.
FloatPoint LayoutSVGResourceRadialGradient::focalPoint(
    const RadialGradientAttributes& attributes) const {
  return SVGLengthContext::resolvePoint(element(), attributes.gradientUnits(),
                                        *attributes.fx(), *attributes.fy());
}

// Resolves the outer radius r.
float LayoutSVGResourceRadialGradient::radius(
    const RadialGradientAttributes& attributes) const {
  return SVGLengthContext::resolveLength(element(), attributes.gradientUnits(),
                                         *attributes.r());
}

// Resolves the focal radius fr.
float LayoutSVGResourceRadialGradient::focalRadius(
    const RadialGradientAttributes& attributes) const {
  return SVGLengthContext::resolveLength(element(), attributes.gradientUnits(),
                                         *attributes.fr());
}

// Builds the platform gradient from the resolved geometry, spread method
// and stop list.
PassRefPtr<Gradient> LayoutSVGResourceRadialGradient::buildGradient() const {
  const RadialGradientAttributes& attributes = this->attributes();
  RefPtr<Gradient> gradient =
      Gradient::create(focalPoint(attributes), focalRadius(attributes),
                       centerPoint(attributes), radius(attributes));
  gradient->setSpreadMethod(
      platformSpreadMethodFromSVGType(attributes.spreadMethod()));
  addStops(*gradient, attributes.stops());
  return gradient.release();
}

}  // namespace blink
3,262 | <filename>src/plugins/xbl/src/xbl_manager.cpp
#include "xbl_manager.h"
#include "halley/support/logger.h"
#include "halley/text/halleystring.h"
#include "halley/concurrency/concurrent.h"
#include <algorithm>
#include <chrono>
#include <cwctype>
#include <map>
#include <thread>
#include <vccorlib.h>
#include <winrt/base.h>
#include <winrt/Windows.System.UserProfile.h>
#include <winrt/Windows.Foundation.h>
#include <winrt/Windows.Foundation.Collections.h>
#include <winrt/Windows.UI.Core.h>
#include <winrt/Windows.Gaming.XboxLive.Storage.h>
#include <winrt/Windows.Storage.Streams.h>
#include "xsapi/services.h"
#include <ppltasks.h>
#include <assert.h>
#define GAME_SESSION_TEMPLATE_NAME L"Standard_game_session_without_matchmaking"
#define LOBBY_TEMPLATE_NAME L"test_lobby_invite"
#define LOGIN_DELAY 180
using namespace Halley;
// Converts a C++/CX hat pointer (Platform::Object^) into the equivalent
// C++/WinRT projected type T by QueryInterface on the underlying COM object.
// Throws (via check_hresult) if the object does not implement T's interface.
template <typename T>
T from_cx(Platform::Object^ from)
{
	T to{ nullptr };
	winrt::check_hresult(reinterpret_cast<::IUnknown*>(from)
		->QueryInterface(winrt::guid_of<T>(),
			reinterpret_cast<void**>(winrt::put_abi(to))));
	return to;
}

// Converts a C++/WinRT object back into its C++/CX projection T^.
// safe_cast performs a checked cast on the reinterpreted ABI pointer.
template <typename T>
T^ to_cx(winrt::Windows::Foundation::IUnknown const& from)
{
	return safe_cast<T^>(reinterpret_cast<Platform::Object^>(winrt::get_abi(from)));
}
// Tracks the lifecycle of one multiplayer-manager operation:
// NotRequested -> Requested -> DoneOk / Error, with an optional 5 second
// watchdog that demotes a stuck Requested operation to Error.
XBLMPMOperationStateCtrl::XBLMPMOperationStateCtrl()
{
	reset();
}

// Returns to the initial state with the watchdog disabled.
void XBLMPMOperationStateCtrl::reset()
{
	state = OpState::NotRequested;
	requestStartTime = 0L;
	timeOutActive = false;
}

// Marks the operation as in flight and restarts the watchdog clock.
void XBLMPMOperationStateCtrl::setStateRequested()
{
	assert ( state==OpState::NotRequested );
	state = OpState::Requested;
	renewTimeoutTime();
}

void XBLMPMOperationStateCtrl::setStateError()
{
	assert ( state==OpState::NotRequested || state==OpState::Requested );
	state = OpState::Error;
}

void XBLMPMOperationStateCtrl::setStateDoneOk()
{
	assert ( state==OpState::NotRequested || state==OpState::Requested );
	state = OpState::DoneOk;
}

// State queries; all route through getState() so the watchdog is applied.
bool XBLMPMOperationStateCtrl::checkStateNotRequested()
{
	return getState()==OpState::NotRequested;
}

bool XBLMPMOperationStateCtrl::checkStateRequested()
{
	return getState()==OpState::Requested;
}

bool XBLMPMOperationStateCtrl::checkStateError()
{
	return getState()==OpState::Error;
}

bool XBLMPMOperationStateCtrl::checkStateDoneOk()
{
	return getState()==OpState::DoneOk;
}

// Enables/disables the watchdog; enabling restarts its clock.
void XBLMPMOperationStateCtrl::enableTimeout( bool active )
{
	timeOutActive = active;
	if ( active ) {
		renewTimeoutTime();
	}
}

// Restarts the watchdog clock from "now" (millisecond tick count).
void XBLMPMOperationStateCtrl::renewTimeoutTime()
{
	requestStartTime = GetTickCount64();
}

// Returns the current state; if the watchdog is active and a Requested
// operation has been pending for more than 5000 ms it is demoted to Error.
XBLMPMOperationStateCtrl::OpState XBLMPMOperationStateCtrl::getState() const
{
	if ( timeOutActive && state==OpState::Requested ) {
		const ULONGLONG requestTimeout = 5000;
		ULONGLONG sinceRequestedElapsedTime = GetTickCount64()-requestStartTime;
		if ( sinceRequestedElapsedTime>requestTimeout ) {
			Logger::logWarning(String("Operation time out!"));
			state=OpState::Error;
		}
	}
	return state;
}
// Starts with no pending invitation and no multiplayer session configured.
XBLManager::XBLManager()
{
	multiplayerIncommingInvitationUri = L"";

	multiplayerCurrentSetup.mode = MultiplayerMode::None;
	multiplayerCurrentSetup.key = "";
	multiplayerCurrentSetup.invitationUri = L"";
	multiplayerCurrentSetup.sessionId=-1;

	multiplayerTargetSetup.mode = MultiplayerMode::None;
	multiplayerTargetSetup.key = "";
	multiplayerTargetSetup.invitationUri = L"";
	multiplayerTargetSetup.sessionId=-1;

	multiplayerState = MultiplayerState::NotInitialized;
	multiplayerNextSessionId = 1;     // session ids are handed out sequentially
	xblMultiplayerManager = nullptr;
	xblMultiplayerContext = 0;

	playerLoggedOut = false;
}

XBLManager::~XBLManager()
{
	deInit();
}

// Registers the XSAPI sign-out handler, which tears down all per-user state
// and raises the playerLoggedOut flag for the game loop to observe.
void XBLManager::init()
{
	using namespace xbox::services::system;
	signOutHandler = xbox_live_user::add_sign_out_completed_handler([this](const sign_out_completed_event_args&)
	{
		xboxUser.reset();
		xboxLiveContext.reset();
		gameSaveProvider.reset();
		status = XBLStatus::Disconnected;
		achievementsStatus = XBLAchievementsStatus::Uninitialized;
		achievementStatus.clear();
		playerLoggedOut = true;
	});
}

// Unregisters the sign-out handler and drops cached achievement state.
void XBLManager::deInit()
{
	using namespace xbox::services::system;
	xbox_live_user::remove_sign_out_completed_handler(signOutHandler);

	achievementsStatus = XBLAchievementsStatus::Uninitialized;
	achievementStatus.clear();
}
// Returns the save container for the given name, creating and caching a new
// XBLSaveData instance on first request.
std::shared_ptr<ISaveData> XBLManager::getSaveContainer(const String& name)
{
	const auto existing = saveStorage.find(name);
	if (existing != saveStorage.end()) {
		return existing->second;
	}
	auto container = std::make_shared<XBLSaveData>(*this, name);
	saveStorage[name] = container;
	return container;
}
// Drops the current connected-storage provider, reacquires it, and recreates
// every cached save container. Blocks (.get()) until the chain completes.
void XBLManager::recreateCloudSaveContainer()
{
	if (status == XBLStatus::Connected)
	{
		Concurrent::execute([=]() -> void
		{
			gameSaveProvider.reset();
			status = XBLStatus::Disconnected;
			getConnectedStorage().get();

			std::map<String, std::shared_ptr<ISaveData>>::iterator iter;
			for (iter = saveStorage.begin(); iter != saveStorage.end(); ++iter)
			{
				std::dynamic_pointer_cast<XBLSaveData>(iter->second)->recreate();
			}
		}).get();
	}
}

// Connected-storage provider, if one has been acquired (empty otherwise).
std::optional<winrt::Windows::Gaming::XboxLive::Storage::GameSaveProvider> XBLManager::getProvider() const
{
	return gameSaveProvider;
}

XBLStatus XBLManager::getStatus() const
{
	return status;
}
// Authorisation token carrying an Xbox Live auth string; not single-use and
// not cancellable. Note the userId is currently NOT included in the map
// (line intentionally commented out).
class XboxLiveAuthorisationToken : public AuthorisationToken {
public:
	XboxLiveAuthorisationToken(String userId, String token)
	{
		//data["userId"] = std::move(userId);
		data["token"] = std::move(token);
	}

	String getType() const override
	{
		return "xboxlive";
	}

	bool isSingleUse() const override
	{
		return false;
	}

	bool isCancellable() const override
	{
		return false;
	}

	void cancel() override
	{
	}

	std::map<String, String> getMapData() const override
	{
		return data;
	}

private:
	std::map<String, String> data;
	// NOTE(review): these two flags are never read or written — candidates for removal.
	bool playOnline = false;
	bool shareUGC = false;
};

// Asynchronously fetches a signed token for the given request parameters and
// translates the XSAPI privilege list into OnlineCapabilities flags.
// Resolves to an Error result if not connected or if XSAPI reports an error.
Future<AuthTokenResult> XBLManager::getAuthToken(const AuthTokenParameters& parameters)
{
	Promise<AuthTokenResult> promise;
	if (status == XBLStatus::Connected) {
		auto future = promise.getFuture();

		xboxLiveContext->user()->get_token_and_signature(parameters.method.getUTF16().c_str(), parameters.url.getUTF16().c_str(), parameters.headers.getUTF16().c_str())
			.then([=, promise = std::move(promise)](xbox::services::xbox_live_result<xbox::services::system::token_and_signature_result> result) mutable
		{
			if (result.err()) {
				Logger::logError(result.err_message());
				promise.setValue(AuthTokenRetrievalResult::Error);
			} else {
				auto payload = result.payload();
				auto privileges = String(payload.privileges().c_str());
				auto userId = String(payload.xbox_user_id().c_str());
				auto token = String(payload.token().c_str());

				// Map numeric Xbox Live privileges to capability flags.
				OnlineCapabilities capabilities;
				for (const auto& priv: privileges.split(' ')) {
					const int privNumber = priv.toInteger();
					if (privNumber == 254) { // MULTIPLAYER_SESSIONS
						capabilities.onlinePlay = true;
					} else if (privNumber == 247) { // USER_CREATED_CONTENT
						capabilities.ugc = true;
					} else if (privNumber == 211) { // SHARE_CONTENT
						capabilities.ugcShare = true;
					} else if (privNumber == 252) { // COMMUNICATIONS
						capabilities.communication = true;
					} else if (privNumber == 249) { // PROFILE_VIEWING
						capabilities.viewProfiles = true;
					}
				}

				promise.setValue(AuthTokenResult(std::make_unique<XboxLiveAuthorisationToken>(userId, token), capabilities));
			}
		});

		return future;
	} else {
		promise.setValue(AuthTokenRetrievalResult::Error);
		return promise.getFuture();
	}
}
// Reports achievement progress as a percentage (0-100) to Xbox Live.
// When progress reaches 100 the achievement is marked unlocked in the
// local cache as well. No-op when no user/context is available.
void XBLManager::setAchievementProgress(const String& achievementId, int currentProgress, int maximumValue)
{
	if (xboxUser != nullptr && xboxLiveContext != nullptr)
	{
		string_t id (achievementId.cppStr().begin(), achievementId.cppStr().end());
		// XSAPI expects percentage progress, not raw counts.
		int progress = (int)floor(((float)currentProgress / (float)maximumValue) * 100.f);
		xboxLiveContext->achievement_service().update_achievement(xboxUser->xbox_user_id(), id, progress).then([=] (xbox::services::xbox_live_result<void> result)
		{
			if (result.err())
			{
				Logger::logError(String("Error unlocking achievement '") + achievementId + String("': ") + result.err().value() + " " + result.err_message());
			}
			else if (progress == 100)
			{
				achievementStatus[id] = true;
			}
		});
	}
}
// Returns whether the given achievement is unlocked according to the cached
// state fetched by retrieveUserAchievementsState(). If the fetch is still in
// flight, waits up to 5 seconds for it to finish; returns false if the cache
// was never initialized or the wait times out, and defaultValue when the id
// is simply not present in the cache.
bool XBLManager::isAchievementUnlocked(const String& achievementId, bool defaultValue)
{
	if (achievementsStatus == XBLAchievementsStatus::Uninitialized)
	{
		Logger::logWarning(String("Trying to get the achievement status before starting the retrieve task!"));
		return false;
	}
	else if (achievementsStatus == XBLAchievementsStatus::Retrieving)
	{
		const unsigned long long timeout = GetTickCount64() + 5000;
		while (achievementsStatus == XBLAchievementsStatus::Retrieving && GetTickCount64() < timeout)
		{
			// Yield instead of busy-spinning a full core while the async
			// achievement fetch completes on another thread.
			std::this_thread::sleep_for(std::chrono::milliseconds(10));
		}
		if (achievementsStatus == XBLAchievementsStatus::Retrieving)
		{
			Logger::logWarning(String("Achievements are taking too long to load!"));
			return false;
		}
	}

	string_t id(achievementId.cppStr().begin(), achievementId.cppStr().end());
	const auto iterator = achievementStatus.find(id);
	if (iterator != achievementStatus.end())
	{
		return iterator->second;
	}
	return defaultValue;
}
// Gamertag of the signed-in user, or an empty string when nobody is signed in.
String XBLManager::getPlayerName()
{
	if (!xboxUser) {
		return "";
	}
	return String(xboxUser->gamertag().c_str());
}
// One-shot query: returns true exactly once after a logout has been flagged,
// clearing the flag in the process.
bool XBLManager::playerHasLoggedOut()
{
	if (!playerLoggedOut) {
		return false;
	}
	playerLoggedOut = false;
	return true;
}
// Post-login setup: creates the live context, kicks off the achievement
// fetch, and acquires connected storage before completing.
winrt::Windows::Foundation::IAsyncAction XBLManager::onLoggedIn()
{
	xboxLiveContext = std::make_shared<xbox::services::xbox_live_context>(xboxUser);
	retrieveUserAchievementsState();
	co_await getConnectedStorage();
}

// Attempts a silent sign-in first; if the platform reports that user
// interaction is required, falls back to the interactive sign-in UI.
// The returned future resolves with success, or with a retry-in-10s result
// on any failure.
Future<PlatformSignInResult> XBLManager::signIn()
{
	Promise<PlatformSignInResult> origPromise;
	//auto future = promise.getFuture();

	xbox::services::system::xbox_live_services_settings::get_singleton_instance()->set_diagnostics_trace_level(xbox::services::xbox_services_diagnostics_trace_level::verbose);
	status = XBLStatus::Connecting;

	using namespace xbox::services::system;
	xboxUser = std::make_shared<xbox_live_user>(nullptr);
	// The CoreDispatcher must be handed to XSAPI as a C++/CX object.
	auto dispatcher = to_cx<Platform::Object>(winrt::Windows::UI::Core::CoreWindow::GetForCurrentThread().Dispatcher());

	xboxUser->signin_silently(dispatcher).then([=] (xbox::services::xbox_live_result<sign_in_result> result) mutable -> winrt::Windows::Foundation::IAsyncAction
	{
		// For some reason the coroutine fucks this up if it's a capture, so make a local copy
		auto promise = origPromise;
		if (result.err()) {
			Logger::logError(String("Error signing in to Xbox Live: ") + result.err_message());
			status = XBLStatus::Disconnected;
			promise.setValue(PlatformSignInResult(false, true, 10.0));
		} else {
			auto resultStatus = result.payload().status();
			switch (resultStatus) {
			case success:
				co_await onLoggedIn();
				promise.setValue(PlatformSignInResult(true, false, 0.0));
				break;
			case user_interaction_required:
				{
					// Silent sign-in not possible: show the interactive UI.
					xboxUser->signin(dispatcher).then([=](xbox::services::xbox_live_result<sign_in_result> loudResult) mutable -> winrt::Windows::Foundation::IAsyncAction
					{
						// For some reason the coroutine fucks this up if it's a capture, so make a local copy
						auto localPromise = promise;
						if (loudResult.err()) {
							Logger::logError("Error signing in to Xbox live: " + String(loudResult.err_message().c_str()));
							status = XBLStatus::Disconnected;
							localPromise.setValue(PlatformSignInResult(false, true, 10.0));
						} else {
							auto resPayload = loudResult.payload();
							switch (resPayload.status()) {
							case success:
								co_await onLoggedIn();
								localPromise.setValue(PlatformSignInResult(true, false, 0.0));
								break;
							default:
								status = XBLStatus::Disconnected;
								localPromise.setValue(PlatformSignInResult(false, true, 10.0));
								break;
							}
						}
					}, concurrency::task_continuation_context::use_current());
					break;
				}
			default:
				status = XBLStatus::Disconnected;
				promise.setValue(PlatformSignInResult(false, true, 10.0));
			}
		}
	});

	return origPromise.getFuture();
}
// Considered signed in while connecting/connected, or when XSAPI reports an
// active user session.
bool XBLManager::isSignedIn() const
{
	if (status != XBLStatus::Disconnected) {
		return true;
	}
	return xboxUser && xboxUser->is_signed_in();
}
// Acquires the Xbox Live connected-storage (GameSaveProvider) for the first
// Windows user. On failure, tears down all per-user state exactly like a
// sign-out and flags playerLoggedOut.
winrt::Windows::Foundation::IAsyncAction XBLManager::getConnectedStorage()
{
	using namespace winrt::Windows::Gaming::XboxLive::Storage;

	try
	{
		auto windowsUser = co_await winrt::Windows::System::User::FindAllAsync();

		GameSaveProviderGetResult result = co_await GameSaveProvider::GetForUserAsync(*windowsUser.First(), xboxLiveContext->application_config()->scid());

		if (result.Status() == GameSaveErrorStatus::Ok) {
			gameSaveProvider = result.Value();
			status = XBLStatus::Connected;
		}
		else {
			status = XBLStatus::Disconnected;
		}
	}
	catch (...)
	{
		status = XBLStatus::Disconnected;
	}

	if (status != XBLStatus::Connected)
	{
		Logger::logError(String("Error getting the connected storage for user '") + xboxUser->gamertag().c_str() + String("'"));

		xboxUser.reset();
		xboxLiveContext.reset();
		gameSaveProvider.reset();

		achievementsStatus = XBLAchievementsStatus::Uninitialized;
		achievementStatus.clear();

		playerLoggedOut = true;
	}
}

// Fetches the full achievement list for this title (page by page) and caches
// each achievement's unlocked state in achievementStatus. Sets
// achievementsStatus to Ready on success, back to Uninitialized on error.
void XBLManager::retrieveUserAchievementsState()
{
	achievementsStatus = XBLAchievementsStatus::Retrieving;
	achievementStatus.clear();

	xboxLiveContext->achievement_service().get_achievements_for_title_id(
		xboxUser->xbox_user_id(),
		xboxLiveContext->application_config()->title_id(),
		xbox::services::achievements::achievement_type::all,
		false,
		xbox::services::achievements::achievement_order_by::title_id,
		0,
		0)
		.then([=](xbox::services::xbox_live_result<xbox::services::achievements::achievements_result> result)
	{
		try
		{
			bool receivedMoreAchievements;
			do
			{
				receivedMoreAchievements = false;
				if (result.err())
				{
					Logger::logError(String("Error retrieving achievements for user '") + xboxUser->gamertag().c_str() + String("': ") + result.err().value() + " " + result.err_message());
					achievementsStatus = XBLAchievementsStatus::Uninitialized;
				}
				else
				{
					std::vector<xbox::services::achievements::achievement> achievements = result.payload().items();
					for (unsigned int i = 0; i < achievements.size(); ++i)
					{
						xbox::services::achievements::achievement achievement = achievements[i];
						bool isAchieved = (achievement.progress_state() == xbox::services::achievements::achievement_progress_state::achieved);
						Logger::logInfo(String("Achievement '") + achievement.name().c_str() + String("' (ID '") + achievement.id().c_str() + String("'): ") + (isAchieved ? String("Achieved") : String("Locked")));
						achievementStatus[achievement.id()] = isAchieved;
					}

					// Results are paged; fetch the next page synchronously.
					if (result.payload().has_next())
					{
						result = result.payload().get_next(32).get();
						receivedMoreAchievements = true;
					}
					else
					{
						achievementsStatus = XBLAchievementsStatus::Ready;
					}
				}
			} while (receivedMoreAchievements);
		}
		catch (...)
		{
			achievementsStatus = XBLAchievementsStatus::Uninitialized;
			Logger::logError(String("Error retrieving achievements for user '") + xboxUser->gamertag().c_str() + String("'"));
		}
	});
}

// Shows the system profile card UI for the given Xbox user id.
void XBLManager::showPlayerInfo(String playerId)
{
	xbox::services::system::title_callable_ui::show_profile_card_ui(playerId.getUTF16());
}

// Save-data facade bound to one named connected-storage container
// ("save" when no name is given). Acquires the container eagerly.
XBLSaveData::XBLSaveData(XBLManager& manager, String containerName)
	: manager(manager)
	, containerName(containerName.isEmpty() ? "save" : containerName)
	, isSaving(false)
{
	updateContainer();
}
// Stores a lower-cased copy of each word so the profanity check can match
// case-insensitively.
void XBLManager::setProfanityCheckForbiddenWordsList(std::vector<String> words)
{
	forbiddenWords.clear();
	forbiddenWords.reserve(words.size());
	for (const auto& entry : words) {
		string_t lowered = entry.getUTF16();
		std::transform(lowered.begin(), lowered.end(), lowered.begin(), ::towlower);
		forbiddenWords.push_back(String(lowered.c_str()));
	}
}
// Replaces every whole-word, case-insensitive occurrence of a forbidden word
// with asterisks of the same length. The word list is stored lower-cased by
// setProfanityCheckForbiddenWordsList, so matching is done on a lower-cased
// copy while the replacement is applied to the original text.
String XBLManager::performProfanityCheck(String text)
{
	string_t finalText = text.getUTF16();
	string_t lowercaseText = finalText;
	std::transform(lowercaseText.begin(), lowercaseText.end(), lowercaseText.begin(), ::towlower);

	// Word-boundary test. Use the wide-character classification function:
	// passing wchar_t values outside unsigned char range to isalpha/isdigit
	// is undefined behaviour.
	const auto isWordChar = [](wchar_t c) { return iswalnum(static_cast<wint_t>(c)) != 0; };

	for (size_t i = 0; i < forbiddenWords.size(); ++i)
	{
		const string_t forbiddenWord = forbiddenWords[i].getUTF16();
		if (forbiddenWord.empty()) {
			// An empty entry would match at every position and never advance.
			continue;
		}

		size_t startPos = 0;
		while ((startPos = lowercaseText.find(forbiddenWord, startPos)) != std::wstring::npos) {
			// Replace only full words
			const bool validFirstChar = startPos == 0 || !isWordChar(lowercaseText[startPos - 1]);
			const size_t endPos = startPos + forbiddenWord.length();
			const bool validLastChar = endPos >= lowercaseText.length() || !isWordChar(lowercaseText[endPos]);
			if (validFirstChar && validLastChar) {
				// Build the mask per word: works for words of any length
				// (the old fixed 256-char buffer threw for longer words).
				finalText.replace(startPos, forbiddenWord.length(), string_t(forbiddenWord.length(), L'*'));
			}
			startPos += forbiddenWord.length();
		}
	}

	return String(finalText.c_str());
}
// Called when the title is being suspended by the OS. This is a lifecycle
// trace, not a failure, so it is logged at info level (the old code logged
// it via Logger::logError, polluting error logs).
void XBLManager::suspend()
{
	Logger::logInfo("XBLManager::suspend");
	suspended = true;
	// While suspended the title gets no CPU time, so multiplayer operation
	// watchdogs would expire spuriously; pause them.
	multiplayeEnableTimeout(false);
}

// Called when the title resumes; re-arms the multiplayer watchdogs.
void XBLManager::resume()
{
	Logger::logInfo("XBLManager::resume");
	suspended = false;
	multiplayeEnableTimeout(true);
}

bool XBLManager::isSuspended() const
{
	return suspended;
}
// Ready once the underlying connected-storage container has been acquired.
bool XBLSaveData::isReady() const
{
	updateContainer();
	return gameSaveContainer.is_initialized();
}

// Reads one blob from connected storage; returns empty bytes when the blob
// does not exist or the read fails. Waits (up to 3 s) for a concurrent save
// to finish first. Blocks the caller until the storage call completes.
Bytes XBLSaveData::getData(const String& path)
{
	if (!isReady()) {
		throw Exception("Container is not ready yet!", HalleyExceptions::PlatformPlugin);
	}

	return Concurrent::execute([&] () -> Bytes
	{
		if (isSaving)
		{
			// NOTE(review): busy-wait on the isSaving flag — burns CPU for up to 3 s.
			unsigned long long timeout = GetTickCount64() + 3000;
			while (isSaving && GetTickCount64() < timeout) {}
			if (isSaving)
			{
				Logger::logWarning(String("Saving data to connected storage is taking too long!"));
			}
		}

		auto key = winrt::hstring(path.getUTF16());
		std::vector<winrt::hstring> updates;
		updates.push_back(key);
		auto view = winrt::single_threaded_vector(std::move(updates)).GetView();

		auto gameBlob = gameSaveContainer->GetAsync(view).get();
		if (gameBlob.Status() == winrt::Windows::Gaming::XboxLive::Storage::GameSaveErrorStatus::Ok) {
			if (gameBlob.Value().HasKey(key)) {
				auto buffer = gameBlob.Value().Lookup(key);

				auto size = buffer.Length();
				Bytes result(size);
				auto dataReader = winrt::Windows::Storage::Streams::DataReader::FromBuffer(buffer);
				dataReader.ReadBytes(winrt::array_view<uint8_t>(result));

				return result;
			}
		}
		else
		{
			Logger::logError(String("Error getting Blob '") + path + String("': ") + (int)gameBlob.Status());
		}

		return {};
	}).get();
}

// Lists the names of all blobs under the given prefix. Blocks the caller.
std::vector<String> XBLSaveData::enumerate(const String& root)
{
	if (!isReady()) {
		throw Exception("Container is not ready yet!", HalleyExceptions::PlatformPlugin);
	}

	return Concurrent::execute([&] () -> std::vector<String>
	{
		std::vector<String> results;

		auto query = gameSaveContainer->CreateBlobInfoQuery(root.getUTF16().c_str());
		auto info = query.GetBlobInfoAsync().get();
		if (info.Status() == winrt::Windows::Gaming::XboxLive::Storage::GameSaveErrorStatus::Ok) {
			auto& entries = info.Value();
			for (uint32_t i = 0; i < entries.Size(); ++i) {
				results.push_back(String(entries.GetAt(i).Name().c_str()));
			}
		}

		return results;
	}).get();
}

// Writes one blob asynchronously (fire-and-forget); isSaving guards readers
// while the submit is in flight. The 'commit' flag is unused — connected
// storage submits atomically per call.
void XBLSaveData::setData(const String& path, const Bytes& data, bool commit)
{
	if (!isReady()) {
		throw Exception("Container is not ready yet!", HalleyExceptions::PlatformPlugin);
	}

	isSaving = true;
	Concurrent::execute([=]() -> void
	{
		auto dataWriter = winrt::Windows::Storage::Streams::DataWriter();
		dataWriter.WriteBytes(winrt::array_view<const uint8_t>(data));

		std::map<winrt::hstring, winrt::Windows::Storage::Streams::IBuffer> updates;
		updates[winrt::hstring(path.getUTF16())] = dataWriter.DetachBuffer();
		auto view = winrt::single_threaded_map(std::move(updates)).GetView();

		auto result = gameSaveContainer->SubmitUpdatesAsync(view, {}, L"").get();
		if (result.Status() != winrt::Windows::Gaming::XboxLive::Storage::GameSaveErrorStatus::Ok)
		{
			Logger::logError(String("Error saving Blob '") + path + String("': ") + (int)result.Status());
		}

		isSaving = false;
	});
}

// Deletes one blob. Blocks the caller until the submit completes.
void XBLSaveData::removeData(const String& path)
{
	if (!isReady()) {
		throw Exception("Container is not ready yet!", HalleyExceptions::PlatformPlugin);
	}

	Concurrent::execute([=]() -> void
	{
		auto key = winrt::hstring(path.getUTF16());
		std::vector<winrt::hstring> updates;
		updates.push_back(key);
		auto view = winrt::single_threaded_vector(std::move(updates)).GetView();

		auto result = gameSaveContainer->SubmitUpdatesAsync({}, view, L"").get();
		if (result.Status() != winrt::Windows::Gaming::XboxLive::Storage::GameSaveErrorStatus::Ok)
		{
			Logger::logError(String("Error deleting Blob '") + path + String("': ") + (int)result.Status());
		}
	}).get();
}

// No-op: every setData/removeData call already submits to connected storage.
void XBLSaveData::commit()
{
}

// Drops and re-acquires the container (used after the provider is recreated).
void XBLSaveData::recreate()
{
	if (manager.getStatus() == XBLStatus::Connected) {
		gameSaveContainer.reset();
		gameSaveContainer = manager.getProvider()->CreateContainer(containerName.getUTF16().c_str());
	}
}

// Lazily (re)creates the container while connected; clears it otherwise.
void XBLSaveData::updateContainer() const
{
	if (manager.getStatus() == XBLStatus::Connected) {
		if (!gameSaveContainer) {
			gameSaveContainer = manager.getProvider()->CreateContainer(containerName.getUTF16().c_str());
		}
	} else {
		gameSaveContainer.reset();
	}
}
// Per-frame tick: drives the multiplayer state machine.
void XBLManager::update()
{
	multiplayerUpdate();
}

// Factory for platform multiplayer sessions bound to this manager.
std::unique_ptr<MultiplayerSession> XBLManager::makeMultiplayerSession(const String& key)
{
	return std::make_unique<XBLMultiplayerSession>(*this, key);
}

// Entry point for a protocol-activated invite. Waits (bounded) for the game
// to install its join callback and for save storage to come up, then hands
// the invite to preparingToJoinCallback — discarding duplicates of an invite
// that is already pending or running.
void XBLManager::invitationArrived (const std::wstring& uri)
{
	Logger::logInfo(String("Invite received: ") + String(uri.c_str()));

	Concurrent::execute([=]()
	{
		multiplayerIncommingInvitationMutex.lock();

		// Wait until join callback was set
		if (!joinCallback) {
			// NOTE(review): busy-wait, up to 30 s.
			unsigned long long timeout = GetTickCount64() + 30000;
			while (!joinCallback && GetTickCount64() < timeout) {}
			if (!joinCallback) {
				Logger::logWarning(String("Join callback is taking too long to set!"));
				return;
			}
		}

		// Ensure that save container is ready
		if (!getSaveContainer("")->isReady()) {
			unsigned long long timeout = GetTickCount64() + 30000;
			while (!getSaveContainer("")->isReady() && GetTickCount64() < timeout) {}
			if (!getSaveContainer("")->isReady()) {
				Logger::logWarning(String("Save container is taking too long to be ready!"));
			}
		}

		// Then start multiplayer discarding repeated invitations
		if ( multiplayerIncommingInvitationUri == uri || multiplayerTargetSetup.invitationUri == uri
		  || ( multiplayerCurrentSetup.invitationUri == uri && multiplayerState!=MultiplayerState::Error )
		   )
		{
			Logger::logWarning(String("Discarding repeated invite!"));
		}
		else
		{
			multiplayerIncommingInvitationUri = uri;
			preparingToJoinCallback();
		}

		multiplayerIncommingInvitationMutex.unlock();
	});
}

// True while an invite is waiting to be accepted.
bool XBLManager::incommingInvitation ()
{
	return !multiplayerIncommingInvitationUri.empty();
}

// Accepts the pending invite: queues an Invitee setup (picked up by
// multiplayerUpdate) and returns its session id.
int XBLManager::acceptInvitation ()
{
	multiplayerTargetSetup.mode = MultiplayerMode::Invitee;
	multiplayerTargetSetup.key = "";
	multiplayerTargetSetup.invitationUri = multiplayerIncommingInvitationUri;
	multiplayerTargetSetup.sessionId=multiplayerNextSessionId++;

	multiplayerDone();

	multiplayerIncommingInvitationUri = L"";

	return multiplayerTargetSetup.sessionId;
}

// Queues an Inviter (host) setup for the given game key and returns its
// session id.
int XBLManager::openHost (const String& key)
{
	multiplayerTargetSetup.mode = MultiplayerMode::Inviter;
	multiplayerTargetSetup.key = key;
	multiplayerTargetSetup.invitationUri = L"";
	multiplayerTargetSetup.sessionId=multiplayerNextSessionId++;

	multiplayerDone();

	return multiplayerTargetSetup.sessionId;
}

// Opens the system invite UI for the current lobby (only while running).
void XBLManager::showInviteUI()
{
	if (multiplayerState== MultiplayerState::Running) {
		Logger::logDev("NFO: Opening social UI...\n");
		auto result = xblMultiplayerManager->lobby_session()->invite_friends(xboxUser, L"", L"Join my game!!");
		if (result.err()) {
			Logger::logInfo("InviteFriends failed: "+toString(result.err_message().c_str())+"\n");
		}
	}
}

// Reports the status of a given session id (-1 = "whichever is active").
// A queued target setup reports Initializing; a mismatched id reports Error.
MultiplayerStatus XBLManager::getMultiplayerStatus(int session) const
{
	if ( multiplayerTargetSetup.sessionId!=-1 ) {
		if ( session==-1 || session==multiplayerTargetSetup.sessionId )
			return MultiplayerStatus::Initializing;
		else
			return MultiplayerStatus::Error;
	}
	else {
		if ( multiplayerCurrentSetup.sessionId!=-1 ) {
			if ( session==-1 || session==multiplayerCurrentSetup.sessionId ) {
				switch (multiplayerState) {
					case MultiplayerState::Initializing:
						return MultiplayerStatus::Initializing;
					case MultiplayerState::Running:
						return MultiplayerStatus::Running;
					case MultiplayerState::Error:
						return MultiplayerStatus::Error;
					case MultiplayerState::Ending:
					case MultiplayerState::NotInitialized:
					default:
						return MultiplayerStatus::NotInit;
				}
			}
			else
				return MultiplayerStatus::Error;
		}
		else {
			if ( session==-1 )
				return MultiplayerStatus::NotInit;
			else
				return MultiplayerStatus::Error;
		}
	}

	// Unreachable: all branches above return.
	return MultiplayerStatus::NotInit;
}
bool XBLManager::isMultiplayerAsHost () const
{
MultiplayerMode mode = multiplayerTargetSetup.mode;
if (mode == MultiplayerMode::None) mode=multiplayerCurrentSetup.mode;
return (mode == MultiplayerMode::Inviter);
}
bool XBLManager::isMultiplayerAsGuest () const
{
MultiplayerMode mode = multiplayerTargetSetup.mode;
if (mode == MultiplayerMode::None) mode=multiplayerCurrentSetup.mode;
return (mode == MultiplayerMode::Invitee);
}
// Closes the multiplayer session identified by 'session' (-1 closes whatever
// is current). If 'deepReset' is set, also wipes the pending invitation URI
// and both setup records back to their defaults.
void XBLManager::closeMultiplayer (bool deepReset, int session)
{
	// End the running session if it matches (or if the caller asked for "any").
	if (session == -1 || session == multiplayerCurrentSetup.sessionId) {
		multiplayerDone();
	}

	// Discard a queued-but-not-started setup with the same id.
	if (session == multiplayerTargetSetup.sessionId)
	{
		multiplayerTargetSetup.mode = MultiplayerMode::None;
		multiplayerTargetSetup.key = "";
		multiplayerTargetSetup.invitationUri = L"";
		multiplayerTargetSetup.sessionId = -1;
	}

	// Reset some stuff
	if (deepReset)
	{
		multiplayerIncommingInvitationUri = L"";

		multiplayerCurrentSetup.mode = MultiplayerMode::None;
		multiplayerCurrentSetup.key = "";
		multiplayerCurrentSetup.invitationUri = L"";
		multiplayerCurrentSetup.sessionId = -1;

		multiplayerTargetSetup.mode = MultiplayerMode::None;
		multiplayerTargetSetup.key = "";
		multiplayerTargetSetup.invitationUri = L"";
		multiplayerTargetSetup.sessionId = -1;
	}
}
// Per-frame tick of the multiplayer state machine: dispatches to the handler
// for the current state, then drains the Xbox Live multiplayer event queue.
void XBLManager::multiplayerUpdate()
{
	switch (multiplayerState) {
		case MultiplayerState::NotInitialized:
			multiplayerUpdate_NotInitialized();
			break;

		case MultiplayerState::Initializing:
			multiplayerUpdate_Initializing();
			break;

		case MultiplayerState::Running:
			multiplayerUpdate_Running();
			break;

		case MultiplayerState::Ending:
			multiplayerUpdate_Ending();
			break;
	}

	// Always pump MPM events, regardless of state, so async operations
	// (add user, join lobby, property writes...) can complete.
	xblMultiplayerPoolProcess();
}
// Idle-state handler: if a target setup has been queued, consume it, reset
// all async operation trackers, (re)initialize the Multiplayer Manager and
// move to the Initializing state.
void XBLManager::multiplayerUpdate_NotInitialized()
{
	if (multiplayerTargetSetup.mode != MultiplayerMode::None) {
		// Get incomming setup
		multiplayerCurrentSetup = multiplayerTargetSetup;

		// Delete incomming setup
		multiplayerTargetSetup.mode = MultiplayerMode::None;
		multiplayerTargetSetup.key = "";
		multiplayerTargetSetup.invitationUri = L"";
		multiplayerTargetSetup.sessionId = -1;

		// Reset operation states
		xblOperation_add_local_user.reset();
		xblOperation_set_property.reset();
		xblOperation_set_joinability.reset();
		xblOperation_join_lobby.reset();
		xblOperation_remove_local_user.reset();

		// MPM Initialization
		Logger::logInfo("NFO: Initialize multiplayer Manager\n");
		xblMultiplayerManager = multiplayer_manager::get_singleton_instance();
		xblMultiplayerManager->initialize(LOBBY_TEMPLATE_NAME);

		// Set state
		multiplayerState = MultiplayerState::Initializing;
	}
}
void XBLManager::multiplayerUpdate_Initializing()
{
switch (multiplayerCurrentSetup.mode) {
case MultiplayerMode::Inviter:
multiplayerUpdate_Initializing_Iniviter();
break;
case MultiplayerMode::Invitee:
multiplayerUpdate_Initializing_Inivitee();
break;
}
}
// Host-side initialization. Drives three async MPM operations in sequence:
// 1) add the local user to the lobby, 2) publish the game key as a lobby
// property, 3) make the lobby joinable by friends. Transitions to Running
// when all three complete, or to Error if any of them fails.
void XBLManager::multiplayerUpdate_Initializing_Iniviter()
{
	// Check 'add_local_user' Operation
	if (xblOperation_add_local_user.checkStateNotRequested()) {
		// Add Local User
		Logger::logDev("NFO: Add Local User\n");
		auto result = xblMultiplayerManager->lobby_session()->add_local_user(xboxUser);
		xblOperation_add_local_user.setStateRequested();
		if (result.err()) {
			Logger::logError("ERR: Unable to join local user: "+toString(result.err_message().c_str())+"\n" );
			xblOperation_add_local_user.setStateError();
		}
		else {
			// NOTE(review): placeholder address — presumably the real transport
			// address is exchanged elsewhere; confirm before relying on it.
			Logger::logDev("NFO: Set local user address\n");
			string_t connectionAddress = L"1.1.1.1";
			xblMultiplayerManager->lobby_session()->set_local_member_connection_address(xboxUser, connectionAddress);
		}
	}

	// Check 'set_property' Operation (only once the local user is in).
	if (xblOperation_set_property.checkStateNotRequested() && xblOperation_add_local_user.checkStateDoneOk()) {
		Logger::logDev("NFO: Set server user GameKey property:\n"+toString(multiplayerCurrentSetup.key.c_str()));
		std::string lobbyKey = multiplayerCurrentSetup.key.c_str();
		std::wstring lobbyKeyW (lobbyKey.begin(), lobbyKey.end());
		xblMultiplayerManager->lobby_session()->set_synchronized_properties(L"GameKey", web::json::value::string(lobbyKeyW), (void*)InterlockedIncrement(&xblMultiplayerContext));
		xblOperation_set_property.setStateRequested();
	}

	// Check 'set_joinability' Operation (only once the local user is in).
	if (xblOperation_set_joinability.checkStateNotRequested() && xblOperation_add_local_user.checkStateDoneOk()) {
		Logger::logDev("NFO: Set server joinability\n");
		xblMultiplayerManager->set_joinability (joinability::joinable_by_friends, (void*)InterlockedIncrement(&xblMultiplayerContext));
		xblOperation_set_joinability.setStateRequested();
	}

	// Check Initialization state based on Operations status
	if (xblOperation_add_local_user.checkStateDoneOk()
		&& xblOperation_set_property.checkStateDoneOk()
		&& xblOperation_set_joinability.checkStateDoneOk() ) {
		// Everthing ok : initaliziation successful
		multiplayerState = MultiplayerState::Running;
	}
	else {
		if (xblOperation_add_local_user.checkStateError()
			|| xblOperation_set_property.checkStateError()
			|| xblOperation_set_joinability.checkStateError()
			) {
			multiplayerState = MultiplayerState::Error;
			if (joinErrorCallback) {
				joinErrorCallback();
			}
		}
	}
}
// Guest-side initialization. Extracts the lobby handle from the invitation
// URI and joins the lobby; once joined, reads the host-published "GameKey"
// lobby property, fires the join callback with it, and tears the MPM session
// back down (the actual game connection is made with the key elsewhere).
// Any failure moves the state machine to Error and fires joinErrorCallback.
void XBLManager::multiplayerUpdate_Initializing_Inivitee()
{
	// Check 'join_lobby' Operation (aka protocol activation)
	if (xblOperation_join_lobby.checkStateNotRequested() ) {
		// Extract handle id from URI
		std::wstring handle = L"";
		size_t pos = multiplayerCurrentSetup.invitationUri.find(L"handle=");
		if (pos != std::string::npos) {
			// Handle id is a hyphenated GUID, so its length is fixed
			handle = multiplayerCurrentSetup.invitationUri.substr(pos + strlen("handle="), 36);
		}
		else {
			Logger::logError("ERR: Unable to extract handle ID from URI: "+toString(multiplayerCurrentSetup.invitationUri.c_str())+"\n");
			xblOperation_join_lobby.setStateError();
			return;
		}

		auto result = xblMultiplayerManager->join_lobby(handle, xboxUser);
		xblOperation_join_lobby.setStateRequested();
		if (result.err()) {
			Logger::logError("ERR: Unable to join to lobby: "+toString(result.err_message())+"\n");
			xblOperation_join_lobby.setStateError();
		}
		else {
			// NOTE(review): placeholder address — presumably the real transport
			// address is exchanged elsewhere; confirm before relying on it.
			Logger::logDev("NFO: Set local user address\n");
			string_t connectionAddress = L"1.1.1.1";
			result = xblMultiplayerManager->lobby_session()->set_local_member_connection_address(xboxUser, connectionAddress, (void*)InterlockedIncrement(&xblMultiplayerContext));
			if (result.err()) {
				Logger::logError("ERR: Unable to set local member connection address: "+toString(result.err_message().c_str())+"\n" );
				xblOperation_join_lobby.setStateError();
			}
		}
	}

	// Check Initialization state based on Operations status
	if (xblOperation_join_lobby.checkStateDoneOk()) {
		// Everthing ok : initaliziation successful
		multiplayerCurrentSetup.invitationUri=L"";
		try {
			// Get game key published by the host in the lobby properties.
			Logger::logDev("NFO: Get server user GameKey property\n");
			auto lobbySession = xblMultiplayerManager->lobby_session();
			web::json::value lobbyJson = lobbySession->properties();
			auto lobbyJsonKey = lobbyJson[L"GameKey"];
			std::wstring lobbyKey = lobbyJsonKey.as_string();
			Logger::logInfo("Got server user GameKey property:" + toString(lobbyKey.c_str()) + "\n");
			multiplayerCurrentSetup.key = String(lobbyKey.c_str());

			// Call join callback
			multiplayerState = MultiplayerState::Running;
			PlatformJoinCallbackParameters params;
			params.param = multiplayerCurrentSetup.key;
			joinCallback(params);

			// Done multiplayer
			multiplayerDone();
		} catch (...) {
			// properties() / JSON access failed: treat as a failed join.
			multiplayerState = MultiplayerState::Error;
			if (joinErrorCallback) {
				joinErrorCallback();
			}
		}
	}
	else {
		if (xblOperation_join_lobby.checkStateError()) {
			multiplayerCurrentSetup.invitationUri=L"";
			multiplayerState = MultiplayerState::Error;
			if (joinErrorCallback) {
				joinErrorCallback();
			}
		}
	}
}
// Running-state handler. Currently a placeholder: the per-mode branches are
// intentionally empty and no per-frame work is done while Running.
void XBLManager::multiplayerUpdate_Running()
{
	if (multiplayerCurrentSetup.mode != MultiplayerMode::None) {
		switch (multiplayerCurrentSetup.mode) {
			case MultiplayerMode::Inviter:
				break;
			case MultiplayerMode::Invitee:
				break;
		}
	}
}
// Ending-state handler: waits for all in-flight MPM operations to settle,
// then (if a local user was ever added/joined) requests its removal, and
// finally returns the state machine to NotInitialized.
void XBLManager::multiplayerUpdate_Ending()
{
	// Don't start teardown while any setup operation is still in flight.
	bool opsInProgress = ( xblOperation_add_local_user.checkStateRequested()
						|| xblOperation_join_lobby.checkStateRequested()
						|| xblOperation_set_property.checkStateRequested()
						|| xblOperation_set_joinability.checkStateRequested()
						);

	if (!opsInProgress) {
		// Only remove the local user if it was actually added/joined.
		bool removeUserNeeded = (xblOperation_add_local_user.checkStateDoneOk()
							  || xblOperation_join_lobby.checkStateDoneOk()
							  );

		if (removeUserNeeded) {
			// Check 'remove_local_user' Operation
			if ( xblOperation_remove_local_user.checkStateNotRequested() )
			{
				auto result = xblMultiplayerManager->lobby_session()->remove_local_user(xboxUser);
				xblOperation_remove_local_user.setStateRequested();
				if (result.err()) {
					Logger::logError("ERR: Unable to remove local user: "+toString(result.err_message().c_str())+"\n" );
					xblOperation_remove_local_user.setStateError();
				}
			}

			// Check NotInitialized state based on Operations status
			// (errors during removal still complete the teardown).
			if ( xblOperation_remove_local_user.checkStateDoneOk()
			  || xblOperation_remove_local_user.checkStateError()
			   )
			{
				// Ending done
				multiplayerState = MultiplayerState::NotInitialized;
			}
		}
		else
		{
			// Ending done
			multiplayerState = MultiplayerState::NotInitialized;
		}
	}
}
// Enables/disables the timeout watchdog on every tracked async operation.
void XBLManager::multiplayeEnableTimeout( bool active )
{
	xblOperation_add_local_user.enableTimeout(active);
	xblOperation_set_property.enableTimeout(active);
	xblOperation_set_joinability.enableTimeout(active);
	xblOperation_join_lobby.enableTimeout(active);
	xblOperation_remove_local_user.enableTimeout(active);
}
// Requests teardown of the current multiplayer session: moves the state
// machine to Ending (unless already fully shut down) and ticks it once
// immediately so teardown starts right away.
void XBLManager::multiplayerDone()
{
	if (multiplayerState != MultiplayerState::NotInitialized) {
		multiplayerState = MultiplayerState::Ending;
		update();
	}
}
// Drains the Multiplayer Manager event queue (do_work) and resolves the
// matching pending operation tracker for each event. Events arriving when no
// matching operation is in the Requested state are logged and ignored; event
// types this code does not act on are logged for diagnostics only.
void XBLManager::xblMultiplayerPoolProcess()
{
	if (xblMultiplayerManager!=nullptr) {
		std::vector<multiplayer_event> queue = xblMultiplayerManager->do_work();
		for (auto& e : queue) {
			switch (e.event_type()) {

				case multiplayer_event_type::user_added:
					{
						// Resolves the 'add_local_user' operation (host path).
						if ( xblOperation_add_local_user.checkStateRequested() )
						{
							auto userAddedArgs = std::dynamic_pointer_cast<user_added_event_args>(e.event_args());
							if (e.err()) {
								Logger::logError("ERR: event user_added: "+toString(e.err_message().c_str())+"\n");
								xblOperation_add_local_user.setStateError();
							}
							else {
								Logger::logDev("NFO: event user_added ok!...\n");
								xblOperation_add_local_user.setStateDoneOk();
							}
						}
						else
						{
							Logger::logDev("NFO: not expected response to multiplayer_event_type::user_added\n");
						}
					}
					break;

				case multiplayer_event_type::join_lobby_completed:
					{
						// Resolves the 'join_lobby' operation (guest path).
						if ( xblOperation_join_lobby.checkStateRequested() )
						{
							auto joinLobbyArgs = std::dynamic_pointer_cast<join_lobby_completed_event_args>(e.event_args());
							if (e.err()) {
								Logger::logError("ERR: JoinLobby failed: "+toString(e.err_message().c_str())+"\n" );
								xblOperation_join_lobby.setStateError();
							}
							else {
								Logger::logDev("NFO: JoinLobby ok!...\n");
								xblOperation_join_lobby.setStateDoneOk();
							}
						}
						else
						{
							Logger::logDev("NFO: not expected response to multiplayer_event_type::join_lobby_completed\n");
						}
					}
					break;

				case multiplayer_event_type::session_property_changed:
					{
						// Resolves the 'set_property' operation, but only for
						// lobby-session property changes.
						if ( xblOperation_set_property.checkStateRequested() )
						{
							auto gamePropChangedArgs = std::dynamic_pointer_cast<session_property_changed_event_args>(e.event_args());
							if (e.session_type() == multiplayer_session_type::lobby_session) {
								Logger::logDev("NFO: Lobby property changed...\n");
								xblOperation_set_property.setStateDoneOk();
							}
							else {
								Logger::logDev("NFO: Game property changed...\n");
							}
						}
						else
						{
							Logger::logDev("NFO: not expected response to multiplayer_event_type::session_property_changed\n");
						}
					}
					break;

				case multiplayer_event_type::joinability_state_changed:
					{
						// Resolves the 'set_joinability' operation.
						if ( xblOperation_set_joinability.checkStateRequested() )
						{
							if (e.err()) {
								Logger::logError("ERR: Joinabilty change failed: "+toString(e.err_message().c_str())+"\n");
								xblOperation_set_joinability.setStateError();
							}
							else {
								Logger::logDev("NFO: Joinabilty change ok!...\n");
								xblOperation_set_joinability.setStateDoneOk();
							}
						}
						else
						{
							Logger::logDev("NFO: not expected response to multiplayer_event_type::joinability_state_changed\n");
						}
					}
					break;

				case multiplayer_event_type::user_removed:
					{
						// Resolves the 'remove_local_user' operation (teardown).
						if ( xblOperation_remove_local_user.checkStateRequested() )
						{
							if (e.err()) {
								Logger::logError("ERR: multiplayer_event_type::user_removed failed: "+toString(e.err_message().c_str())+"\n");
								xblOperation_remove_local_user.setStateError();
							}
							else {
								Logger::logDev("NFO: multiplayer_event_type::user_removed ok!...\n");
								xblOperation_remove_local_user.setStateDoneOk();
							}
						}
						else
						{
							Logger::logDev("NFO: not expected response to multiplayer_event_type::user_removed\n");
						}
					}
					break;

				// The remaining event types are observed but not acted upon.
				case multiplayer_event_type::join_game_completed:
					Logger::logDev("NFO: multiplayer_event_type::join_game_completed\n");
					break;

				case multiplayer_event_type::member_property_changed:
					Logger::logDev("NFO: multiplayer_event_type::member_property_changed\n");
					break;

				case multiplayer_event_type::member_joined:
					Logger::logDev("NFO: multiplayer_event_type::member_joined\n");
					break;

				case multiplayer_event_type::member_left:
					Logger::logDev("NFO: multiplayer_event_type::member_left\n");
					break;

				case multiplayer_event_type::leave_game_completed:
					Logger::logDev("NFO: multiplayer_event_type::leave_game_completed\n");
					break;

				case multiplayer_event_type::local_member_property_write_completed:
					Logger::logDev("NFO: multiplayer_event_type::local_member_property_write_completed\n");
					break;

				case multiplayer_event_type::local_member_connection_address_write_completed:
					Logger::logDev("NFO: multiplayer_event_type::local_member_connection_address_write_completed\n");
					break;

				case multiplayer_event_type::session_property_write_completed:
					Logger::logDev("NFO: multiplayer_event_type::session_property_write_completed\n");
					break;

				case multiplayer_event_type::session_synchronized_property_write_completed:
					Logger::logDev("NFO: multiplayer_event_type::session_synchronized_property_write_completed\n");
					break;

				case multiplayer_event_type::host_changed:
					Logger::logDev("NFO: multiplayer_event_type::host_changed\n");
					break;

				case multiplayer_event_type::synchronized_host_write_completed:
					Logger::logDev("NFO: multiplayer_event_type::synchronized_host_write_completed\n");
					break;

				case multiplayer_event_type::perform_qos_measurements:
					Logger::logDev("NFO: multiplayer_event_type::perform_qos_measurements\n");
					break;

				case multiplayer_event_type::find_match_completed:
					Logger::logDev("NFO: multiplayer_event_type::find_match_completed\n");
					break;

				case multiplayer_event_type::client_disconnected_from_multiplayer_service:
					Logger::logDev("NFO: multiplayer_event_type::client_disconnected_from_multiplayer_service\n");
					break;

				case multiplayer_event_type::invite_sent:
					Logger::logDev("NFO: multiplayer_event_type::invite_sent\n");
					break;

				case multiplayer_event_type::tournament_registration_state_changed:
					Logger::logDev("NFO: multiplayer_event_type::tournament_registration_state_changed\n");
					break;

				case multiplayer_event_type::tournament_game_session_ready:
					Logger::logDev("NFO: multiplayer_event_type::tournament_game_session_ready\n");
					break;

				case multiplayer_event_type::arbitration_complete:
					Logger::logDev("NFO: multiplayer_event_type::arbitration_complete\n");
					break;

				default:
					Logger::logDev("NFO: multiplayer_event_type::UKNOWN!?!?!\n");
					break;
			}
		}
	}
}
// Registers the callback invoked with the game key once a join completes.
void XBLManager::setJoinCallback(PlatformJoinCallback callback)
{
	joinCallback = callback;
}
// Registers the callback invoked when a join sequence is about to start.
void XBLManager::setPreparingToJoinCallback(PlatformPreparingToJoinCallback callback)
{
	preparingToJoinCallback = callback;
}
// Registers the callback invoked on join failure. If the state machine is
// already in Error when the callback is registered, it fires immediately so
// a late registration does not miss the failure.
void XBLManager::setJoinErrorCallback(PlatformJoinErrorCallback callback)
{
	joinErrorCallback = callback;
	if (callback && multiplayerState == MultiplayerState::Error) {
		callback();
	}
}
// RAII wrapper around a hosted multiplayer session: opens the host session on
// construction and closes it on destruction.
// Note: sessionId is initialized directly from openHost() instead of being
// set to -1 and immediately reassigned in the constructor body.
XBLMultiplayerSession::XBLMultiplayerSession(XBLManager& manager,const String& key)
	: manager(manager)
	, key(key)
	, sessionId(manager.openHost(key))
{
}
// Closes this session on destruction (shallow close: pending invitations and
// other sessions' state are left untouched).
XBLMultiplayerSession::~XBLMultiplayerSession()
{
	manager.closeMultiplayer(false, sessionId);
}
// Returns the manager's status for this specific session.
MultiplayerStatus XBLMultiplayerSession::getStatus() const
{
	return manager.getMultiplayerStatus ( sessionId );
}
// Shows the platform invite UI for this session.
// NOTE(review): maxPlayers and messagePerLanguage are currently ignored —
// the Xbox Live implementation uses a fixed invite message; confirm whether
// localized messages should be forwarded here.
void XBLMultiplayerSession::showInviteUI(int maxPlayers, const std::map<I18NLanguage, String>& messagePerLanguage)
{
	manager.showInviteUI ();
}
| 15,609 |
348 | {"nom":"Labergement-lès-Seurre","circ":"5ème circonscription","dpt":"Côte-d'Or","inscrits":752,"abs":468,"votants":284,"blancs":15,"nuls":8,"exp":261,"res":[{"nuance":"LR","nom":"<NAME>","voix":144},{"nuance":"REM","nom":"<NAME>","voix":117}]} | 101 |
310 | <filename>mgwt/src/main/java/com/propertycross/mgwt/AppPlaceHistoryMapper.java
package com.propertycross.mgwt;
import com.google.gwt.place.shared.PlaceHistoryMapper;
import com.google.gwt.place.shared.WithTokenizers;
import com.propertycross.mgwt.place.FavouritesPlace.FavouritesPlaceTokenizer;
import com.propertycross.mgwt.place.PropertyCrossPlace.PropertyCrossPlaceTokenizer;
import com.propertycross.mgwt.place.PropertyPlace.PropertyPlaceTokenizer;
import com.propertycross.mgwt.place.SearchResultsPlace.SearchResultsPlaceTokenizer;
/**
 * Maps application {@code Place}s to and from browser-history tokens.
 * GWT generates the implementation from the tokenizers declared in
 * {@code @WithTokenizers}; one tokenizer is listed per navigable place.
 */
@WithTokenizers({ PropertyCrossPlaceTokenizer.class, SearchResultsPlaceTokenizer.class, PropertyPlaceTokenizer.class,
    FavouritesPlaceTokenizer.class })
public interface AppPlaceHistoryMapper extends PlaceHistoryMapper {
}
| 221 |
809 | /**
* @file
*
* @date 12.03.2015
* @author: <NAME>
*/
#include <kernel/printk.h>
/* Fallback stub: logs that no real ACPI shutdown implementation is linked in
 * (enable the module named in the message to get actual power-off). */
void acpi_shutdown(void) {
	printk("No shutdown implemented please enable "
			"'embox.arch.x86.kernel.acpi_shutdown'\n");
}
/* Fallback stub: ACPI reset is a no-op in this configuration. */
void acpi_reset(void) {
}
| 101 |
3,997 | <reponame>equeim/javacpp
package org.bytedeco.javacpp.annotation;
import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Marks a method or parameter whose native counterpart is adapted via
 * {@code BasicStringAdapter} — presumably mapping {@code std::u32string}
 * (element types {@code int, char32_t}) to a Java string; the value holds
 * the native element type list.
 */
@Documented
@Retention(RetentionPolicy.RUNTIME)
@Target({ElementType.METHOD, ElementType.PARAMETER})
@Adapter("BasicStringAdapter")
public @interface StdU32String {
    String value() default "int, char32_t";
}
575 | <filename>device/fido/large_blob.cc
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "device/fido/large_blob.h"
#include "base/containers/span.h"
#include "components/cbor/reader.h"
#include "components/cbor/writer.h"
#include "crypto/aead.h"
#include "crypto/random.h"
#include "crypto/sha2.h"
#include "device/fido/fido_parsing_utils.h"
#include "device/fido/pin.h"
namespace device {
namespace {
// The number of bytes the large blob validation hash is truncated to.
constexpr size_t kTruncatedHashBytes = 16;
constexpr std::array<uint8_t, 4> kLargeBlobADPrefix = {'b', 'l', 'o', 'b'};
constexpr size_t kAssociatedDataLength = kLargeBlobADPrefix.size() + 8;
// Builds the AEAD additional authenticated data for a large blob: the fixed
// "blob" prefix followed by the original (plaintext) size encoded as a
// 64-bit little-endian integer.
std::array<uint8_t, kAssociatedDataLength> GenerateLargeBlobAdditionalData(
    size_t size) {
  std::array<uint8_t, kAssociatedDataLength> additional_data;
  auto* out = std::copy(kLargeBlobADPrefix.begin(), kLargeBlobADPrefix.end(),
                        additional_data.begin());
  const std::array<uint8_t, 8> encoded_size =
      fido_parsing_utils::Uint64LittleEndian(size);
  std::copy(encoded_size.begin(), encoded_size.end(), out);
  return additional_data;
}
} // namespace
// Fragment of the serialized large-blob array, carrying its byte payload and
// the offset it occupies within the full array.
// Fix: the by-value parameter was declared `const`, which silently turned
// `std::move(bytes)` into a copy (top-level const on a value parameter does
// not affect the signature, so callers are unaffected).
LargeBlobArrayFragment::LargeBlobArrayFragment(std::vector<uint8_t> bytes,
                                               const size_t offset)
    : bytes(std::move(bytes)), offset(offset) {}
LargeBlobArrayFragment::~LargeBlobArrayFragment() = default;
LargeBlobArrayFragment::LargeBlobArrayFragment(LargeBlobArrayFragment&&) =
    default;
// Verifies the integrity trailer of a serialized large-blob array: the last
// |kTruncatedHashBytes| bytes must equal the truncated SHA-256 of everything
// before them. Returns false for arrays too short to contain a trailer.
bool VerifyLargeBlobArrayIntegrity(base::span<const uint8_t> large_blob_array) {
  if (large_blob_array.size() <= kTruncatedHashBytes) {
    return false;
  }
  const size_t trail_offset = large_blob_array.size() - kTruncatedHashBytes;
  std::array<uint8_t, crypto::kSHA256Length> large_blob_hash =
      crypto::SHA256Hash(large_blob_array.subspan(0, trail_offset));

  base::span<const uint8_t> large_blob_trail =
      large_blob_array.subspan(trail_offset);
  // Compare only the truncated prefix of the hash against the trailer.
  return std::equal(large_blob_hash.begin(),
                    large_blob_hash.begin() + kTruncatedHashBytes,
                    large_blob_trail.begin(), large_blob_trail.end());
}
// static
// Builds a large-blob read request for |bytes| bytes starting at |offset|.
// |bytes| must be non-zero.
LargeBlobsRequest LargeBlobsRequest::ForRead(size_t bytes, size_t offset) {
  DCHECK_GT(bytes, 0u);
  LargeBlobsRequest request;
  request.get_ = bytes;
  request.offset_ = offset;
  return request;
}
// static
// Builds a large-blob write request from |fragment|. |length| is the total
// serialized array length; per CTAP it is only sent on the first fragment
// (offset 0).
LargeBlobsRequest LargeBlobsRequest::ForWrite(LargeBlobArrayFragment fragment,
                                              size_t length) {
  LargeBlobsRequest request;
  if (fragment.offset == 0) {
    request.length_ = length;
  }
  request.offset_ = fragment.offset;
  request.set_ = std::move(fragment.bytes);
  return request;
}
// Default/move members; construction goes through ForRead()/ForWrite().
LargeBlobsRequest::LargeBlobsRequest() = default;
LargeBlobsRequest::LargeBlobsRequest(LargeBlobsRequest&& other) = default;
LargeBlobsRequest::~LargeBlobsRequest() = default;
void LargeBlobsRequest::SetPinParam(
const pin::TokenResponse& pin_uv_auth_token) {
DCHECK(set_) << "SetPinParam should only be used for write requests";
std::vector<uint8_t> pin_auth(pin::kPinUvAuthTokenSafetyPadding.begin(),
pin::kPinUvAuthTokenSafetyPadding.end());
pin_auth.insert(pin_auth.end(), kLargeBlobPinPrefix.begin(),
kLargeBlobPinPrefix.end());
const std::array<uint8_t, 4> offset_array =
fido_parsing_utils::Uint32LittleEndian(offset_);
pin_auth.insert(pin_auth.end(), offset_array.begin(), offset_array.end());
std::array<uint8_t, crypto::kSHA256Length> set_hash =
crypto::SHA256Hash(*set_);
pin_auth.insert(pin_auth.end(), set_hash.begin(), set_hash.end());
std::tie(pin_uv_auth_protocol_, pin_uv_auth_param_) =
pin_uv_auth_token.PinAuth(pin_auth);
}
// static
// Parses an authenticatorLargeBlobs read response: expects a CBOR map with a
// bytestring under the config key, no larger than |bytes_to_read|. Returns
// nullopt on any structural violation.
base::Optional<LargeBlobsResponse> LargeBlobsResponse::ParseForRead(
    const size_t bytes_to_read,
    const base::Optional<cbor::Value>& cbor_response) {
  if (!cbor_response || !cbor_response->is_map()) {
    return base::nullopt;
  }
  const cbor::Value::MapValue& map = cbor_response->GetMap();
  auto it =
      map.find(cbor::Value(static_cast<int>(LargeBlobsResponseKey::kConfig)));
  if (it == map.end() || !it->second.is_bytestring()) {
    return base::nullopt;
  }
  const std::vector<uint8_t>& config = it->second.GetBytestring();
  // The authenticator must not return more than was requested.
  if (config.size() > bytes_to_read) {
    return base::nullopt;
  }
  return LargeBlobsResponse(std::move(config));
}
// static
// Parses an authenticatorLargeBlobs write response. A successful write is
// signalled by the absence of any response payload, so any present value is
// treated as a protocol violation.
base::Optional<LargeBlobsResponse> LargeBlobsResponse::ParseForWrite(
    const base::Optional<cbor::Value>& cbor_response) {
  if (!cbor_response) {
    return LargeBlobsResponse();
  }
  return base::nullopt;
}
// |config| carries the read-back blob bytes for read responses and is empty
// (nullopt) for write responses.
LargeBlobsResponse::LargeBlobsResponse(
    base::Optional<std::vector<uint8_t>> config)
    : config_(std::move(config)) {}
LargeBlobsResponse::LargeBlobsResponse(LargeBlobsResponse&& other) = default;
LargeBlobsResponse& LargeBlobsResponse::operator=(LargeBlobsResponse&& other) =
    default;
LargeBlobsResponse::~LargeBlobsResponse() = default;
// Serializes |request| into the CTAP authenticatorLargeBlobs command: a CBOR
// map containing only the fields that were populated (get/set/offset/length
// and the PIN/UV auth fields).
std::pair<CtapRequestCommand, base::Optional<cbor::Value>>
AsCTAPRequestValuePair(const LargeBlobsRequest& request) {
  cbor::Value::MapValue map;
  if (request.get_) {
    map.emplace(static_cast<int>(LargeBlobsRequestKey::kGet), *request.get_);
  }
  if (request.set_) {
    map.emplace(static_cast<int>(LargeBlobsRequestKey::kSet), *request.set_);
  }
  // Offset is always present, even when zero.
  map.emplace(static_cast<int>(LargeBlobsRequestKey::kOffset), request.offset_);
  if (request.length_) {
    map.emplace(static_cast<int>(LargeBlobsRequestKey::kLength),
                *request.length_);
  }
  if (request.pin_uv_auth_param_) {
    map.emplace(static_cast<int>(LargeBlobsRequestKey::kPinUvAuthParam),
                *request.pin_uv_auth_param_);
  }
  if (request.pin_uv_auth_protocol_) {
    map.emplace(static_cast<int>(LargeBlobsRequestKey::kPinUvAuthProtocol),
                static_cast<uint8_t>(*request.pin_uv_auth_protocol_));
  }
  return std::make_pair(CtapRequestCommand::kAuthenticatorLargeBlobs,
                        cbor::Value(std::move(map)));
}
// static.
// Parses one large-blob array entry from CBOR: a map with a ciphertext
// bytestring, a nonce bytestring of exactly kLargeBlobArrayNonceLength
// bytes, and an unsigned original size. Returns nullopt on any mismatch.
base::Optional<LargeBlobData> LargeBlobData::Parse(const cbor::Value& value) {
  if (!value.is_map()) {
    return base::nullopt;
  }
  const cbor::Value::MapValue& map = value.GetMap();
  auto ciphertext_it =
      map.find(cbor::Value(static_cast<int>(LargeBlobDataKeys::kCiphertext)));
  if (ciphertext_it == map.end() || !ciphertext_it->second.is_bytestring()) {
    return base::nullopt;
  }
  auto nonce_it =
      map.find(cbor::Value(static_cast<int>(LargeBlobDataKeys::kNonce)));
  if (nonce_it == map.end() || !nonce_it->second.is_bytestring() ||
      nonce_it->second.GetBytestring().size() != kLargeBlobArrayNonceLength) {
    return base::nullopt;
  }
  auto orig_size_it =
      map.find(cbor::Value(static_cast<int>(LargeBlobDataKeys::kOrigSize)));
  if (orig_size_it == map.end() || !orig_size_it->second.is_unsigned()) {
    return base::nullopt;
  }
  return LargeBlobData(ciphertext_it->second.GetBytestring(),
                       base::make_span<kLargeBlobArrayNonceLength>(
                           nonce_it->second.GetBytestring()),
                       orig_size_it->second.GetUnsigned());
}
// Constructs an entry from already-encrypted parts (used by Parse()).
// Fix: dropped the pointless std::move on the trivially-copyable int64_t —
// it was a plain copy dressed up as a move (clang-tidy
// performance-move-const-arg).
LargeBlobData::LargeBlobData(
    std::vector<uint8_t> ciphertext,
    base::span<const uint8_t, kLargeBlobArrayNonceLength> nonce,
    int64_t orig_size)
    : ciphertext_(std::move(ciphertext)), orig_size_(orig_size) {
  std::copy(nonce.begin(), nonce.end(), nonce_.begin());
}
// Constructs an entry by encrypting |blob| with AES-256-GCM under |key|,
// using a fresh random nonce and the original size bound into the AEAD
// additional data.
LargeBlobData::LargeBlobData(LargeBlobKey key, base::span<const uint8_t> blob) {
  orig_size_ = blob.size();
  crypto::Aead aead(crypto::Aead::AeadAlgorithm::AES_256_GCM);
  aead.Init(key);
  crypto::RandBytes(nonce_);
  ciphertext_ =
      aead.Seal(blob, nonce_, GenerateLargeBlobAdditionalData(orig_size_));
}
// Move-only value type.
LargeBlobData::LargeBlobData(LargeBlobData&&) = default;
LargeBlobData& LargeBlobData::operator=(LargeBlobData&&) = default;
LargeBlobData::~LargeBlobData() = default;
// Field-wise equality over ciphertext, nonce and original size.
bool LargeBlobData::operator==(const LargeBlobData& other) const {
  if (ciphertext_ != other.ciphertext_) {
    return false;
  }
  if (nonce_ != other.nonce_) {
    return false;
  }
  return orig_size_ == other.orig_size_;
}
// Decrypts the entry with AES-256-GCM under |key|. Returns nullopt if the
// key is wrong or the ciphertext/AD fails authentication.
base::Optional<std::vector<uint8_t>> LargeBlobData::Decrypt(
    LargeBlobKey key) const {
  crypto::Aead aead(crypto::Aead::AeadAlgorithm::AES_256_GCM);
  aead.Init(key);
  return aead.Open(ciphertext_, nonce_,
                   GenerateLargeBlobAdditionalData(orig_size_));
}
// Serializes the entry into its CBOR map form (ciphertext, nonce, origSize).
cbor::Value::MapValue LargeBlobData::AsCBOR() const {
  cbor::Value::MapValue map;
  map.emplace(static_cast<int>(LargeBlobDataKeys::kCiphertext), ciphertext_);
  map.emplace(static_cast<int>(LargeBlobDataKeys::kNonce), nonce_);
  map.emplace(static_cast<int>(LargeBlobDataKeys::kOrigSize), orig_size_);
  return map;
}
// Accumulates fragments read from the authenticator; see Append()/Materialize().
LargeBlobArrayReader::LargeBlobArrayReader() = default;
LargeBlobArrayReader::LargeBlobArrayReader(LargeBlobArrayReader&&) = default;
LargeBlobArrayReader::~LargeBlobArrayReader() = default;
// Appends one fragment, in arrival order, to the buffered array bytes.
// Validation happens later in Materialize().
void LargeBlobArrayReader::Append(const std::vector<uint8_t>& fragment) {
  bytes_.reserve(bytes_.size() + fragment.size());
  bytes_.insert(bytes_.end(), fragment.begin(), fragment.end());
}
// Validates the integrity trailer and decodes the buffered bytes into a list
// of large-blob entries. Returns nullopt if the trailer or the CBOR array is
// invalid; individual entries that fail to parse are skipped, not fatal.
base::Optional<std::vector<LargeBlobData>> LargeBlobArrayReader::Materialize() {
  if (!VerifyLargeBlobArrayIntegrity(bytes_)) {
    return base::nullopt;
  }

  // Strip the truncated-hash trailer before CBOR decoding.
  base::span<const uint8_t> cbor_bytes =
      base::make_span(bytes_.data(), bytes_.size() - kTruncatedHashBytes);
  base::Optional<cbor::Value> cbor = cbor::Reader::Read(cbor_bytes);
  if (!cbor || !cbor->is_array()) {
    return base::nullopt;
  }

  std::vector<LargeBlobData> large_blob_array;
  const cbor::Value::ArrayValue& array = cbor->GetArray();
  for (const cbor::Value& value : array) {
    base::Optional<LargeBlobData> large_blob_data = LargeBlobData::Parse(value);
    if (!large_blob_data) {
      // Tolerate malformed entries written by other platforms.
      continue;
    }

    large_blob_array.emplace_back(std::move(*large_blob_data));
  }

  return large_blob_array;
}
// Serializes |large_blob_array| to CBOR and appends the truncated SHA-256
// integrity trailer, producing the exact byte stream to write to the
// authenticator.
LargeBlobArrayWriter::LargeBlobArrayWriter(
    const std::vector<LargeBlobData>& large_blob_array) {
  cbor::Value::ArrayValue array;
  for (const LargeBlobData& large_blob_data : large_blob_array) {
    array.emplace_back(large_blob_data.AsCBOR());
  }
  bytes_ = *cbor::Writer::Write(cbor::Value(array));

  std::array<uint8_t, crypto::kSHA256Length> large_blob_hash =
      crypto::SHA256Hash(bytes_);
  bytes_.insert(bytes_.end(), large_blob_hash.begin(),
                large_blob_hash.begin() + kTruncatedHashBytes);
  // Sanity-check that what we wrote will verify on read-back.
  DCHECK(VerifyLargeBlobArrayIntegrity(bytes_));
}
// Move-only.
LargeBlobArrayWriter::LargeBlobArrayWriter(LargeBlobArrayWriter&&) = default;
LargeBlobArrayWriter::~LargeBlobArrayWriter() = default;
// Pops the next fragment of at most |length| bytes, advancing the write
// cursor. Must not be called once all fragments have been consumed.
LargeBlobArrayFragment LargeBlobArrayWriter::Pop(size_t length) {
  CHECK(has_remaining_fragments());
  // Clamp the final fragment to the bytes actually remaining.
  length = std::min(length, bytes_.size() - offset_);

  LargeBlobArrayFragment fragment{
      fido_parsing_utils::Materialize(
          base::make_span(bytes_.data() + offset_, length)),
      offset_};
  offset_ += length;
  return fragment;
}
} // namespace device
| 4,360 |
705 | <reponame>gaybro8777/klio
# Copyright 2019-2020 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from googleapiclient import errors as gerrors
from klio_exec.commands import stop
@pytest.fixture
def mock_discovery_client(mocker, monkeypatch):
    """Mock out googleapiclient discovery so no real API client is built."""
    mock = mocker.Mock()
    # Any (service, version) pair returns the same mock client.
    monkeypatch.setattr(stop.discovery, "build", lambda x, y: mock)
    return mock
@pytest.fixture
def config(mocker):
    """Minimal klio job config with the project/region/name the tests expect."""
    pipeline_options = mocker.Mock(
        project="test-project", region="europe-west1"
    )
    return mocker.Mock(job_name="test-job", pipeline_options=pipeline_options)
@pytest.fixture
def jobs_response():
    """Dataflow jobs.list payload containing the target job plus a decoy."""
    return {
        "jobs": [
            {"name": "not-the-test-job"},
            {
                "id": "1234",
                "name": "test-job",
                "projectId": "test-project",
                "location": "europe-west1",
            },
        ]
    }
@pytest.fixture
def job(jobs_response):
    """The matching job entry from ``jobs_response``."""
    return jobs_response["jobs"][1]
@pytest.fixture
def mock_sleep(mocker, monkeypatch):
    """Patch time.sleep so retry backoff does not slow the tests down."""
    mock = mocker.Mock()
    monkeypatch.setattr(stop.time, "sleep", mock)
    return mock
@pytest.mark.parametrize("api_version", (None, "v1b3", "v2"))
def test_set_dataflow_client(mock_discovery_client, api_version):
    """_set_dataflow_client populates the module-level client singleton."""
    assert stop._client is None
    stop._set_dataflow_client(api_version)
    assert stop._client is not None
    assert mock_discovery_client == stop._client
    # cleanup: reset the module-level singleton for other tests
    setattr(stop, "_client", None)
# return the desired job, no jobs at all, or no jobs matching job name
@pytest.mark.parametrize("returns_jobs", (True, False, None))
def test_check_job_running(
mock_discovery_client, returns_jobs, jobs_response, config, monkeypatch
):
monkeypatch.setattr(stop, "_client", mock_discovery_client)
_projects_req = mock_discovery_client.projects.return_value
req = _projects_req.locations.return_value.jobs.return_value.list
req.return_value.execute.return_value = {}
if returns_jobs:
req.return_value.execute.return_value = jobs_response
elif returns_jobs is False:
req.return_value.execute.return_value = {
"jobs": [{"name": "not-the-test-job"}]
}
ret = stop._check_job_running(config)
if returns_jobs:
assert jobs_response["jobs"][1] == ret
else:
assert ret is None
req.assert_called_once_with(
projectId="test-project", location="europe-west1", filter="ACTIVE"
)
req.return_value.execute.assert_called_once_with()
def test_check_job_running_errors(
    mock_discovery_client, config, monkeypatch, caplog
):
    """API failures in _check_job_running are swallowed and logged."""
    monkeypatch.setattr(stop, "_client", mock_discovery_client)
    _projects_req = mock_discovery_client.projects.return_value
    req = _projects_req.locations.return_value.jobs.return_value.list
    req.return_value.execute.side_effect = Exception("foo")

    stop._check_job_running(config)

    req.assert_called_once_with(
        projectId="test-project", location="europe-west1", filter="ACTIVE"
    )
    req.return_value.execute.assert_called_once_with()
    assert 2 == len(caplog.records)
@pytest.mark.parametrize(
    "state,pyver", (("drain", None), ("cancel", None), (None, 2), (None, 3))
)
def test_update_job_state(
    state, pyver, mock_discovery_client, job, monkeypatch
):
    """_update_job_state issues a jobs.update with the requested state.

    When no explicit state is given, the default from JOB_STATE_MAP is used
    (drained vs. cancelled depending on the scenario being simulated).
    """
    monkeypatch.setattr(stop, "_client", mock_discovery_client)
    exp_state = state
    if not state:
        if pyver == 2:
            exp_state = "JOB_STATE_DRAINED"
        else:
            exp_state = "JOB_STATE_CANCELLED"
        monkeypatch.setitem(stop.JOB_STATE_MAP, "default", exp_state)

    _projects_req = mock_discovery_client.projects.return_value
    req = _projects_req.locations.return_value.jobs.return_value.update
    req.return_value.execute.return_value = None

    stop._update_job_state(job, state)

    # the job dict itself is mutated to carry the requested state
    job["requestedState"] = exp_state
    req.assert_called_once_with(
        jobId="1234",
        projectId="test-project",
        location="europe-west1",
        body=job,
    )
    req.return_value.execute.assert_called_once_with()
def test_update_job_state_400_error(
    mock_discovery_client, job, mock_sleep, mocker, monkeypatch, caplog
):
    """A 400 response is fatal immediately - one attempt, no retries."""
    monkeypatch.setattr(stop, "_client", mock_discovery_client)
    projects = mock_discovery_client.projects.return_value
    update_req = projects.locations.return_value.jobs.return_value.update
    fake_resp = mocker.Mock(status=400)
    update_req.return_value.execute.side_effect = gerrors.HttpError(
        fake_resp, b"foo"
    )

    with pytest.raises(SystemExit):
        stop._update_job_state(job, "drain")

    assert 1 == update_req.return_value.execute.call_count
    assert 1 == len(caplog.records)
    assert not mock_sleep.call_count


def test_update_job_state_500_error(
    mock_discovery_client, job, mock_sleep, mocker, monkeypatch, caplog
):
    """A 500 response is retried (4 attempts, 3 sleeps) before giving up."""
    monkeypatch.setattr(stop, "_client", mock_discovery_client)
    projects = mock_discovery_client.projects.return_value
    update_req = projects.locations.return_value.jobs.return_value.update
    fake_resp = mocker.Mock(status=500)
    update_req.return_value.execute.side_effect = gerrors.HttpError(
        fake_resp, b"foo"
    )

    with pytest.raises(SystemExit):
        stop._update_job_state(job, "drain")

    assert 4 == update_req.return_value.execute.call_count
    assert 4 == len(caplog.records)
    assert 3 == mock_sleep.call_count


def test_update_job_state_error(
    mock_discovery_client, job, mock_sleep, monkeypatch, caplog
):
    """Generic exceptions are retried like server errors before exiting."""
    monkeypatch.setattr(stop, "_client", mock_discovery_client)
    projects = mock_discovery_client.projects.return_value
    update_req = projects.locations.return_value.jobs.return_value.update
    update_req.return_value.execute.side_effect = Exception("foo")

    with pytest.raises(SystemExit):
        stop._update_job_state(job, "cancel")

    assert 4 == update_req.return_value.execute.call_count
    assert 4 == len(caplog.records)
    assert 3 == mock_sleep.call_count
@pytest.mark.parametrize(
    "exec_side_effect",
    (
        (
            {"currentState": "JOB_STATE_CANCELLING"},
            {"currentState": "JOB_STATE_CANCELLED"},
        ),
        (Exception("foo"), {"currentState": "JOB_STATE_CANCELLED"}),
    ),
)
def test_watch_job_state(
    mock_discovery_client,
    mock_sleep,
    monkeypatch,
    caplog,
    job,
    exec_side_effect,
):
    """Polling continues (and survives transient errors) until a terminal state."""
    monkeypatch.setattr(stop, "_client", mock_discovery_client)
    projects = mock_discovery_client.projects.return_value
    get_req = projects.locations.return_value.jobs.return_value.get
    get_req.return_value.execute.side_effect = exec_side_effect

    stop._watch_job_state(job)

    assert 2 == get_req.return_value.execute.call_count
    mock_sleep.assert_called_once_with(5)
    assert 1 == len(caplog.records)


def test_watch_job_state_raises(
    mock_discovery_client, monkeypatch, caplog, job
):
    """A zero timeout aborts the watch with SystemExit and logs once."""
    monkeypatch.setattr(stop, "_client", mock_discovery_client)
    with pytest.raises(SystemExit):
        stop._watch_job_state(job, timeout=0)
    assert 1 == len(caplog.records)
@pytest.mark.parametrize("has_running_job", (True, False))
def test_stop(has_running_job, config, mocker, monkeypatch, job):
    """stop() only updates/watches job state when a matching job is active."""
    set_client = mocker.Mock()
    monkeypatch.setattr(stop, "_set_dataflow_client", set_client)
    found_job = job if has_running_job else None
    check_running = mocker.Mock(return_value=found_job)
    monkeypatch.setattr(stop, "_check_job_running", check_running)
    update_state = mocker.Mock()
    monkeypatch.setattr(stop, "_update_job_state", update_state)
    watch_state = mocker.Mock()
    monkeypatch.setattr(stop, "_watch_job_state", watch_state)

    stop.stop(config, "cancel")

    set_client.assert_called_once_with()
    check_running.assert_called_once_with(config)
    if has_running_job:
        update_state.assert_called_once_with(job, req_state="cancel")
        watch_state.assert_called_once_with(job)
    else:
        update_state.assert_not_called()
        watch_state.assert_not_called()
| 3,390 |
643 | <reponame>adityasharad/ql<gh_stars>100-1000
// Test fixture: a generic holder with identity methods, used to exercise
// flow through type parameters. NOTE(review): this looks like CodeQL test
// data, so the code is kept byte-identical; only comments are added.
class Generic2<T> {
    public Generic2(T init) { stored = init; }
    private T stored;
    T identity2(T param) { return identity(param); } // forwards to identity()
    T identity(T param) { return param; }
    T getter() { return stored; }
    void setter(T param) { stored = param; }
}
public class Test {
    public static void user() {
        // Invariant instantiation: T = String.
        Generic2<String> invariant = new Generic2<String>("hello world");
        invariant.identity("hello world");
        invariant.identity2("hello world");
        // Covariant projection: reading is allowed.
        Generic2<? extends String> projectedOut = invariant;
        projectedOut.getter();
        // Contravariant projection: writing is allowed; getter() yields Object.
        Generic2<? super String> projectedIn = invariant;
        projectedIn.setter("hi planet");
        projectedIn.getter();
    }
}
| 244 |
1,444 | package org.wiztools.restclient.ui.reqbody;
import com.google.inject.ImplementedBy;
import org.wiztools.restclient.bean.ReqEntity;
import org.wiztools.restclient.ui.ViewPanel;
/**
 * View panel for editing the HTTP request body. The default implementation
 * is bound via Guice to {@link ReqBodyPanelImpl}.
 *
 * @author subwiz
 */
@ImplementedBy(ReqBodyPanelImpl.class)
public interface ReqBodyPanel extends ViewPanel {
    /** Enables the body editor (e.g. for methods that carry an entity). */
    void enableBody();
    /** Disables the body editor (e.g. for methods without an entity). */
    void disableBody();
    /** Populates the editor from the given request entity. */
    void setEntity(ReqEntity entity);
    /** Returns the request entity currently represented by the editor. */
    ReqEntity getEntity();
}
| 150 |
2,151 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
''' Base class for postprocessing of RC files.
'''
class PostProcessor(object):
  ''' Base class for postprocessing of the RC file data before being
  output through the RC2GRD tool. You should implement this class if
  you want GRIT to do specific things to the RC files after it has
  converted the data into GRD format, i.e. change the content of the
  RC file, and put it into a P4 changelist, etc.'''
  def Process(self, rctext, rcpath, grdnode):
    ''' Processes the data in rctext and grdnode.

    Args:
      rctext: string containing the contents of the RC file being processed.
      rcpath: the path used to access the file.
      grdnode: the root node of the grd xml data generated by
        the rc2grd tool.

    Return:
      The root node of the processed GRD tree.
    '''
    raise NotImplementedError()
2,984 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to you under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.calcite.plan;
/**
 * The mode of trait derivation.
 */
public enum DeriveMode {
  /**
   * Uses the left-most child's traits to decide what
   * traits to require from the other children. This
   * generally applies to most operators.
   */
  LEFT_FIRST,

  /**
   * Uses the right-most child's traits to decide what
   * traits to require from the other children. Operators
   * like index nested-loop join may find this useful.
   */
  RIGHT_FIRST,

  /**
   * Iterates over each child, using the current child's traits
   * to decide what traits to require from the other
   * children. It includes both LEFT_FIRST and RIGHT_FIRST.
   * Systems that don't enable join commutativity should
   * consider this option. Specially customized operators,
   * such as a Join with 3 inputs, may find this useful too.
   */
  BOTH,

  /**
   * Leave it to you, you decide what you cook. This will
   * allow the planner to pass all the traits from all the
   * children; the user decides how to make use of these
   * traits and whether to derive new rel nodes.
   */
  OMAKASE,

  /**
   * Trait derivation is prohibited.
   */
  PROHIBITED
}
| 530 |
679 | /**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
// MARKER(update_precomp.py): autogen include statement, do not remove
#include "precompiled_basic.hxx"
#include <tools/stream.hxx>
#include "svl/brdcst.hxx"
#include <basic/sbx.hxx>
#include <basic/sbxbase.hxx>
#include "sbxres.hxx"
#include "sbxconv.hxx"
#include <math.h>
#include <ctype.h>
#include "com/sun/star/uno/XInterface.hpp"
using namespace com::sun::star::uno;
///////////////////////////// SbxVariable //////////////////////////////
TYPEINIT1(SbxVariable,SbxValue)
TYPEINIT1(SbxHint,SfxSimpleHint)

extern sal_uInt32 nVarCreator; // in SBXBASE.CXX, for LoadData()
#ifdef DBG_UTIL
// Debug-only live-instance counter for SbxVariable objects.
static sal_uIntPtr nVar = 0;
#endif
///////////////////////////// SbxVariableImpl ////////////////////////////

// Pimpl payload of SbxVariable: holds the "Declare" class name and an
// optional COM event listener together with the Basic instance that
// registered it. Only SbxVariable may construct/copy it.
class SbxVariableImpl
{
    friend class SbxVariable;
    String                  m_aDeclareClassName;
    Reference< XInterface > m_xComListener;
    StarBASIC*              m_pComListenerParentBasic;

    SbxVariableImpl( void )
        : m_pComListenerParentBasic( NULL )
    {}
    SbxVariableImpl( const SbxVariableImpl& r )
        : m_aDeclareClassName( r.m_aDeclareClassName )
        , m_xComListener( r.m_xComListener )
        , m_pComListenerParentBasic( r.m_pComListenerParentBasic )
    {
    }
};
///////////////////////////// Constructors //////////////////////////////

// Default constructor: creates an unnamed, parentless variable.
SbxVariable::SbxVariable() : SbxValue()
{
    mpSbxVariableImpl = NULL;
    pCst = NULL;
    pParent = NULL;
    nUserData = 0;
    nHash = 0;
#ifdef DBG_UTIL
    DbgOutf( "SbxVariable::Ctor %lx=%ld", (void*)this, ++nVar );
    GetSbxData_Impl()->aVars.Insert( this, LIST_APPEND );
#endif
}

void registerComListenerVariableForBasic( SbxVariable* pVar, StarBASIC* pBasic );

// Copy constructor: clones the impl data (re-registering any COM
// listener) and copies name/parent/user data only when the source
// variable is readable.
SbxVariable::SbxVariable( const SbxVariable& r )
    : SvRefBase( r ), SbxValue( r ), mpPar( r.mpPar ), pInfo( r.pInfo )
{
    mpSbxVariableImpl = NULL;
    if( r.mpSbxVariableImpl != NULL )
    {
        mpSbxVariableImpl = new SbxVariableImpl( *r.mpSbxVariableImpl );
        if( mpSbxVariableImpl->m_xComListener.is() )
            registerComListenerVariableForBasic( this, mpSbxVariableImpl->m_pComListenerParentBasic );
    }
    pCst = NULL;
    if( r.CanRead() )
    {
        pParent = r.pParent;
        nUserData = r.nUserData;
        maName = r.maName;
        nHash = r.nHash;
    }
    else
    {
        pParent = NULL;
        nUserData = 0;
        nHash = 0;
    }
#ifdef DBG_UTIL
    static sal_Char const aCellsStr[] = "Cells";
    if ( maName.EqualsAscii( aCellsStr ) )
        maName.AssignAscii( aCellsStr, sizeof( aCellsStr )-1 );
    DbgOutf( "SbxVariable::Ctor %lx=%ld", (void*)this, ++nVar );
    GetSbxData_Impl()->aVars.Insert( this, LIST_APPEND );
#endif
}

// Constructor with an explicit data type (and optional raw data pointer).
SbxVariable::SbxVariable( SbxDataType t, void* p ) : SbxValue( t, p )
{
    mpSbxVariableImpl = NULL;
    pCst = NULL;
    pParent = NULL;
    nUserData = 0;
    nHash = 0;
#ifdef DBG_UTIL
    DbgOutf( "SbxVariable::Ctor %lx=%ld", (void*)this, ++nVar );
    GetSbxData_Impl()->aVars.Insert( this, LIST_APPEND );
#endif
}
void removeDimAsNewRecoverItem( SbxVariable* pVar );

// Destructor: removes any "Dim As New" recovery entry and frees the
// pimpl data and the broadcaster.
SbxVariable::~SbxVariable()
{
#ifdef DBG_UTIL
    ByteString aBStr( (const UniString&)maName, RTL_TEXTENCODING_ASCII_US );
    DbgOutf( "SbxVariable::Dtor %lx (%s)", (void*)this, aBStr.GetBuffer() );
    static sal_Char const aCellsStr[] = "Cells";
    if ( maName.EqualsAscii( aCellsStr ) )
        maName.AssignAscii( aCellsStr, sizeof( aCellsStr )-1 );
    GetSbxData_Impl()->aVars.Remove( this );
#endif
    if( IsSet( SBX_DIM_AS_NEW ))
        removeDimAsNewRecoverItem( this );
    delete mpSbxVariableImpl;
    delete pCst;
}
////////////////////////////// Broadcasting //////////////////////////////

// Returns the broadcaster, creating it lazily on first use.
SfxBroadcaster& SbxVariable::GetBroadcaster()
{
    if( !pCst )
        pCst = new SfxBroadcaster;
    return *pCst;
}

// One day the parameter 0 could perhaps be dropped;
// then the copying would no longer be necessary...
void SbxVariable::Broadcast( sal_uIntPtr nHintId )
{
    if( pCst && !IsSet( SBX_NO_BROADCAST ) && StaticIsEnabledBroadcasting() )
    {
        // Since this method can be called from outside, re-check the
        // access rights here once more
        if( nHintId & SBX_HINT_DATAWANTED )
            if( !CanRead() )
                return;
        if( nHintId & SBX_HINT_DATACHANGED )
            if( !CanWrite() )
                return;
        // Prevent further broadcasts while we are broadcasting
        SfxBroadcaster* pSave = pCst;
        pCst = NULL;
        sal_uInt16 nSaveFlags = GetFlags();
        SetFlag( SBX_READWRITE );
        if( mpPar.Is() )
            // Enter this as element 0, but do not change the parent!
            mpPar->GetRef( 0 ) = this;
        pSave->Broadcast( SbxHint( nHintId, this ) );
        delete pCst; // who knows what ideas a handler may have had?
        pCst = pSave;
        SetFlags( nSaveFlags );
    }
}
// Returns the parameter info, requesting it via broadcast on demand.
SbxInfo* SbxVariable::GetInfo()
{
    if( !pInfo )
    {
        Broadcast( SBX_HINT_INFOWANTED );
        if( pInfo.Is() )
            SetModified( sal_True );
    }
    return pInfo;
}

void SbxVariable::SetInfo( SbxInfo* p )
{
    pInfo = p;
}

void SbxVariable::SetParameters( SbxArray* p )
{
    mpPar = p;
}

/////////////////////////// Name of the variable ///////////////////////////

// Sets the name and caches its hash code.
void SbxVariable::SetName( const XubString& rName )
{
    maName = rName;
    nHash = MakeHashCode( rName );
}
// Returns the name. For SbxNAME_SHORT_TYPES / SbxNAME_LONG_TYPES a
// signature (parameter list, type suffixes / "As <type>" clauses) is
// appended; the composed string is cached in aToolString.
const XubString& SbxVariable::GetName( SbxNameType t ) const
{
    static char cSuffixes[] = " %&!#@ $";
    if( t == SbxNAME_NONE )
        return maName;
    // Request parameter information (not for objects)
    ((SbxVariable*)this)->GetInfo();
    // Append nothing if this is a plain property (no empty parentheses)
    if( !pInfo
     || ( !pInfo->aParams.Count() && GetClass() == SbxCLASS_PROPERTY ) )
        return maName;
    xub_Unicode cType = ' ';
    XubString aTmp( maName );
    // Short type? Then fetch it; it may be 0.
    SbxDataType et = GetType();
    if( t == SbxNAME_SHORT_TYPES )
    {
        if( et <= SbxSTRING )
            cType = cSuffixes[ et ];
        if( cType != ' ' )
            aTmp += cType;
    }
    aTmp += '(';
    for( sal_uInt16 i = 0; i < pInfo->aParams.Count(); i++ )
    {
        const SbxParamInfo* q = pInfo->aParams.GetObject( i );
        int nt = q->eType & 0x0FFF;
        if( i )
            aTmp += ',';
        if( q->nFlags & SBX_OPTIONAL )
            aTmp += String( SbxRes( STRING_OPTIONAL ) );
        if( q->eType & SbxBYREF )
            aTmp += String( SbxRes( STRING_BYREF ) );
        aTmp += q->aName;
        cType = ' ';
        // Short type? Then fetch it; it may be 0.
        if( t == SbxNAME_SHORT_TYPES )
        {
            if( nt <= SbxSTRING )
                cType = cSuffixes[ nt ];
        }
        if( cType != ' ' )
        {
            aTmp += cType;
            if( q->eType & SbxARRAY )
                aTmp.AppendAscii( "()" );
        }
        else
        {
            if( q->eType & SbxARRAY )
                aTmp.AppendAscii( "()" );
            // Long type?
            if( t != SbxNAME_SHORT )
            {
                aTmp += String( SbxRes( STRING_AS ) );
                if( nt < 32 )
                    aTmp += String( SbxRes(
                        sal::static_int_cast< sal_uInt16 >( STRING_TYPES + nt ) ) );
                else
                    aTmp += String( SbxRes( STRING_ANY ) );
            }
        }
    }
    aTmp += ')';
    // Long type? Then fetch it
    if( t == SbxNAME_LONG_TYPES && et != SbxEMPTY )
    {
        aTmp += String( SbxRes( STRING_AS ) );
        if( et < 32 )
            aTmp += String( SbxRes(
                sal::static_int_cast< sal_uInt16 >( STRING_TYPES + et ) ) );
        else
            aTmp += String( SbxRes( STRING_ANY ) );
    }
    ((SbxVariable*) this)->aToolString = aTmp;
    return aToolString;
}
// Generate a simple hash code: only the first 6 characters are evaluated.
// Returns 0 as soon as a non-ASCII character is encountered.
sal_uInt16 SbxVariable::MakeHashCode( const XubString& rName )
{
    sal_uInt16 n = 0;
    sal_uInt16 nLen = rName.Len();
    if( nLen > 6 )
        nLen = 6;
    const xub_Unicode* p = rName.GetBuffer();
    while( nLen-- )
    {
        sal_uInt8 c = (sal_uInt8)*p;
        p++;
        // Bail out if we encounter a non-ASCII character!!
        if( c >= 0x80 )
            return 0;
        n = sal::static_int_cast< sal_uInt16 >( ( n << 3 ) + toupper( c ) );
    }
    return n;
}
////////////////////////////// Operators ////////////////////////////////

// Assignment: copies the value and clones the impl data, re-registering
// any COM listener with its parent Basic.
SbxVariable& SbxVariable::operator=( const SbxVariable& r )
{
    SbxValue::operator=( r );
    delete mpSbxVariableImpl;
    if( r.mpSbxVariableImpl != NULL )
    {
        mpSbxVariableImpl = new SbxVariableImpl( *r.mpSbxVariableImpl );
        if( mpSbxVariableImpl->m_xComListener.is() )
            registerComListenerVariableForBasic( this, mpSbxVariableImpl->m_pComListenerParentBasic );
    }
    else
        mpSbxVariableImpl = NULL;
    return *this;
}
//////////////////////////////// Conversion ////////////////////////////////

// Returns the effective data type; OBJECT/VARIANT delegate to the
// contained object when one is present.
SbxDataType SbxVariable::GetType() const
{
    if( aData.eType == SbxOBJECT )
        return aData.pObj ? aData.pObj->GetType() : SbxOBJECT;
    else if( aData.eType == SbxVARIANT )
        return aData.pObj ? aData.pObj->GetType() : SbxVARIANT;
    else
        return aData.eType;
}

SbxClassType SbxVariable::GetClass() const
{
    return SbxCLASS_VARIABLE;
}

// Propagates the modified flag to the parent unless SBX_NO_MODIFY is set.
void SbxVariable::SetModified( sal_Bool b )
{
    if( IsSet( SBX_NO_MODIFY ) )
        return;
    SbxBase::SetModified( b );
    if( pParent && pParent != this ) //??? HotFix: avoid recursion MM
        pParent->SetModified( b );
}
// Sets the parent object. In debug builds, when the parent of a
// SbxObject is set, verify that this object really is a child of the
// new parent and warn about dangling relations otherwise.
void SbxVariable::SetParent( SbxObject* p )
{
#ifdef DBG_UTIL
    // is the parent of a SbxObject being set?
    if ( p && ISA(SbxObject) )
    {
        // then this must also be a child of the new parent
        sal_Bool bFound = sal_False;
        SbxArray *pChilds = p->GetObjects();
        if ( pChilds )
        {
            for ( sal_uInt16 nIdx = 0; !bFound && nIdx < pChilds->Count(); ++nIdx )
                bFound = ( this == pChilds->Get(nIdx) );
        }
        if ( !bFound )
        {
            String aMsg = String::CreateFromAscii( "dangling: [" );
            aMsg += GetName();
            aMsg.AppendAscii( "].SetParent([" );
            aMsg += p->GetName();
            aMsg.AppendAscii( "])" );
            ByteString aBStr( (const UniString&)aMsg, RTL_TEXTENCODING_ASCII_US );
            DbgOut( aBStr.GetBuffer(), DBG_OUT_WARNING, __FILE__, __LINE__);
        }
    }
#endif
    pParent = p;
}
// Lazily creates and returns the pimpl object.
SbxVariableImpl* SbxVariable::getImpl( void )
{
    if( mpSbxVariableImpl == NULL )
        mpSbxVariableImpl = new SbxVariableImpl();
    return mpSbxVariableImpl;
}

// Class name given in a VB "Declare" statement, if any.
const String& SbxVariable::GetDeclareClassName( void )
{
    SbxVariableImpl* pImpl = getImpl();
    return pImpl->m_aDeclareClassName;
}

void SbxVariable::SetDeclareClassName( const String& rDeclareClassName )
{
    SbxVariableImpl* pImpl = getImpl();
    pImpl->m_aDeclareClassName = rDeclareClassName;
}

// Attaches a COM event listener and registers it with the owning Basic.
void SbxVariable::SetComListener( ::com::sun::star::uno::Reference< ::com::sun::star::uno::XInterface > xComListener,
                                  StarBASIC* pParentBasic )
{
    SbxVariableImpl* pImpl = getImpl();
    pImpl->m_xComListener = xComListener;
    pImpl->m_pComListenerParentBasic = pParentBasic;
    registerComListenerVariableForBasic( this, pParentBasic );
}

void SbxVariable::ClearComListener( void )
{
    SbxVariableImpl* pImpl = getImpl();
    pImpl->m_xComListener.clear();
}
////////////////////////////// Load/Store /////////////////////////////

// Deserializes the variable. A leading 0xFF marker denotes the new
// format (value stored via SbxValue::LoadData); otherwise the legacy
// format with the type and value stored inline is read.
sal_Bool SbxVariable::LoadData( SvStream& rStrm, sal_uInt16 nVer )
{
    sal_uInt16 nType;
    sal_uInt8 cMark;
    rStrm >> cMark;
    if( cMark == 0xFF )
    {
        if( !SbxValue::LoadData( rStrm, nVer ) )
            return sal_False;
        rStrm.ReadByteString( maName, RTL_TEXTENCODING_ASCII_US );
        sal_uInt32 nTemp;
        rStrm >> nTemp;
        nUserData = nTemp;
    }
    else
    {
        rStrm.SeekRel( -1L );
        rStrm >> nType;
        rStrm.ReadByteString( maName, RTL_TEXTENCODING_ASCII_US );
        sal_uInt32 nTemp;
        rStrm >> nTemp;
        nUserData = nTemp;
        // Correction: old methods now use SbxEMPTY instead of SbxNULL
        if( nType == SbxNULL && GetClass() == SbxCLASS_METHOD )
            nType = SbxEMPTY;
        SbxValues aTmp;
        String aTmpString;
        ::rtl::OUString aVal;
        aTmp.eType = aData.eType = (SbxDataType) nType;
        aTmp.pOUString = &aVal;
        switch( nType )
        {
            case SbxBOOL:
            case SbxERROR:
            case SbxINTEGER:
                rStrm >> aTmp.nInteger; break;
            case SbxLONG:
                rStrm >> aTmp.nLong; break;
            case SbxSINGLE:
            {
                // Floats are stored as ASCII
                rStrm.ReadByteString( aTmpString, RTL_TEXTENCODING_ASCII_US );
                double d;
                SbxDataType t;
                if( ImpScan( aTmpString, d, t, NULL ) != SbxERR_OK || t == SbxDOUBLE )
                {
                    aTmp.nSingle = 0;
                    return sal_False;
                }
                aTmp.nSingle = (float) d;
                break;
            }
            case SbxDATE:
            case SbxDOUBLE:
            {
                // Floats are stored as ASCII
                rStrm.ReadByteString( aTmpString, RTL_TEXTENCODING_ASCII_US );
                SbxDataType t;
                if( ImpScan( aTmpString, aTmp.nDouble, t, NULL ) != SbxERR_OK )
                {
                    aTmp.nDouble = 0;
                    return sal_False;
                }
                break;
            }
            case SbxSTRING:
                rStrm.ReadByteString( aTmpString, RTL_TEXTENCODING_ASCII_US );
                aVal = aTmpString;
                break;
            case SbxEMPTY:
            case SbxNULL:
                break;
            default:
                aData.eType = SbxNULL;
                DBG_ASSERT( sal_False, "Unsupported data type loaded" );
                return sal_False;
        }
        // Put the value
        if( nType != SbxNULL && nType != SbxEMPTY && !Put( aTmp ) )
            return sal_False;
    }
    rStrm >> cMark;
    // cMark is also a version number!
    // 1: initial version
    // 2: with nUserData
    if( cMark )
    {
        if( cMark > 2 )
            return sal_False;
        pInfo = new SbxInfo;
        pInfo->LoadData( rStrm, (sal_uInt16) cMark );
    }
    // Load private data only if this is a SbxVariable
    if( GetClass() == SbxCLASS_VARIABLE && !LoadPrivateData( rStrm, nVer ) )
        return sal_False;
    ((SbxVariable*) this)->Broadcast( SBX_HINT_DATACHANGED );
    nHash = MakeHashCode( maName );
    SetModified( sal_True );
    return sal_True;
}
// Serializes the variable. For methods the runtime return value is
// cleared first so it is not persisted along with the method.
sal_Bool SbxVariable::StoreData( SvStream& rStrm ) const
{
    rStrm << (sal_uInt8) 0xFF; // marker for the new format
    sal_Bool bValStore;
    if( this->IsA( TYPE(SbxMethod) ) )
    {
        // #50200 Prevent objects that are stored in the method as a
        // return value at runtime from being stored along with it
        SbxVariable* pThis = (SbxVariable*)this;
        sal_uInt16 nSaveFlags = GetFlags();
        pThis->SetFlag( SBX_WRITE );
        pThis->SbxValue::Clear();
        pThis->SetFlags( nSaveFlags );
        // So that the method is never executed!
        // CAST to work around the const!
        pThis->SetFlag( SBX_NO_BROADCAST );
        bValStore = SbxValue::StoreData( rStrm );
        pThis->ResetFlag( SBX_NO_BROADCAST );
    }
    else
        bValStore = SbxValue::StoreData( rStrm );
    if( !bValStore )
        return sal_False;
    // if( !SbxValue::StoreData( rStrm ) )
    // return sal_False;
    rStrm.WriteByteString( maName, RTL_TEXTENCODING_ASCII_US );
    rStrm << (sal_uInt32)nUserData;
    if( pInfo.Is() )
    {
        rStrm << (sal_uInt8) 2; // version 2: with UserData!
        pInfo->StoreData( rStrm );
    }
    else
        rStrm << (sal_uInt8) 0;
    // Store private data only if this is a SbxVariable
    if( GetClass() == SbxCLASS_VARIABLE )
        return StorePrivateData( rStrm );
    else
        return sal_True;
}
////////////////////////////// SbxInfo ///////////////////////////////////

// Parameter/help metadata for a variable; optionally bound to a help
// file and help id.
SbxInfo::SbxInfo() : aHelpFile(), nHelpId( 0 ), aParams()
{}

SbxInfo::SbxInfo( const String& r, sal_uInt32 n )
    : aHelpFile( r ), nHelpId( n ), aParams()
{}
////////////////////////////// SbxAlias //////////////////////////////////

// An alias forwards reads/writes to another variable. It mirrors the
// target's flags and type and listens on its broadcaster so it can
// detect the target's destruction.
SbxAlias::SbxAlias( const XubString& rName, SbxVariable* p )
    : SbxVariable(), xAlias( p )
{
    SetName( rName );
    SetFlags( p->GetFlags() );
    SetFlag( SBX_DONTSTORE );
    aData.eType = p->GetType();
    StartListening( p->GetBroadcaster() );
}

SbxAlias::SbxAlias( const SbxAlias& r )
    : SvRefBase( r ), SbxVariable( r ),
      SfxListener( r ), xAlias( r.xAlias )
{}

SbxAlias& SbxAlias::operator=( const SbxAlias& r )
{
    xAlias = r.xAlias;
    return *this;
}

SbxAlias::~SbxAlias()
{
    if( xAlias.Is() )
        EndListening( xAlias->GetBroadcaster() );
}
// Forwards data traffic between this alias and the aliased variable:
// reads pull the target's value, writes push ours, info requests are
// delegated.
void SbxAlias::Broadcast( sal_uIntPtr nHt )
{
    if( xAlias.Is() && StaticIsEnabledBroadcasting() )
    {
        xAlias->SetParameters( GetParameters() );
        if( nHt == SBX_HINT_DATAWANTED )
            SbxVariable::operator=( *xAlias );
        else if( nHt == SBX_HINT_DATACHANGED || nHt == SBX_HINT_CONVERTED )
            *xAlias = *this;
        else if( nHt == SBX_HINT_INFOWANTED )
        {
            xAlias->Broadcast( nHt );
            pInfo = xAlias->GetInfo();
        }
    }
}
// When the aliased variable dies, drop the reference and remove this
// alias from its parent.
void SbxAlias::SFX_NOTIFY( SfxBroadcaster&, const TypeId&,
                           const SfxHint& rHint, const TypeId& )
{
    const SbxHint* p = PTR_CAST(SbxHint,&rHint);
    if( p && p->GetId() == SBX_HINT_DYING )
    {
        xAlias.Clear();
        // delete the alias?
        if( pParent )
            pParent->Remove( this );
    }
}
// Writes a debug description of the variable to the stream; for object
// values the contained object is dumped recursively.
void SbxVariable::Dump( SvStream& rStrm, sal_Bool bFill )
{
    ByteString aBNameStr( (const UniString&)GetName( SbxNAME_SHORT_TYPES ), RTL_TEXTENCODING_ASCII_US );
    rStrm << "Variable( "
          << ByteString::CreateFromInt64( (sal_uIntPtr) this ).GetBuffer() << "=="
          << aBNameStr.GetBuffer();
    if ( GetParent() )
    {
        // BUGFIX: build the parent name only when a parent exists; the
        // previous code dereferenced GetParent() before the NULL check
        // and crashed for parentless variables.
        ByteString aBParentNameStr( (const UniString&)GetParent()->GetName(), RTL_TEXTENCODING_ASCII_US );
        rStrm << " in parent '" << aBParentNameStr.GetBuffer() << "'";
    }
    else
        rStrm << " no parent";
    rStrm << " ) ";
    // for object variables, also dump the contained object
    if ( GetValues_Impl().eType == SbxOBJECT &&
         GetValues_Impl().pObj &&
         GetValues_Impl().pObj != this &&
         GetValues_Impl().pObj != GetParent() )
    {
        rStrm << " contains ";
        ((SbxObject*) GetValues_Impl().pObj)->Dump( rStrm, bFill );
    }
    else
        rStrm << endl;
}
| 7,637 |
1,671 | /**
* Copyright 2020 LinkedIn Corp. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
package com.github.ambry.replication;
/**
 * The model used for cross-colo replication.
 * Under ALL_TO_ALL mode, all replicas of the local data center replicate from all peer replicas in remote data centers.
 * Under LEADER_BASED mode, only the leader replica of the local data center (as elected by Helix) will replicate with other leader replicas in remote data centers.
 */
public enum ReplicationModelType {
  ALL_TO_ALL, LEADER_BASED
}
| 250 |
767 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.
Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
""" # noqa
import re
import json
from esb.bkcore.models import FunctionController
class FunctionControllerClient(object):
    """Client for querying feature switches stored in FunctionController."""

    @classmethod
    def _get_func_ctrl_by_code(cls, func_code, data_type='list'):
        """Return ``(switch_status, wlist)`` for the given function code.

        :param str data_type: 'list' splits the stored string on commas and
            semicolons into a list; 'json' decodes it with json.loads;
            any other value returns the raw stored string.
        """
        func_ctrl = FunctionController.objects.filter(func_code=func_code).first()
        if not func_ctrl:
            return None, None
        if data_type == 'list':
            return func_ctrl.switch_status, re.findall(r'[^,;]+', func_ctrl.wlist or '')
        if data_type == 'json':
            return func_ctrl.switch_status, json.loads(func_ctrl.wlist)
        return func_ctrl.switch_status, func_ctrl.wlist

    @classmethod
    def is_skip_user_auth(cls, app_code):
        """Whether the app may skip user authentication.

        True only when the feature switch is enabled and the app code is
        present in the whitelist.
        """
        switch_status, wlist = FunctionControllerClient._get_func_ctrl_by_code(
            'user_auth::skip_user_auth')
        if switch_status and app_code in wlist:
            return True
        return False
| 843 |
4,054 | <filename>config-model/src/test/java/com/yahoo/vespa/model/utils/internal/ReflectionUtilTest.java
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.model.utils.internal;
import com.yahoo.config.model.producer.AbstractConfigProducer;
import com.yahoo.test.ArraytypesConfig;
import com.yahoo.config.ChangesRequiringRestart;
import com.yahoo.config.ConfigInstance;
import com.yahoo.test.SimpletypesConfig;
import com.yahoo.vespa.config.ConfigKey;
import org.junit.Test;
import java.util.Set;
import static com.yahoo.vespa.model.utils.internal.ReflectionUtil.getAllConfigsProduced;
import static org.hamcrest.Matchers.is;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
/**
* @author <NAME>
* @author bjorncs
* @author gjoranv
* @since 5.1
*/
public class ReflectionUtilTest {

    // Producer that implements a single config Producer interface directly.
    private static class SimpleProducer extends AbstractConfigProducer implements SimpletypesConfig.Producer {
        SimpleProducer(AbstractConfigProducer parent, String subId) { super(parent, subId); }
        @Override
        public void getConfig(SimpletypesConfig.Builder builder) { }
    }

    // Aggregates two Producer interfaces; used to test traversal of
    // implemented interfaces.
    private interface ProducerInterface extends SimpletypesConfig.Producer, ArraytypesConfig.Producer { }

    private static class InterfaceImplementingProducer extends AbstractConfigProducer implements ProducerInterface {
        InterfaceImplementingProducer(AbstractConfigProducer parent, String subId) { super(parent, subId); }
        @Override
        public void getConfig(ArraytypesConfig.Builder builder) { }
        @Override
        public void getConfig(SimpletypesConfig.Builder builder) { }
    }

    // Producer whose config production is inherited from an abstract base.
    private static abstract class MyAbstractProducer extends AbstractConfigProducer implements SimpletypesConfig.Producer {
        MyAbstractProducer(AbstractConfigProducer parent, String subId) { super(parent, subId); }
        @Override
        public void getConfig(SimpletypesConfig.Builder builder) { }
    }

    private static class ConcreteProducer extends MyAbstractProducer {
        ConcreteProducer(AbstractConfigProducer parent, String subId) { super(parent, subId); }
    }

    // Config class that declares the reflection hooks used for the
    // restart-on-change mechanism.
    private static class RestartConfig extends ConfigInstance {
        @SuppressWarnings("UnusedDeclaration")
        private static boolean containsFieldsFlaggedWithRestart() {
            return true;
        }
        @SuppressWarnings("UnusedDeclaration")
        private ChangesRequiringRestart getChangesRequiringRestart(RestartConfig newConfig) {
            return new ChangesRequiringRestart("testing");
        }
    }

    // Config class without any restart hooks.
    private static class NonRestartConfig extends ConfigInstance {}

    @Test
    public void getAllConfigsProduced_includes_configs_produced_by_super_class() {
        Set<ConfigKey<?>> configs = getAllConfigsProduced(ConcreteProducer.class, "foo");
        assertThat(configs.size(), is(1));
        assertTrue(configs.contains(new ConfigKey<>(SimpletypesConfig.CONFIG_DEF_NAME, "foo", SimpletypesConfig.CONFIG_DEF_NAMESPACE)));
    }

    @Test
    public void getAllConfigsProduced_includes_configs_produced_by_implemented_interface() {
        Set<ConfigKey<?>> configs = getAllConfigsProduced(InterfaceImplementingProducer.class, "foo");
        assertThat(configs.size(), is(2));
        assertTrue(configs.contains(new ConfigKey<>(SimpletypesConfig.CONFIG_DEF_NAME, "foo", SimpletypesConfig.CONFIG_DEF_NAMESPACE)));
        assertTrue(configs.contains(new ConfigKey<>(ArraytypesConfig.CONFIG_DEF_NAME, "foo", ArraytypesConfig.CONFIG_DEF_NAMESPACE)));
    }

    @Test
    public void getAllConfigsProduced_includes_configs_directly_implemented_by_producer() {
        Set<ConfigKey<?>> configs = getAllConfigsProduced(SimpleProducer.class, "foo");
        assertThat(configs.size(), is(1));
        assertTrue(configs.contains(new ConfigKey<>(SimpletypesConfig.CONFIG_DEF_NAME, "foo", SimpletypesConfig.CONFIG_DEF_NAMESPACE)));
    }

    @Test
    public void requireThatRestartMethodsAreDetectedProperly() {
        assertFalse(ReflectionUtil.hasRestartMethods(NonRestartConfig.class));
        assertTrue(ReflectionUtil.hasRestartMethods(RestartConfig.class));
    }

    @Test
    public void requireThatRestartMethodsAreProperlyInvoked() {
        assertTrue(ReflectionUtil.containsFieldsFlaggedWithRestart(RestartConfig.class));
        assertEquals("testing", ReflectionUtil.getChangesRequiringRestart(new RestartConfig(), new RestartConfig()).getName());
    }

    @Test(expected = IllegalArgumentException.class)
    public void requireThatGetChangesRequiringRestartValidatesParameterTypes() {
        ReflectionUtil.getChangesRequiringRestart(new RestartConfig(), new NonRestartConfig());
    }
}
| 1,633 |
488 | from .unittest_tools import unittest
from quantlib.time.daycounter import DayCounter
from quantlib.time.daycounters.simple import (
Actual360, SimpleDayCounter
)
from quantlib.time.daycounters.actual_actual import (
ActualActual, ISDA, ISMA, AFB
)
from quantlib.time.daycounters.thirty360 import (
Thirty360, EUROBONDBASIS
)
from quantlib.time.date import (
Date, November, May, February, July, January, Period,
Months
)
class TestDayCounter(unittest.TestCase):
    """Construction and naming of day counter instances."""

    def test_create_day_counter(self):
        counter = Actual360()
        self.assertTrue(counter is not None)
        self.assertIsInstance(counter, DayCounter)

    def test_daycounter_name(self):
        counter = Actual360()
        expected_name = 'Actual/360'
        self.assertEqual(expected_name, counter.name)
        self.assertEqual(expected_name, str(counter))

    def test_empty_daycounter(self):
        # A bare DayCounter has no implementation behind it.
        counter = DayCounter()
        with self.assertRaisesRegexp(
                RuntimeError, r"no (day counter )?implementation provided"):
            counter.name
class TestDayCounterFromName(unittest.TestCase):
    """DayCounter.from_name resolves aliases to canonical counters."""

    def test_create_simple_daycounter_from_name(self):
        expected_names = {
            'Actual360': 'Actual/360',
            'Actual/360': 'Actual/360',
            'Actual/360 (inc)': 'Actual/360 (inc)',
            'Actual365Fixed': 'Actual/365 (Fixed)',
            'Actual/365': 'Actual/365 (Fixed)',
            'OneDayCounter': '1/1',
            '1/1': '1/1',
        }
        for alias, canonical_name in expected_names.items():
            counter = DayCounter.from_name(alias)
            self.assertEqual(counter.name, canonical_name)

    def test_create_daycounter_with_convention_from_name(self):
        expected_names = {
            'Actual/Actual (Bond)': 'Actual/Actual (ISMA)',
            'Actual/Actual (ISMA)': 'Actual/Actual (ISMA)',
            'Actual/Actual (ISDA)': 'Actual/Actual (ISDA)',
            'Actual/Actual (Historical)': 'Actual/Actual (ISDA)',
            'Actual/Actual (Actual365)': 'Actual/Actual (ISDA)',
            'Actual/Actual (AFB)': 'Actual/Actual (AFB)',
            'Actual/Actual (Euro)': 'Actual/Actual (AFB)',
        }
        for alias, canonical_name in expected_names.items():
            counter = DayCounter.from_name(alias)
            self.assertEqual(counter.name, canonical_name)
class TestActualActual(unittest.TestCase):
    """Year-fraction regression tests for the Actual/Actual conventions
    (ISDA, ISMA, AFB), plus SimpleDayCounter and Thirty360 checks.

    Expected values follow the standard ISDA EMU/market-convention examples.
    """

    def setUp(self):
        self.from_date = Date(1, November, 2003)
        self.to_date = Date(1, May, 2004)
        self.ref_start = Date(1, November, 2003)
        self.ref_end = Date(1, May, 2004)

    def test_first_example_isda(self):
        day_counter = ActualActual(ISDA)
        self.assertAlmostEqual(
            0.497724380567,
            day_counter.year_fraction(self.from_date, self.to_date)
        )

    def test_first_example_isma(self):
        # ISMA needs the reference (coupon) period in addition to the dates.
        day_counter = ActualActual(ISMA)
        self.assertAlmostEqual(
            0.5,
            day_counter.year_fraction(self.from_date, self.to_date,
                                      self.ref_start, self.ref_end)
        )

    def test_first_example_afb(self):
        day_counter = ActualActual(AFB)
        self.assertAlmostEqual(
            0.497267759563,
            day_counter.year_fraction(self.from_date, self.to_date)
        )

    def test_short_calculation_first_period_isda(self):
        day_counter = ActualActual(ISDA)
        from_date = Date(1, February, 1999)
        to_date = Date(1, July, 1999)
        expected_result = 0.410958904110
        self.assertAlmostEqual(
            expected_result,
            day_counter.year_fraction(from_date, to_date)
        )

    def test_short_calculation_first_period_isma(self):
        day_counter = ActualActual(ISMA)
        from_date = Date(1, February, 1999)
        to_date = Date(1, July, 1999)
        ref_start = Date(1, July, 1998)
        ref_end = Date(1, July, 1999)
        expected_result = 0.410958904110
        self.assertAlmostEqual(
            expected_result,
            day_counter.year_fraction(from_date, to_date, ref_start, ref_end)
        )

    def test_short_calculation_first_period_afb(self):
        day_counter = ActualActual(AFB)
        from_date = Date(1, February, 1999)
        to_date = Date(1, July, 1999)
        expected_result = 0.410958904110
        self.assertAlmostEqual(
            expected_result,
            day_counter.year_fraction(from_date, to_date)
        )

    def test_short_calculation_second_period_isda(self):
        day_counter = ActualActual(ISDA)
        from_date = Date(1, July, 1999)
        to_date = Date(1, July, 2000)
        expected_result = 1.001377348600
        self.assertAlmostEqual(
            expected_result,
            day_counter.year_fraction(from_date, to_date)
        )

    def test_short_calculation_second_period_isma(self):
        day_counter = ActualActual(ISMA)
        from_date = Date(1, July, 1999)
        to_date = Date(1, July, 2000)
        ref_start = Date(1, July, 1999)
        ref_end = Date(1, July, 2000)
        expected_result = 1.000000000000
        self.assertAlmostEqual(
            expected_result,
            day_counter.year_fraction(from_date, to_date, ref_start, ref_end)
        )

    def test_short_calculation_second_period_afb(self):
        day_counter = ActualActual(AFB)
        from_date = Date(1, July, 1999)
        to_date = Date(1, July, 2000)
        expected_result = 1.000000000000
        self.assertAlmostEqual(
            expected_result,
            day_counter.year_fraction(from_date, to_date)
        )

    def test_simple(self):
        """SimpleDayCounter: whole months map to exact fractions of a year."""
        periods = [3, 6, 12]
        expected_times = [0.25, 0.5, 1.0]
        first = Date(1, January, 2002)
        day_counter = SimpleDayCounter()
        # zip pairs each period with its expected fraction directly
        # (the index from the original enumerate() was only used for lookup).
        for period, expected in zip(periods, expected_times):
            end = first + Period(period, Months)
            calculated = day_counter.year_fraction(first, end)
            self.assertAlmostEqual(expected, calculated)

    def test_thirty360(self):
        day_counter = Thirty360(EUROBONDBASIS)
        from_date = Date(1, July, 1999)
        to_date = Date(1, July, 2000)
        expected_result = 1.000000000000
        self.assertAlmostEqual(
            expected_result,
            day_counter.year_fraction(from_date, to_date)
        )

    def test_equality_method(self):
        """Counters compare equal only when the convention matches."""
        day_counter = Thirty360(EUROBONDBASIS)
        a = Thirty360()
        self.assertNotEqual(day_counter, a)
        self.assertNotEqual(day_counter, Thirty360())
        self.assertEqual(day_counter, Thirty360(EUROBONDBASIS))
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| 3,175 |
416 | package org.simpleflatmapper.converter.impl.time;
import org.simpleflatmapper.converter.Context;
import org.simpleflatmapper.converter.ContextualConverter;
import java.time.OffsetTime;
import java.time.ZoneId;
import java.util.Date;
/**
 * Converts a {@link java.util.Date} to a {@link java.time.OffsetTime},
 * interpreting the instant in the time zone supplied at construction.
 * A null input converts to null.
 */
public class DateToJavaOffsetTimeConverter implements ContextualConverter<Date, OffsetTime> {

    /** Zone used to derive the offset of the resulting time. */
    private final ZoneId dateTimeZone;

    public DateToJavaOffsetTimeConverter(ZoneId dateTimeZone) {
        this.dateTimeZone = dateTimeZone;
    }

    @Override
    public OffsetTime convert(Date in, Context context) throws Exception {
        return in == null
                ? null
                : in.toInstant().atZone(dateTimeZone).toOffsetDateTime().toOffsetTime();
    }
}
| 240 |
852 | ///
/// \class l1t::Stage2Layer2JetSumAlgorithmFirmwareImp1
///
/// \author: <NAME>
///
/// Description: first iteration of stage 2 jet algo
#include "FWCore/MessageLogger/interface/MessageLogger.h"
#include "L1Trigger/L1TCalorimeter/interface/Stage2Layer2JetSumAlgorithmFirmware.h"
#include "L1Trigger/L1TCalorimeter/interface/CaloTools.h"
// Configure the HT/MHT jet-sum thresholds and eta ranges from calo params.
// Physical ET thresholds are converted to hardware units by dividing by the
// jet LSB and truncating (floor).  etSum index 1 feeds the HTT quantities
// and index 3 the MHT quantities (matching the members they initialize) —
// NOTE(review): the meaning of those indices is defined by CaloParamsHelper;
// confirm against its documentation.
l1t::Stage2Layer2JetSumAlgorithmFirmwareImp1::Stage2Layer2JetSumAlgorithmFirmwareImp1(CaloParamsHelper const* params) {
  httJetThresholdHw_ = floor(params->etSumEtThreshold(1) / params->jetLsb());
  mhtJetThresholdHw_ = floor(params->etSumEtThreshold(3) / params->jetLsb());
  httEtaMax_ = params->etSumEtaMax(1);
  // The HF variants always extend to the end of the forward calorimeter.
  httEtaMaxHF_ = CaloTools::kHFEnd;
  mhtEtaMax_ = params->etSumEtaMax(3);
  mhtEtaMaxHF_ = CaloTools::kHFEnd;
}
// Build the jet-based energy sums (HT, HTx/HTy components, and their
// HF-extended variants) for both eta halves of the detector, emitting six
// EtSum objects per side into `htsums`.
//
// Jets equal to the saturation sentinel (CaloTools::kSatJet) poison the
// relevant accumulators: the corresponding sums are forced to fixed
// saturation values (0xffff for HT, 0x7fffffff for the MHT components)
// after accumulation.
void l1t::Stage2Layer2JetSumAlgorithmFirmwareImp1::processEvent(const std::vector<l1t::Jet>& alljets,
                                                                std::vector<l1t::EtSum>& htsums) {
  // etaSide=1 is positive eta, etaSide=-1 is negative eta
  for (int etaSide = 1; etaSide >= -1; etaSide -= 2) {
    // Per-side accumulators: (hx, hy) are the missing-HT vector components,
    // ht the scalar sum; the *HF variants include the forward region.
    int hx(0), hy(0), ht(0);
    int hxHF(0), hyHF(0), htHF(0);
    bool satMht(false), satMhtHF(false), satHt(false), satHtHF(false);

    // loop over rings
    for (unsigned absieta = 1; absieta <= (unsigned int)CaloTools::mpEta(CaloTools::kHFEnd); absieta++) {
      int ieta = etaSide * absieta;
      // Per-ring partial sums, folded into the side totals after the phi loop.
      int ringHx(0), ringHy(0), ringHt(0);
      int ringHxHF(0), ringHyHF(0), ringHtHF(0);

      // loop over phi
      for (int iphi = 1; iphi <= CaloTools::kHBHENrPhi; iphi++) {
        // find the jet at this (eta,phi)
        // NOTE(review): this is a linear scan over all jets for every
        // (ieta,iphi) cell; acceptable for the small L1 jet multiplicity.
        l1t::Jet thisJet;
        bool foundJet = false;
        for (unsigned jetIt = 0; jetIt < alljets.size(); jetIt++) {
          if (CaloTools::mpEta(alljets.at(jetIt).hwEta()) == ieta && alljets.at(jetIt).hwPhi() == iphi) {
            thisJet = alljets.at(jetIt);
            foundJet = true;
          }
        }
        if (!foundJet)
          continue;

        // x- and -y coefficients are truncated by after multiplication of Et by trig coefficient.
        // The trig coefficients themselves take values [-1023,1023] and so were scaled by
        // 2^10 = 1024, which requires bitwise shift to the right of the final value by 10 bits.
        // The 4 below account for part of that and the rest is accounted for at ouput of demux
        // (see Stage2Layer2DemuxSumsAlgoFirmwareImp1.cc)

        // MHT within the central eta range: a saturated jet poisons both
        // the central and HF sums (the HF range contains the central one).
        if (thisJet.hwPt() > mhtJetThresholdHw_ &&
            CaloTools::mpEta(abs(thisJet.hwEta())) <= CaloTools::mpEta(mhtEtaMax_)) {
          if (thisJet.hwPt() == CaloTools::kSatJet) {
            satMht = true;
            satMhtHF = true;
          } else {
            ringHx += (int)((thisJet.hwPt() * CaloTools::cos_coeff[iphi - 1]) >> 4);
            ringHy += (int)((thisJet.hwPt() * CaloTools::sin_coeff[iphi - 1]) >> 4);
          }
        }

        // MHT within the HF-extended eta range.
        if (thisJet.hwPt() > mhtJetThresholdHw_ &&
            CaloTools::mpEta(abs(thisJet.hwEta())) <= CaloTools::mpEta(mhtEtaMaxHF_)) {
          if (thisJet.hwPt() == CaloTools::kSatJet)
            satMhtHF = true;
          else {
            ringHxHF += (int)((thisJet.hwPt() * CaloTools::cos_coeff[iphi - 1]) >> 4);
            ringHyHF += (int)((thisJet.hwPt() * CaloTools::sin_coeff[iphi - 1]) >> 4);
          }
        }

        // Scalar HT within the central eta range.
        if (thisJet.hwPt() > httJetThresholdHw_ &&
            CaloTools::mpEta(abs(thisJet.hwEta())) <= CaloTools::mpEta(httEtaMax_)) {
          if (thisJet.hwPt() == CaloTools::kSatJet) {
            satHt = true;
            satHtHF = true;
          } else
            ringHt += thisJet.hwPt();
        }

        // Scalar HT within the HF-extended eta range.
        if (thisJet.hwPt() > httJetThresholdHw_ &&
            CaloTools::mpEta(abs(thisJet.hwEta())) <= CaloTools::mpEta(httEtaMaxHF_)) {
          if (thisJet.hwPt() == CaloTools::kSatJet)
            satHtHF = true;
          else
            ringHtHF += thisJet.hwPt();
        }
      }

      // Fold this ring's contribution into the per-side totals.
      hx += ringHx;
      hy += ringHy;
      ht += ringHt;
      hxHF += ringHxHF;
      hyHF += ringHyHF;
      htHF += ringHtHF;
    }

    // Saturation overrides: fixed sentinel values replace the accumulated sums.
    if (satHt)
      ht = 0xffff;
    if (satHtHF)
      htHF = 0xffff;
    if (satMht) {
      hx = 0x7fffffff;
      hy = 0x7fffffff;
    }
    if (satMhtHF) {
      hxHF = 0x7fffffff;
      hyHF = 0x7fffffff;
    }

    // Physical four-vector is unused at this stage; only hw values matter here.
    math::XYZTLorentzVector p4;

    l1t::EtSum htSumHt(p4, l1t::EtSum::EtSumType::kTotalHt, ht, 0, 0, 0);
    l1t::EtSum htSumHx(p4, l1t::EtSum::EtSumType::kTotalHtx, hx, 0, 0, 0);
    l1t::EtSum htSumHy(p4, l1t::EtSum::EtSumType::kTotalHty, hy, 0, 0, 0);
    l1t::EtSum htSumHtHF(p4, l1t::EtSum::EtSumType::kTotalHtHF, htHF, 0, 0, 0);
    l1t::EtSum htSumHxHF(p4, l1t::EtSum::EtSumType::kTotalHtxHF, hxHF, 0, 0, 0);
    l1t::EtSum htSumHyHF(p4, l1t::EtSum::EtSumType::kTotalHtyHF, hyHF, 0, 0, 0);

    htsums.push_back(htSumHt);
    htsums.push_back(htSumHx);
    htsums.push_back(htSumHy);
    htsums.push_back(htSumHtHF);
    htsums.push_back(htSumHxHF);
    htsums.push_back(htSumHyHF);
  }
}
| 2,566 |
15,577 | #!/usr/bin/env python3
#!/usr/bin/env python3
import pytest
from helpers.cluster import ClickHouseCluster
import random
import string
import os
import time
from multiprocessing.dummy import Pool
from helpers.network import PartitionManager
from helpers.test_tools import assert_eq_with_retry
from kazoo.client import KazooClient, KazooState
# Two-node test cluster; each node runs its own keeper config and is kept
# alive so it can be restarted within a test.
cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance('node1', main_configs=['configs/enable_keeper1.xml'], stay_alive=True)
node2 = cluster.add_instance('node2', main_configs=['configs/enable_keeper2.xml'], stay_alive=True)
def get_fake_zk(nodename, timeout=30.0):
    """Return a started KazooClient connected to the keeper port (9181)
    of the given cluster instance."""
    connection_string = cluster.get_instance_ip(nodename) + ":9181"
    client = KazooClient(hosts=connection_string, timeout=timeout)
    client.start()
    return client
def test_smoke():
    """Start the cluster and verify a znode can be created through node1."""
    try:
        cluster.start()
        zk_client = get_fake_zk("node1")
        zk_client.create("/test_alive", b"aaaa")
    finally:
        # Always tear the cluster down, even if startup or create failed.
        cluster.shutdown()
| 370 |
737 | /* account.h -*- C++ -*-
<NAME>, 16 November 2012
Copyright (c) 2012 Datacratic Inc. All rights reserved.
*/
#pragma once
#include <string>
#include <vector>
#include <unordered_map>
#include <memory>
#include <unordered_set>
#include "rtbkit/common/currency.h"
#include "rtbkit/common/account_key.h"
#include "soa/types/date.h"
#include "jml/utils/string_functions.h"
#include <mutex>
#include <thread>
#include "jml/arch/spinlock.h"
namespace Datacratic {
struct EventRecorder;
}
namespace RTBKIT {
using namespace Datacratic;
struct Account;
std::ostream & operator << (std::ostream & stream, const Account & account);
struct ShadowAccount;
std::ostream &
operator << (std::ostream & stream, const ShadowAccount & account);
/*****************************************************************************/
/* ACCOUNT TYPE */
/*****************************************************************************/
enum AccountType {
    AT_NONE,    ///< Unset/unknown account type (the "none" REST value)
    AT_BUDGET,  ///< Budgeting account
    AT_SPEND    ///< Spend tracking account; leaf
};
/** REST decoding hook: map the textual account type ("none", "budget",
    "spend") onto the AccountType enum.  Any other string throws. */
inline AccountType restDecode(const std::string & param, AccountType *)
{
    if (param == "none") return AT_NONE;
    if (param == "budget") return AT_BUDGET;
    if (param == "spend") return AT_SPEND;
    throw ML::Exception("unknown account type " + param);
}
extern AccountType AccountTypeFromString(const std::string & param);
extern const std::string AccountTypeToString(enum AccountType type);
/*****************************************************************************/
/* ACCOUNT */
/*****************************************************************************/
/** This is the basic unit in which spend is tracked. */
/** Double-entry accounting record used for budget/spend tracking.

    Credit-side pools record money flowing into the account and debit-side
    pools money flowing out; `balance` (the not-yet-spent remainder) lives
    on the debit side so that the two sides always sum to the same total —
    checkInvariants() enforces exactly that equality.
*/
struct Account {
    Account()
        : type(AT_NONE), status(ACTIVE)
    {
    }

    AccountType type;

    //mutable Date lastAccess;
    //Date lastModification;

    // On the Credit side...
    CurrencyPool budgetIncreases;    ///< Transferred in from parent
    CurrencyPool budgetDecreases;    ///< Transferred out (in parent only)
    CurrencyPool recycledIn;         ///< Unspent transferred back in
    CurrencyPool allocatedIn;        ///< Allocated in from parent (see setBalance)
    CurrencyPool commitmentsRetired; ///< Money not spent
    CurrencyPool adjustmentsIn;      ///< Money transferred in by adjustments

    // On the Debit side...
    CurrencyPool recycledOut;        ///< Unspent transferred back out to parent
    CurrencyPool allocatedOut;       ///< Allocated out to children (see setBalance)
    CurrencyPool commitmentsMade;    ///< Money reserved for in-flight commitments
    CurrencyPool adjustmentsOut;     ///< Money transferred out by adjustments
    CurrencyPool spent;              ///< Actually spent
    CurrencyPool balance;            ///< Balance to be spent

    // Extra information tracked, but not used in any calculations
    LineItems lineItems;             ///< Spent line items
    LineItems adjustmentLineItems;   ///< Adjustment line items

    // Invariant: sum(Credit Side) = sum(Debit Side)

    enum Status {CLOSED, ACTIVE};
    Status status;

public:
    /** True when every pool of this account is component-wise the same or
        earlier (<=) than the corresponding pool of `otherAccount`.
        The derived `balance` field and the line items are not compared. */
    bool isSameOrPastVersion(const Account & otherAccount) const
    {
        /* All the amounts in the storage accounts must have a counterpart in
         * the banker accounts and their value must be inferior or equal to
         * the corresponding amounts in the banker. */
        return (budgetIncreases.isSameOrPastVersion(otherAccount.budgetIncreases)
                && budgetDecreases.isSameOrPastVersion(otherAccount.budgetDecreases)
                && recycledIn.isSameOrPastVersion(otherAccount.recycledIn)
                && allocatedIn.isSameOrPastVersion(otherAccount.allocatedIn)
                && commitmentsRetired.isSameOrPastVersion(otherAccount.commitmentsRetired)
                && adjustmentsIn.isSameOrPastVersion(otherAccount.adjustmentsIn)
                && recycledOut.isSameOrPastVersion(otherAccount.recycledOut)
                && allocatedOut.isSameOrPastVersion(otherAccount.allocatedOut)
                && commitmentsMade.isSameOrPastVersion(otherAccount.commitmentsMade)
                && adjustmentsOut.isSameOrPastVersion(otherAccount.adjustmentsOut)
                && spent.isSameOrPastVersion(otherAccount.spent));
    }

    /** Serialize to a versioned JSON object (objectType "Account").
        Note that `balance` is not written out; fromJson() re-derives it. */
    Json::Value toJson() const
    {
        // checkInvariants();

        Json::Value result(Json::objectValue);
        result["md"]["objectType"] = "Account";
        result["md"]["version"] = 1;
        result["type"] = AccountTypeToString(type);
        result["budgetIncreases"] = budgetIncreases.toJson();
        result["budgetDecreases"] = budgetDecreases.toJson();
        result["spent"] = spent.toJson();
        result["recycledIn"] = recycledIn.toJson();
        result["recycledOut"] = recycledOut.toJson();
        result["allocatedIn"] = allocatedIn.toJson();
        result["allocatedOut"] = allocatedOut.toJson();
        result["commitmentsMade"] = commitmentsMade.toJson();
        result["commitmentsRetired"] = commitmentsRetired.toJson();
        result["adjustmentsIn"] = adjustmentsIn.toJson();
        result["adjustmentsOut"] = adjustmentsOut.toJson();
        result["lineItems"] = lineItems.toJson();
        result["adjustmentLineItems"] = adjustmentLineItems.toJson();

        switch (status) {
        case ACTIVE:
            result["status"] = "active";
            break;
        case CLOSED:
            result["status"] = "closed";
            break;
        default:
            // Unknown status values degrade to "active".
            result["status"] = "active";
        }

        return result;
    }

    /** Deserialize from JSON; re-derives `balance` from the other pools and
        validates the invariants before returning. */
    static const Account fromJson(const Json::Value & json)
    {
        Account result;
        ExcAssertEqual(json["md"]["objectType"].asString(), "Account");
        ExcAssertEqual(json["md"]["version"].asInt(), 1);

        result.type = AccountTypeFromString(json["type"].asString());

        if (json.isMember("budget")) {
            // Legacy format: a single "budget" field.
            // NOTE(review): here budgetDecreases is read from the
            // "adjustmentsOut" key and adjustmentsOut is zeroed — presumably
            // a migration quirk of the old schema; confirm before changing.
            result.budgetIncreases = CurrencyPool::fromJson(json["budget"]);
            result.budgetDecreases = CurrencyPool::fromJson(json["adjustmentsOut"]);
            result.adjustmentsOut = CurrencyPool();
        }
        else {
            result.budgetIncreases = CurrencyPool::fromJson(json["budgetIncreases"]);
            result.budgetDecreases = CurrencyPool::fromJson(json["budgetDecreases"]);
            result.adjustmentsOut = CurrencyPool::fromJson(json["adjustmentsOut"]);
        }

        if (json.isMember("status")) {
            std::string s = json["status"].asString();
            if (s == "active")
                result.status = ACTIVE;
            else if (s == "closed")
                result.status = CLOSED;
            else
                result.status = ACTIVE;
        } else {
            result.status = ACTIVE;
        }

        result.spent = CurrencyPool::fromJson(json["spent"]);
        result.recycledIn = CurrencyPool::fromJson(json["recycledIn"]);
        result.recycledOut = CurrencyPool::fromJson(json["recycledOut"]);
        result.allocatedIn = CurrencyPool::fromJson(json["allocatedIn"]);
        result.allocatedOut = CurrencyPool::fromJson(json["allocatedOut"]);
        result.commitmentsMade = CurrencyPool::fromJson(json["commitmentsMade"]);
        result.commitmentsRetired = CurrencyPool::fromJson(json["commitmentsRetired"]);
        /* Note: adjustmentsIn is a credit value, ...Out is a debit value */
        result.adjustmentsIn = CurrencyPool::fromJson(json["adjustmentsIn"]);
        result.lineItems = LineItems::fromJson(json["lineItems"]);
        result.adjustmentLineItems = LineItems::fromJson(json["adjustmentLineItems"]);

        // balance = credit side minus debit side.
        // NOTE(review): result.balance appears on the debit side of its own
        // defining expression; it is still default-constructed at this point,
        // so the term presumably contributes nothing — confirm that a
        // default CurrencyPool is empty.
        result.balance = ((result.budgetIncreases
                           + result.recycledIn
                           + result.commitmentsRetired
                           + result.adjustmentsIn
                           + result.allocatedIn)
                          - (result.budgetDecreases
                             + result.recycledOut
                             + result.commitmentsMade
                             + result.spent
                             + result.adjustmentsOut
                             + result.balance
                             + result.allocatedOut));

        result.checkInvariants();

        return result;
    }

    /*************************************************************************/
    /* DERIVED QUANTITIES                                                    */
    /*************************************************************************/

    /** Return the amount which is balance to be recycled. */
    CurrencyPool getRecycledAvail() const
    {
        return (recycledIn - recycledOut).nonNegative();
    }

    /** Returns the budget what was not transferred from or to other accounts.
    */
    CurrencyPool getNetBudget() const;

    /*************************************************************************/
    /* INVARIANTS                                                            */
    /*************************************************************************/

    /** Assert that every pool is non-negative and that the credit and debit
        sides balance.  `whereFrom` tags the failure message for diagnosis. */
    void checkInvariants(const char * whereFrom = "") const
    {
        try {
            // Everything but balance must be positive
            ExcAssert(budgetIncreases.isNonNegative());
            ExcAssert(budgetDecreases.isNonNegative());
            ExcAssert(recycledIn.isNonNegative());
            ExcAssert(recycledOut.isNonNegative());
            ExcAssert(commitmentsRetired.isNonNegative());
            // NOTE(review): duplicate of the recycledOut check above — harmless.
            ExcAssert(recycledOut.isNonNegative());
            ExcAssert(commitmentsMade.isNonNegative());
            ExcAssert(spent.isNonNegative());
            ExcAssert(adjustmentsIn.isNonNegative());
            ExcAssert(adjustmentsOut.isNonNegative());
            ExcAssert(allocatedIn.isNonNegative());
            ExcAssert(allocatedOut.isNonNegative());

            // Credit and debit sides must balance out
            CurrencyPool credit = (budgetIncreases + recycledIn + commitmentsRetired
                                   + adjustmentsIn + allocatedIn);
            CurrencyPool debit = (budgetDecreases + recycledOut + commitmentsMade + spent
                                  + adjustmentsOut + balance
                                  + allocatedOut);

            ExcAssertEqual(credit, debit);
        } catch (...) {
            using namespace std;
            cerr << "error on account " << *this << " checking invariants at "
                 << whereFrom << endl;
            throw;
        }
        //...
    }

    /*************************************************************************/
    /* TRANSFER OPERATIONS (ONE SIDED)                                       */
    /*************************************************************************/

    /* These operations need to be paired with a corresponding operation on
       the other side.
    */

    /** Recuperate everything that can safely be removed from the account,
        and return the amount freed.
    */
    CurrencyPool recuperate()
    {
        auto result = balance;
        recycledOut += balance;
        balance.clear();

        checkInvariants();

        return result;
    }

    /** Take some budget that had been recuperated from somewhere else and
        add it in.
    */
    void recycle(const CurrencyPool & recuperated)
    {
        ExcAssert(recuperated.isNonNegative());

        recycledIn += recuperated;
        balance += recuperated;

        checkInvariants();
    }

    /** Set the budget to the given amount.  It will adjust the balance
        amount to match the new level.
    */
    void setBudget(const CurrencyPool & newBudget);

    /** Set the balance budget to the given level.  This can either
        transfer money out of or into the account.
    */
    CurrencyPool setBalance(Account & parentAccount,
                            const CurrencyPool & newBalance)
    {
        checkInvariants("entry to setBalance");

        // Snapshots kept only for the diagnostic dump in the catch below.
        auto before = *this;
        auto parentBefore = parentAccount;

        CurrencyPool requiredTotal = newBalance - balance;

        // Some amount needs to be transferred in, and some amount out
        CurrencyPool requiredIn = requiredTotal.nonNegative();
        CurrencyPool requiredOut = requiredIn - requiredTotal;

        ExcAssert(requiredIn.isNonNegative());
        ExcAssert(requiredOut.isNonNegative());

        // Cap the inbound transfer by what the parent actually has available.
        CurrencyPool toTransfer = parentAccount.balance.limit(requiredIn);

        using namespace std;
        bool debug = false;

        // First take it from the recycled...
        CurrencyPool parentRecycledAvail
            = parentAccount.getRecycledAvail();
        CurrencyPool fromRecycled = parentRecycledAvail.limit(toTransfer);
        CurrencyPool toRecycled = requiredOut;

        if (debug) {
            cerr << "newBalance = " << newBalance << endl;
            cerr << "balance = " << balance << endl;
            cerr << "requiredTotal = " << requiredTotal << endl;
            cerr << "requiredIn = " << requiredIn << endl;
            cerr << "requiredOut = " << requiredOut << endl;
            cerr << "toTransfer = " << toTransfer << endl;
            cerr << "parentRecycledAvail = " << parentRecycledAvail << endl;
            cerr << "fromRecycled = " << fromRecycled << endl;
        }

        // And then as a commitment
        CurrencyPool fromBudget = toTransfer - fromRecycled;

        if (debug)
            cerr << "fromBudget = " << fromBudget << endl;

        // Take from parent recycled
        parentAccount.recycledOut += fromRecycled;
        parentAccount.balance -= fromRecycled;
        recycledIn += fromRecycled;
        balance += fromRecycled;

        // Give back to budget
        // NOTE(review): despite the comment, this moves the remainder from the
        // parent's balance into this account as a budget increase.
        parentAccount.allocatedOut += fromBudget;
        parentAccount.balance -= fromBudget;
        budgetIncreases += fromBudget;
        balance += fromBudget;

        // Give to parent recycled
        parentAccount.recycledIn += toRecycled;
        parentAccount.balance += toRecycled;
        recycledOut += toRecycled;
        balance -= toRecycled;

        try {
            checkInvariants("exiting from setBalance");
            parentAccount.checkInvariants("parent check invariants");
        } catch (...) {
            // Dump the full before/after state to aid debugging, then rethrow.
            cerr << "before: " << before << endl;
            cerr << "after: " << *this << endl;
            cerr << "parent before: " << parentBefore << endl;
            cerr << "parent after: " << parentAccount << endl;

            cerr << "newBalance = " << newBalance << endl;
            cerr << "balance = " << balance << endl;
            cerr << "requiredTotal = " << requiredTotal << endl;
            cerr << "requiredIn = " << requiredIn << endl;
            cerr << "requiredOut = " << requiredOut << endl;
            cerr << "toTransfer = " << toTransfer << endl;
            cerr << "parentRecycledAvail = " << parentRecycledAvail << endl;
            cerr << "fromRecycled = " << fromRecycled << endl;
            cerr << "fromBudget = " << fromBudget << endl;
            throw;
        }

        return balance;
    }

    /** Increase or decrease the adjustments made to the account
    */
    void addAdjustment(const CurrencyPool & newAdjustment);

    /** (migration helper) Register an expense on a AT_SPEND account.
    */
    CurrencyPool importSpend(const CurrencyPool & spend);

    /** Move this account's positive balance back up into the parent
        account's balance via the recycled pools. */
    void recuperateTo(Account & parentAccount)
    {
        CurrencyPool amount = balance.nonNegative();

        recycledOut += amount;
        balance -= amount;

        parentAccount.recycledIn += amount;
        parentAccount.balance += amount;

        checkInvariants("recuperateTo");
    }
};
/*****************************************************************************/
/* SHADOW ACCOUNT */
/*****************************************************************************/
/** This is an account that can track spend. It is a shadow of an account
that lives in the master banker, and only keeps track of a small amount
of information.
*/
/** This is an account that can track spend.  It is a shadow of an account
    that lives in the master banker, and only keeps track of a small amount
    of information.

    Invariant (enforced by checkInvariants):
        netBudget + commitmentsRetired == commitmentsMade + spent + balance
*/
struct ShadowAccount {
    ShadowAccount()
        : status(Account::ACTIVE),
          attachedBids(0), detachedBids(0),
          // FIX: lastExpiredCommitments was previously left uninitialized,
          // so reading it before an explicit assignment was undefined.
          lastExpiredCommitments(0)
    {}

    Account::Status status;

    // credit
    CurrencyPool netBudget;          ///< net of fields not mentioned here
    CurrencyPool commitmentsRetired;

    // debit
    CurrencyPool commitmentsMade;
    CurrencyPool spent;

    CurrencyPool balance;  /// DERIVED; debit - credit

    LineItems lineItems;  ///< Line items for spend

    /** One outstanding (authorized but not yet committed/cancelled) bid. */
    struct Commitment {
        Commitment(Amount amount, Date timestamp)
            : amount(amount), timestamp(timestamp)
        {
        }

        Amount amount;   ///< Amount the commitment is for
        Date timestamp;  ///< When the commitment was made
    };

    std::unordered_map<std::string, Commitment> commitments;

    /** Assert pool non-negativity and the credit/debit balance equality;
        dumps the account state to stderr on failure before rethrowing. */
    void checkInvariants() const
    {
        try {
            //ExcAssert(netBudget.isNonNegative());
            ExcAssert(commitmentsRetired.isNonNegative());
            ExcAssert(commitmentsMade.isNonNegative());
            ExcAssert(spent.isNonNegative());

            CurrencyPool credit = netBudget + commitmentsRetired;
            CurrencyPool debit = commitmentsMade + spent + balance;

            ExcAssertEqual(credit, debit);
        } catch (...) {
            using namespace std;
            cerr << "invariants failed:" << endl;
            cerr << *this << endl;
            throw;
        }
    }

    /** Serialize to versioned JSON (objectType "ShadowAccount"); also
        round-trips the result through fromJson as a self-check. */
    Json::Value toJson() const
    {
        checkInvariants();

        Json::Value result(Json::objectValue);
        result["md"]["objectType"] = "ShadowAccount";
        result["md"]["version"] = 1;
        result["netBudget"] = netBudget.toJson();
        result["commitmentsRetired"] = commitmentsRetired.toJson();
        result["commitmentsMade"] = commitmentsMade.toJson();
        result["spent"] = spent.toJson();
        result["lineItems"] = lineItems.toJson();
        result["balance"] = balance.toJson();

        ShadowAccount reparsed = fromJson(result);
        reparsed.checkInvariants();

        ExcAssertEqual(netBudget, reparsed.netBudget);
        ExcAssertEqual(spent, reparsed.spent);
        ExcAssertEqual(commitmentsRetired, reparsed.commitmentsRetired);
        ExcAssertEqual(commitmentsMade, reparsed.commitmentsMade);
        ExcAssertEqual(lineItems, reparsed.lineItems);

        return result;
    }

    /** Deserialize from JSON produced by toJson(); validates invariants. */
    static const ShadowAccount fromJson(const Json::Value & val)
    {
        ShadowAccount result;
        ExcAssertEqual(val["md"]["objectType"].asString(), "ShadowAccount");
        ExcAssertEqual(val["md"]["version"].asInt(), 1);

        result.netBudget = CurrencyPool::fromJson(val["netBudget"]);
        result.commitmentsRetired = CurrencyPool::fromJson(val["commitmentsRetired"]);
        result.commitmentsMade = CurrencyPool::fromJson(val["commitmentsMade"]);
        result.spent = CurrencyPool::fromJson(val["spent"]);
        result.balance = CurrencyPool::fromJson(val["balance"]);
        result.lineItems = LineItems::fromJson(val["lineItems"]);

        result.checkInvariants();

        return result;
    }

    /*************************************************************************/
    /* SPEND TRACKING                                                        */
    /*************************************************************************/

    /** Record a win that was never authorized (no prior commitment). */
    void forceWinBid(Amount amountPaid,
                     const LineItems & lineItems)
    {
        commitDetachedBid(Amount(), amountPaid, lineItems);
    }

    /// Commit a bid that has been detached from its tracking
    void commitDetachedBid(Amount amountAuthorized,
                           Amount amountPaid,
                           const LineItems & lineItems)
    {
        checkInvariants();

        // Whatever was authorized but not paid flows back into the balance.
        Amount amountUnspent = amountAuthorized - amountPaid;
        balance += amountUnspent;
        commitmentsRetired += amountAuthorized;
        spent += amountPaid;

        if (amountPaid) {
            // Increase the number of impressions by 1
            // whenever an amount is paid for a bid
            commitEvent(Amount(CurrencyCode::CC_IMP, 1.0));
        }

        this->lineItems += lineItems;

        checkInvariants();
    }

    /// Commit a specific currency (amountToCommit)
    void commitEvent(const Amount & amountToCommit)
    {
        checkInvariants();
        spent += amountToCommit;
        commitmentsRetired += amountToCommit;
        checkInvariants();
    }

    /*************************************************************************/
    /* SPEND AUTHORIZATION                                                   */
    /*************************************************************************/

    /** Reserve `amount` for bid `item`.  Returns false (and records nothing)
        when the balance cannot cover the amount. */
    bool authorizeBid(const std::string & item,
                      Amount amount)
    {
        checkInvariants();

        if (!balance.hasAvailable(amount))
            return false;  // no budget balance

        attachBid(item, amount);
        balance -= amount;
        commitmentsMade += amount;

        checkInvariants();

        return true;
    }

    /** Settle the commitment for `item` with the amount actually paid. */
    void commitBid(const std::string & item,
                   Amount amountPaid,
                   const LineItems & lineItems)
    {
        commitDetachedBid(detachBid(item), amountPaid, lineItems);
    }

    /** Cancel the commitment for `item`, returning the full reserve. */
    void cancelBid(const std::string & item)
    {
        commitDetachedBid(detachBid(item), Amount(), LineItems());
    }

    /** Remove and return the authorized amount for `item`; throws when the
        commitment is unknown. */
    Amount detachBid(const std::string & item)
    {
        checkInvariants();

        auto cit = commitments.find(item);
        if (cit == commitments.end())
            throw ML::Exception("unknown commitment being committed");

        Amount amountAuthorized = cit->second.amount;
        commitments.erase(cit);

        checkInvariants();

        detachedBids++;

        return amountAuthorized;
    }

    /** Register a new commitment for `item`; throws if one already exists. */
    void attachBid(const std::string & item,
                   Amount amount)
    {
        Date now = Date::now();
        auto c = commitments.insert(make_pair(item, Commitment(amount, now)));
        if (!c.second)
            throw ML::Exception("attempt to re-open commitment");

        attachedBids++;
    }

    /*************************************************************************/
    /* SYNCHRONIZATION                                                       */
    /*************************************************************************/

    /** Push this shadow's new spend/commitment activity into the master
        account and return the updated master.  Assumes the shadow's pools
        only ever move forward relative to the master's. */
    const Account syncToMaster(Account & masterAccount) const
    {
        checkInvariants();
        masterAccount.checkInvariants();

        CurrencyPool newCommitmentsMade
            = commitmentsMade - masterAccount.commitmentsMade;
        CurrencyPool newCommitmentsRetired
            = commitmentsRetired - masterAccount.commitmentsRetired;
        CurrencyPool newSpend
            = spent - masterAccount.spent;

        ExcAssert(newCommitmentsMade.isNonNegative());
        ExcAssert(newCommitmentsRetired.isNonNegative());
        ExcAssert(newSpend.isNonNegative());

        masterAccount.commitmentsRetired = commitmentsRetired;
        masterAccount.commitmentsMade = commitmentsMade;
        masterAccount.spent = spent;

        masterAccount.balance
            += (newCommitmentsRetired - newCommitmentsMade - newSpend);

        masterAccount.lineItems = lineItems;

        masterAccount.checkInvariants("syncToMaster");
        checkInvariants();

        return masterAccount;
    }

    /** Refresh this shadow's budget/balance/status from the master account,
        keeping local spend and commitments. */
    void syncFromMaster(const Account & masterAccount)
    {
        checkInvariants();
        masterAccount.checkInvariants();

        // net budget: balance assuming spent, commitments are zero
        netBudget = masterAccount.getNetBudget();
        balance = netBudget + commitmentsRetired
            - commitmentsMade - spent;

        status = masterAccount.status;

        checkInvariants();
    }

    /** This method should be called exactly once the first time that a
        shadow account receives its initial state from the master.

        It will merge any changes that have been made since initialization
        with the initial state from the master, in such a way that the
        state will be the same as if the account had been synchronized before
        any operations had occurred and then all operations had been
        replayed.
    */
    void initializeAndMergeState(const Account & masterAccount)
    {
        // We have to tally up the fields from the master and the current
        // status.

        checkInvariants();
        masterAccount.checkInvariants();

        // net budget: balance assuming spent, commitments are zero
        netBudget = masterAccount.getNetBudget();
        commitmentsMade += masterAccount.commitmentsMade;
        commitmentsRetired += masterAccount.commitmentsRetired;
        spent += masterAccount.spent;
        lineItems += masterAccount.lineItems;

        balance = netBudget + commitmentsRetired - commitmentsMade - spent;

        checkInvariants();
    }

    /* LOGGING */
    uint32_t attachedBids;            ///< Count of commitments opened
    uint32_t detachedBids;            ///< Count of commitments closed/detached
    uint32_t lastExpiredCommitments;  ///< Commitments expired in the last sweep

    void logBidEvents(const Datacratic::EventRecorder & eventRecorder,
                      const std::string & accountKey);
};
/*****************************************************************************/
/* ACCOUNT SUMMARY */
/*****************************************************************************/
/** This is a summary of an account and all of its sub-accounts. */
/** This is a summary of an account and all of its sub-accounts. */
struct AccountSummary {
    CurrencyPool budget;          ///< Total amount we're allowed to spend
    CurrencyPool inFlight;        ///< Sum of sub-account inFlights (pending commitments)
    CurrencyPool spent;           ///< Sum of sub-account spend
    CurrencyPool adjustments;     ///< Sum of sub-account adjustments
    CurrencyPool adjustedSpent;   ///< Spend minus adjustments
    CurrencyPool effectiveBudget; ///< budget computed internally
    CurrencyPool available;       ///< Amount still available to spend
                                  ///< (NOTE(review): original comment duplicated
                                  ///< budget's; computed outside this struct — confirm)

    Account account;

    /** Fold a child's summary into this one.  Only effectiveBudget,
        inFlight, spent and adjustments are aggregated here; budget,
        adjustedSpent and available are not touched by this method. */
    void addChild(const std::string & name,
                  const AccountSummary & child,
                  bool addInSubaccounts)
    {
        if (addInSubaccounts)
            subAccounts[name] = child;
        effectiveBudget += child.effectiveBudget;
        inFlight += child.inFlight;
        spent += child.spent;
        adjustments += child.adjustments;
    }

    /** Recursively pretty-print this summary and its sub-accounts,
        indenting two spaces per level (b=budget, s=spent, i=inFlight). */
    void dump(std::ostream & stream,
              int indent = 0,
              const std::string & name = "toplevel") const
    {
        stream << std::string(indent, ' ')
               << name
               << " b:" << budget
               << " s:" << spent
               << " i:" << inFlight
               << std::endl;
        for (const auto & sa: subAccounts) {
            sa.second.dump(stream, indent + 2, sa.first);
        }
    }

    /** Serialize to JSON.  With simplified=true the account detail and the
        sub-account tree are omitted and the objectType changes.
        NOTE(review): fromJson() only accepts objectType "AccountSummary",
        so a simplified document cannot be round-tripped — confirm this is
        intentional before relying on it. */
    Json::Value toJson(bool simplified = false) const
    {
        Json::Value result;
        result["md"]["objectType"]
            = simplified ? "AccountSimpleSummary" : "AccountSummary";
        result["md"]["version"] = 1;
        result["budget"] = budget.toJson();
        result["effectiveBudget"] = effectiveBudget.toJson();
        result["spent"] = spent.toJson();
        result["adjustments"] = adjustments.toJson();
        result["adjustedSpent"] = adjustedSpent.toJson();
        result["available"] = available.toJson();
        result["inFlight"] = inFlight.toJson();
        if (!simplified) {
            result["account"] = account.toJson();
            for (const auto & sa: subAccounts) {
                result["subAccounts"][sa.first] = sa.second.toJson();
            }
        }
        return result;
    }

    /** Deserialize a full (non-simplified) summary, including the
        sub-account tree. */
    static AccountSummary fromJson(const Json::Value & val)
    {
        AccountSummary result;

        ExcAssertEqual(val["md"]["objectType"].asString(), "AccountSummary");
        ExcAssertEqual(val["md"]["version"].asInt(), 1);

        result.budget = CurrencyPool::fromJson(val["budget"]);
        result.effectiveBudget = CurrencyPool::fromJson(val["effectiveBudget"]);
        result.inFlight = CurrencyPool::fromJson(val["inFlight"]);
        result.spent = CurrencyPool::fromJson(val["spent"]);
        result.adjustments = CurrencyPool::fromJson(val["adjustments"]);
        result.adjustedSpent = CurrencyPool::fromJson(val["adjustedSpent"]);
        result.available = CurrencyPool::fromJson(val["available"]);
        result.account = Account::fromJson(val["account"]);

        auto & sa = val["subAccounts"];
        for (auto it = sa.begin(), end = sa.end(); it != end; ++it) {
            result.subAccounts[it.memberName()]
                = AccountSummary::fromJson(*it);
        }

        return result;
    }

    std::map<std::string, AccountSummary> subAccounts;
};
/// Stream an AccountSummary using AccountSummary::dump's default formatting
/// (indent 0, name "toplevel").
inline std::ostream &
operator << (std::ostream & stream, const AccountSummary & summary)
{
    summary.dump(stream);
    return stream;
}
/*****************************************************************************/
/* ACCOUNTS */
/*****************************************************************************/
struct Accounts {
Accounts()
: sessionStart(Datacratic::Date::now())
{
}
Datacratic::Date sessionStart;
struct AccountInfo: public Account {
std::set<AccountKey> children;
/* spend tracking across sessions */
CurrencyPool initialSpent;
};
const Account createAccount(const AccountKey & account,
AccountType type)
{
Guard guard(lock);
if (account.empty())
throw ML::Exception("can't create account with empty key");
return ensureAccount(account, type);
}
void restoreAccount(const AccountKey & accountKey,
const Json::Value & jsonValue,
bool overwrite = false) {
Guard guard(lock);
// if (accounts.count(accountKey) != 0 and !overwrite) {
// throw ML::Exception("an account already exists with that name");
// }
Account validAccount = validAccount.fromJson(jsonValue);
AccountInfo & newAccount = ensureAccount(accountKey, validAccount.type);
newAccount.type = AT_SPEND;
newAccount.type = validAccount.type;
newAccount.budgetIncreases = validAccount.budgetIncreases;
newAccount.budgetDecreases = validAccount.budgetDecreases;
newAccount.spent = validAccount.spent;
newAccount.recycledIn = validAccount.recycledIn;
newAccount.recycledOut = validAccount.recycledOut;
newAccount.allocatedIn = validAccount.allocatedIn;
newAccount.allocatedOut = validAccount.allocatedOut;
newAccount.commitmentsMade = validAccount.commitmentsMade;
newAccount.commitmentsRetired = validAccount.commitmentsRetired;
newAccount.adjustmentsIn = validAccount.adjustmentsIn;
newAccount.adjustmentsOut = validAccount.adjustmentsOut;
newAccount.balance = validAccount.balance;
newAccount.lineItems = validAccount.lineItems;
newAccount.adjustmentLineItems = validAccount.adjustmentLineItems;
newAccount.status = Account::ACTIVE;
}
void reactivateAccount(const AccountKey & accountKey)
{
Guard guard(lock);
AccountKey parents = accountKey;
while (!parents.empty()) {
getAccountImpl(parents).status = Account::ACTIVE;
parents.pop_back();
}
reactivateAccountChildren(accountKey);
}
const Account createBudgetAccount(const AccountKey & account)
{
Guard guard(lock);
if (account.empty())
throw ML::Exception("can't create account with empty key");
return ensureAccount(account, AT_BUDGET);
}
const Account createSpendAccount(const AccountKey & account)
{
Guard guard(lock);
if (account.size() < 2)
throw ML::Exception("commitment account must have parent");
return ensureAccount(account, AT_SPEND);
}
const AccountInfo getAccount(const AccountKey & account) const
{
Guard guard(lock);
return getAccountImpl(account);
}
std::pair<bool, bool> accountPresentAndActive(const AccountKey & account) const
{
Guard guard(lock);
return accountPresentAndActiveImpl(account);
}
/** closeAccount behavior is to close all children then close itself,
always transfering from children to parent. If top most account,
then throws an error after closing all children first.
*/
const Account closeAccount(const AccountKey & account)
{
Guard guard(lock);
return closeAccountImpl(account);
}
void checkInvariants() const
{
Guard guard(lock);
for (auto & a: accounts) {
a.second.checkInvariants();
}
}
Json::Value toJson() const
{
Json::Value result(Json::objectValue);
Guard guard(lock);
for (auto & a: accounts) {
result[a.first.toString()] = a.second.toJson();
}
return result;
}
static Accounts fromJson(const Json::Value & json);
/*************************************************************************/
/* BUDGET OPERATIONS */
/*************************************************************************/
/* These operations are assocated with putting money into the system. */
const Account setBudget(const AccountKey & topLevelAccount,
const CurrencyPool & newBudget)
{
using namespace std;
//cerr << "setBudget with newBudget " << newBudget << endl;
Guard guard(lock);
if (topLevelAccount.size() != 1)
throw ML::Exception("can't setBudget except at top level");
auto & a = ensureAccount(topLevelAccount, AT_BUDGET);
a.setBudget(newBudget);
return a;
}
/** Sets the balance budget for the given account to the given amount,
by transferring in from the parent account.
If typeToCreate is not AT_NONE, then the account will be implicitly
created if it doesn't exist.
*/
const Account setBalance(const AccountKey & account,
CurrencyPool amount,
AccountType typeToCreate)
{
Guard guard(lock);
if (typeToCreate != AT_NONE && !accounts.count(account)) {
auto & a = ensureAccount(account, typeToCreate);
a.setBalance(getParentAccount(account), amount);
return a;
}
else {
auto & a = getAccountImpl(account);
#if 0
using namespace std;
if (a.type == AT_BUDGET)
cerr << Date::now()
<< " setBalance " << account << " " << " from " << a.balance
<< " to " << amount << endl;
#endif
a.setBalance(getParentAccount(account), amount);
return a;
}
}
const CurrencyPool getBalance(const AccountKey & account) const
{
Guard guard(lock);
auto it = accounts.find(account);
if (it == accounts.end())
return CurrencyPool();
return it->second.balance;
}
const Account addAdjustment(const AccountKey & account,
CurrencyPool amount)
{
Guard guard(lock);
auto & a = getAccountImpl(account);
a.addAdjustment(amount);
return a;
}
/*************************************************************************/
/* TRANSFER OPERATIONS */
/*************************************************************************/
/* These operations are two-sided and involve transferring between a
parent account and a child account.
*/
void recuperate(const AccountKey & account)
{
Guard guard(lock);
getAccountImpl(account).recuperateTo(getParentAccount(account));
}
AccountSummary getAccountSummary(const AccountKey & account,
int maxDepth = -1) const
{
Guard guard(lock);
return getAccountSummaryImpl(account, 0, maxDepth);
}
Json::Value
getAccountSummariesJson(bool simplified = false, int maxDepth = -1)
const
{
Guard guard(lock);
Json::Value summaries;
for (const auto & it: accounts) {
const AccountKey & key = it.first;
AccountSummary summary = getAccountSummaryImpl(key, 0, maxDepth);
summaries[key.toString()] = summary.toJson(simplified);
}
return summaries;
}
const Account importSpend(const AccountKey & account,
const CurrencyPool & amount)
{
Guard guard(lock);
auto & a = getAccountImpl(account);
a.importSpend(amount);
return a;
}
/*************************************************************************/
/* HIGH LEVEL OPERATIONS */
/*************************************************************************/
/* These are higher-level opertions that build on top of the others in
order to make a given condition true.
*/
/*************************************************************************/
/* SYNCHRONIZATION OPERATIONS */
/*************************************************************************/
const Account syncFromShadow(const AccountKey & account,
const ShadowAccount & shadow)
{
Guard guard(lock);
// In the case that an account was added and the banker crashed
// before it could be written to persistent storage, we need to
// create the empty account here.
if (!accounts.count(account))
return shadow.syncToMaster(ensureAccount(account, AT_SPEND));
return shadow.syncToMaster(getAccountImpl(account));
}
/* "Out of sync" here means that the in-memory version of the relevant
accounts is obsolete compared to the version stored in the Redis
backend */
void markAccountOutOfSync(const AccountKey & account)
{
Guard guard(lock);
outOfSyncAccounts.insert(account);
}
bool isAccountOutOfSync(const AccountKey & account) const
{
Guard guard(lock);
return (outOfSyncAccounts.count(account) > 0);
}
/** interaccount consistency */
/* "Inconsistent" here means that there is a mismatch between the members
* used in money transfers for a given Account and the corresponding
* members in its subaccounts: allocatedOut and budgetIncreases,
* recycledIn and recycedOut, ...
*/
void ensureInterAccountConsistency();
bool isAccountInconsistent(const AccountKey & account) const
{
Guard guard(lock);
return (inconsistentAccounts.count(account) > 0);
}
/* Returns whether the budgetIncreases of subaccounts are consistent with
the allocatedOut of the top-account, recursively.
maxRecusion: -1 = infinity
*/
bool checkBudgetConsistency(const AccountKey & accountKey,
int maxRecursion = -1) const;
/* Returns the amounts in recycledIn and recycledOut that were transferred
* strictly from and to the parent account. */
void getRecycledUp(const AccountKey & accountKey,
CurrencyPool & recycledInUp,
CurrencyPool & recycledOutUp) const;
private:
friend class ShadowAccounts;
typedef ML::Spinlock Lock;
typedef std::unique_lock<Lock> Guard;
mutable Lock lock;
typedef std::map<AccountKey, AccountInfo> AccountMap;
AccountMap accounts;
typedef std::unordered_set<AccountKey> AccountSet;
AccountSet outOfSyncAccounts;
AccountSet inconsistentAccounts;
public:
std::vector<AccountKey>
getAccountKeys(const AccountKey & prefix = AccountKey(),
int maxDepth = -1) const
{
Guard guard(lock);
std::vector<AccountKey> result;
for (auto it = accounts.lower_bound(prefix), end = accounts.end();
it != accounts.end() && it->first.hasPrefix(prefix); ++it) {
if (maxDepth == -1 || it->first.size() <= maxDepth)
result.push_back(it->first);
}
return result;
}
void
forEachAccount(const std::function<void (const AccountKey &,
const Account &)>
& onAccount) const
{
Guard guard(lock);
for (auto & a: accounts) {
onAccount(a.first, a.second);
}
}
size_t size() const
{
Guard guard(lock);
return accounts.size();
}
bool empty() const
{
Guard guard(lock);
return accounts.empty();
}
/** Return a subtree of the accounts. */
Accounts getAccounts(const AccountKey & root, int maxDepth = 0)
{
Accounts result;
Guard guard(lock);
std::function<void (const AccountKey &, int, int)> doAccount
= [&] (const AccountKey & key, int depth, int maxDepth)
{
auto it = accounts.find(key);
if (it == accounts.end())
return;
result.ensureAccount(it->first, it->second.type) = it->second;
if (depth >= maxDepth)
return;
for (auto & k: it->second.children)
doAccount(k, depth + 1, maxDepth);
};
doAccount(root, 0, maxDepth);
return result;
}
private:
AccountInfo & ensureAccount(const AccountKey & accountKey,
AccountType type)
{
ExcAssertGreaterEqual(accountKey.size(), 1);
auto it = accounts.find(accountKey);
if (it != accounts.end()) {
ExcAssertEqual(it->second.type, type);
return it->second;
}
else {
if (accountKey.size() == 1) {
ExcAssertEqual(type, AT_BUDGET);
}
else {
AccountInfo & parent
= ensureAccount(accountKey.parent(), AT_BUDGET);
parent.children.insert(accountKey);
}
auto & result = accounts[accountKey];
result.type = type;
return result;
}
}
AccountInfo & getAccountImpl(const AccountKey & account)
{
auto it = accounts.find(account);
if (it == accounts.end())
throw ML::Exception("couldn't get account: " + account.toString());
return it->second;
}
std::pair<bool, bool> accountPresentAndActiveImpl(const AccountKey & account) const
{
auto it = accounts.find(account);
if (it == accounts.end())
return std::make_pair(false, false);
if (it->second.status == Account::CLOSED)
return std::make_pair(true, false);
else
return std::make_pair(true, true);
}
const Account closeAccountImpl(const AccountKey & accountKey)
{
AccountInfo & account = getAccountImpl(accountKey);
if (account.status == Account::CLOSED)
return account;
for ( AccountKey child : account.children ) {
closeAccountImpl(child);
}
if (accountKey.size() > 1)
account.recuperateTo(getParentAccount(accountKey));
account.status = Account::CLOSED;
return account;
}
void reactivateAccountChildren(const AccountKey & accountKey) {
if (accountPresentAndActiveImpl(accountKey).first) {
AccountInfo & account = getAccountImpl(accountKey);
for (auto child : account.children)
reactivateAccountChildren(child);
account.status = Account::ACTIVE;
}
}
const AccountInfo & getAccountImpl(const AccountKey & account) const
{
auto it = accounts.find(account);
if (it == accounts.end())
throw ML::Exception("couldn't get account: " + account.toString());
return it->second;
}
Account & getParentAccount(const AccountKey & accountKey)
{
if (accountKey.size() < 2)
throw ML::Exception("account has no parent");
AccountKey parentKey = accountKey;
parentKey.pop_back();
Account & result = getAccountImpl(parentKey);
ExcAssertEqual(result.type, AT_BUDGET);
return result;
}
void forEachChildAccount(const AccountKey & account,
std::function<void (const AccountKey & key)> cb) const
{
auto & info = getAccountImpl(account);
for (const AccountKey & ch: info.children)
cb(ch);
}
AccountSummary getAccountSummaryImpl(const AccountKey & account,
int depth, int maxDepth) const
{
AccountSummary result;
const Account & a = getAccountImpl(account);
result.account = a;
result.spent = a.spent;
result.budget = a.budgetIncreases - a.budgetDecreases;
result.effectiveBudget = a.budgetIncreases - a.budgetDecreases
+ a.recycledIn - a.recycledOut
+ a.allocatedIn - a.allocatedOut;
result.inFlight = a.commitmentsMade - a.commitmentsRetired;
result.adjustments = a.adjustmentsIn - a.adjustmentsOut;
auto doChildAccount = [&] (const AccountKey & key) {
auto childSummary = getAccountSummaryImpl(key, depth + 1,
maxDepth);
result.addChild(key.back(), childSummary,
maxDepth == -1 || depth < maxDepth);
};
forEachChildAccount(account, doChildAccount);
result.adjustedSpent = result.spent - result.adjustments;
result.available = (result.effectiveBudget - result.adjustedSpent - result.inFlight);
return result;
}
bool checkBudgetConsistencyImpl(const AccountKey & accountKey,
int maxRecursion, int currentLevel) const;
};
/*****************************************************************************/
/* SHADOW ACCOUNTS */
/*****************************************************************************/
struct ShadowAccounts {
/** Callback called whenever a new account is created. This can be
assigned to in order to add functionality that must be present
whenever a new account is created.
*/
std::function<void (AccountKey)> onNewAccount;
const ShadowAccount activateAccount(const AccountKey & account)
{
Guard guard(lock);
return getAccountImpl(account);
}
const ShadowAccount syncFromMaster(const AccountKey & account,
const Account & master)
{
Guard guard(lock);
auto & a = getAccountImpl(account);
ExcAssert(!a.uninitialized);
a.syncFromMaster(master);
return a;
}
/** Initialize an account by merging with the initial state as
received from the master banker.
*/
const ShadowAccount
initializeAndMergeState(const AccountKey & account,
const Account & master)
{
Guard guard(lock);
auto & a = getAccountImpl(account);
ExcAssert(a.uninitialized);
a.initializeAndMergeState(master);
a.uninitialized = false;
return a;
}
void checkInvariants() const
{
Guard guard(lock);
for (auto & a: accounts) {
a.second.checkInvariants();
}
}
const ShadowAccount getAccount(const AccountKey & accountKey) const
{
Guard guard(lock);
return getAccountImpl(accountKey);
}
bool accountExists(const AccountKey & accountKey) const
{
Guard guard(lock);
return accounts.count(accountKey);
}
bool createAccountAtomic(const AccountKey & accountKey)
{
Guard guard(lock);
AccountEntry & account = getAccountImpl(accountKey, false /* call onCreate */);
bool result = account.first;
// record that this account creation is requested for the first time
account.first = false;
return result;
}
/*************************************************************************/
/* SYNCHRONIZATION */
/*************************************************************************/
void syncTo(Accounts & master) const
{
Guard guard1(lock);
Guard guard2(master.lock);
for (auto & a: accounts)
a.second.syncToMaster(master.getAccountImpl(a.first));
}
void syncFrom(const Accounts & master)
{
Guard guard1(lock);
Guard guard2(master.lock);
for (auto & a: accounts) {
a.second.syncFromMaster(master.getAccountImpl(a.first));
if (master.outOfSyncAccounts.count(a.first) > 0) {
outOfSyncAccounts.insert(a.first);
}
}
}
void sync(Accounts & master)
{
Guard guard1(lock);
Guard guard2(master.lock);
for (auto & a: accounts) {
a.second.syncToMaster(master.getAccountImpl(a.first));
a.second.syncFromMaster(master.getAccountImpl(a.first));
}
}
bool isInitialized(const AccountKey & accountKey) const
{
Guard guard(lock);
return !getAccountImpl(accountKey).uninitialized;
}
bool isStalled(const AccountKey & accountKey) const
{
Guard guard(lock);
auto & account = getAccountImpl(accountKey);
return account.uninitialized && account.requested.minutesUntil(Date::now()) >= 1.0;
}
void reinitializeStalledAccount(const AccountKey & accountKey)
{
ExcAssert(isStalled(accountKey));
Guard guard(lock);
auto & account = getAccountImpl(accountKey);
account.first = true;
account.requested = Date::now();
}
/*************************************************************************/
/* BID OPERATIONS */
/*************************************************************************/
bool authorizeBid(const AccountKey & accountKey,
const std::string & item,
Amount amount)
{
Guard guard(lock);
return (outOfSyncAccounts.count(accountKey) == 0
&& getAccountImpl(accountKey).authorizeBid(item, amount));
}
void commitBid(const AccountKey & accountKey,
const std::string & item,
Amount amountPaid,
const LineItems & lineItems)
{
Guard guard(lock);
return getAccountImpl(accountKey).commitBid(item, amountPaid, lineItems);
}
void cancelBid(const AccountKey & accountKey,
const std::string & item)
{
Guard guard(lock);
return getAccountImpl(accountKey).cancelBid(item);
}
void forceWinBid(const AccountKey & accountKey,
Amount amountPaid,
const LineItems & lineItems)
{
Guard guard(lock);
return getAccountImpl(accountKey).forceWinBid(amountPaid, lineItems);
}
/// Commit a bid that has been detached from its tracking
void commitDetachedBid(const AccountKey & accountKey,
Amount amountAuthorized,
Amount amountPaid,
const LineItems & lineItems)
{
Guard guard(lock);
return getAccountImpl(accountKey)
.commitDetachedBid(amountAuthorized, amountPaid, lineItems);
}
/// Commit a specific currency (amountToCommit)
void commitEvent(const AccountKey & accountKey, const Amount & amountToCommit)
{
Guard guard(lock);
return getAccountImpl(accountKey).commitEvent(amountToCommit);
}
Amount detachBid(const AccountKey & accountKey,
const std::string & item)
{
Guard guard(lock);
return getAccountImpl(accountKey).detachBid(item);
}
void attachBid(const AccountKey & accountKey,
const std::string & item,
Amount amountAuthorized)
{
Guard guard(lock);
getAccountImpl(accountKey).attachBid(item, amountAuthorized);
}
void logBidEvents(const Datacratic::EventRecorder & eventRecorder);
private:
struct AccountEntry : public ShadowAccount {
AccountEntry(bool uninitialized = true, bool first = true)
: requested(Date::now()), uninitialized(uninitialized), first(first)
{
}
/** This flag marks that the shadow account has been created, but
it has never had its state read from the master banker. In this
case we will need to merge anything that was done to the current
account with the initial state from the master banker to obtain
the new state.
This is the *only* case in which both the master banker and the
slave banker can both have different ideas of the state of the
budget of an account.
*/
Date requested;
bool uninitialized;
bool first;
};
AccountEntry & getAccountImpl(const AccountKey & account,
bool callOnNewAccount = true)
{
auto it = accounts.find(account);
if (it == accounts.end()) {
if (callOnNewAccount && onNewAccount)
onNewAccount(account);
it = accounts.insert(std::make_pair(account, AccountEntry()))
.first;
}
return it->second;
}
const AccountEntry & getAccountImpl(const AccountKey & account) const
{
auto it = accounts.find(account);
if (it == accounts.end())
throw ML::Exception("getting unknown account " + account.toString());
return it->second;
}
typedef ML::Spinlock Lock;
typedef std::unique_lock<Lock> Guard;
mutable Lock lock;
typedef std::map<AccountKey, AccountEntry> AccountMap;
AccountMap accounts;
typedef std::unordered_set<AccountKey> AccountSet;
AccountSet outOfSyncAccounts;
public:
std::vector<AccountKey>
getAccountKeys(const AccountKey & prefix = AccountKey()) const
{
Guard guard(lock);
std::vector<AccountKey> result;
for (auto it = accounts.lower_bound(prefix), end = accounts.end();
it != accounts.end() && it->first.hasPrefix(prefix); ++it) {
result.push_back(it->first);
}
return result;
}
void
forEachAccount(const std::function<void (const AccountKey &,
const ShadowAccount &)> &
onAccount) const
{
Guard guard(lock);
for (auto & a: accounts) {
onAccount(a.first, a.second);
}
}
void
forEachInitializedAndActiveAccount(const std::function<void (const AccountKey &,
const ShadowAccount &)> & onAccount)
{
Guard guard(lock);
for (auto & a: accounts) {
if (a.second.uninitialized || a.second.status == Account::CLOSED)
continue;
onAccount(a.first, a.second);
}
}
size_t size() const
{
Guard guard(lock);
return accounts.size();
}
bool empty() const
{
Guard guard(lock);
return accounts.empty();
}
};
} // namespace RTBKIT
| 23,530 |
645 | <gh_stars>100-1000
// Copyright 2013-2016 Stanford University
//
// Licensed under the Apache License, Version 2.0 (the License);
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an AS IS BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <stdint.h>
/* One multiply-accumulate step in the style of OpenSSL's bignum (bn)
 * "mul_add" macros, expanded inline: computes the 128-bit product
 * np * (mh:ml), where mh and ml are the high and low 32-bit halves of a
 * 64-bit multiplier, then adds c0 and c1 into the low 64 bits with carry
 * propagation into the high 64 bits.  The high word ends up in c0 and the
 * low word in c1; the function returns c0 ^ c1 — presumably to fold both
 * halves into a single observable value for equivalence testing (this file
 * is a benchmark kernel) — confirm against the harness that consumes it.
 */
uint64_t mont_mul(uint64_t c1, uint64_t np, uint32_t ml, uint32_t mh, uint64_t c0)
{
  uint64_t l,h;

  /* Split np into 32-bit halves: l = low, h = high. */
  h= (np);
  l=((h)&(0xffffffffL));
  h=(((h)>>32)&(0xffffffffL));
  {
    uint64_t m,m1,lt,ht;
    lt=l;
    ht=h;
    /* Four 32x32->64 partial products of (mh:ml) * (ht:lt):
     *   lt = ml*lt  (low  x low)
     *   m  = mh*lt  (high x low)    -- middle terms
     *   m1 = ml*ht  (low  x high)   --
     *   ht = mh*ht  (high x high)
     */
    m =((mh))*(lt);
    lt=((ml))*(lt);
    m1=((ml))*(ht);
    ht =((mh))*(ht);
    /* Sum the two middle terms; on wrap-around, carry 2^32 into ht. */
    m=(m+m1)&(0xffffffffffffffffL);
    if (m < m1)
        ht+=((((uint64_t)1)<<32)&(0xffffffffffffffffL));
    /* Fold the middle sum into the result: its high half goes into ht,
     * its low half (shifted up 32 bits) is added to lt with carry. */
    ht+=(((m)>>32)&(0xffffffffL));
    m1=(((m)<<32)&(0xffffffffffffffffL));
    lt=(lt+m1)&(0xffffffffffffffffL);
    if (lt < m1)
        ht++;
    (l)=lt;   /* low 64 bits of the 128-bit product */
    (h)=ht;   /* high 64 bits */
  };
  /* Accumulate c0 into the low word, carrying into the high word. */
  l=(l+(c0))&(0xffffffffffffffffL);
  if (l < (c0))
      h++;
  /* Then accumulate c1 the same way. */
  (c0)=(c1);
  l=(l+(c0))&(0xffffffffffffffffL);
  if (l < (c0))
      h++;
  /* Pack: c0 = high word, c1 = low word. */
  (c0)=h&(0xffffffffffffffffL);
  (c1)=l;

  return c0 ^ c1;
}
| 644 |
335 | {
"word": "School",
"definitions": [
"Send to school; educate.",
"Train or discipline (someone) in a particular skill or activity.",
"Train (a horse) on the flat or over fences."
],
"parts-of-speech": "Verb"
} | 100 |
834 | // Copyright 2004-present Facebook. All Rights Reserved.
#include "fboss/agent/platforms/sai/SaiBcmWedge40PlatformPort.h"
namespace facebook::fboss {
// Intentional no-op: the Wedge40 SAI platform port takes no platform-level
// action when a port's link status changes.  The parameters (the new
// operational "up" state and administrative "adminUp" state) are unused.
void SaiBcmWedge40PlatformPort::linkStatusChanged(
    bool /*up*/,
    bool /*adminUp*/) {}
} // namespace facebook::fboss
| 94 |
503 | package com.smartisanos.sidebar.view;
import java.io.File;
import com.smartisanos.sidebar.R;
import com.smartisanos.sidebar.util.FileInfo;
import com.smartisanos.sidebar.util.Tracker;
import com.smartisanos.sidebar.util.Utils;
import android.content.Context;
import android.content.res.Configuration;
import android.util.AttributeSet;
import android.view.LayoutInflater;
import android.view.View;
import android.view.onestep.OneStepDragUtils;
import android.widget.ImageView;
import android.widget.LinearLayout;
import android.widget.TextView;
/**
 * List row for the recent-files sidebar.  One instance can display any
 * combination of three elements: a date section header, a file entry
 * (icon + name) and a "load more" label.  {@link #reset()} hides all three
 * so the adapter can rebind the row for reuse.
 */
public class RecentFileItemView extends LinearLayout {
    private TextView mDateText;     // date section header
    private View mFileItemGroup;    // container holding icon + file name
    private ImageView mIcon;
    private TextView mFileName;
    private TextView mMoreLabel;    // "load more" footer label

    public RecentFileItemView(Context context) {
        this(context, null);
    }

    public RecentFileItemView(Context context, AttributeSet attrs) {
        this(context, attrs, 0);
    }

    public RecentFileItemView(Context context, AttributeSet attrs,
            int defStyleAttr) {
        this(context, attrs, defStyleAttr, 0);
    }

    public RecentFileItemView(Context context, AttributeSet attrs,
            int defStyleAttr, int defStyleRes) {
        super(context, attrs, defStyleAttr, defStyleRes);
        //set layout
        setOrientation(LinearLayout.VERTICAL);
        LayoutInflater.from(context).inflate(R.layout.recent_file_item, this, true);
        // find view
        mDateText = (TextView) findViewById(R.id.date_content);
        mFileItemGroup = findViewById(R.id.recent_file_item);
        mFileName = (TextView) findViewById(R.id.file_name);
        mIcon = (ImageView) findViewById(R.id.file_icon);
        mMoreLabel = (TextView) findViewById(R.id.more_label);
    }

    @Override
    protected void onConfigurationChanged(Configuration newConfig) {
        super.onConfigurationChanged(newConfig);
        // Re-resolve the string resource so a configuration change
        // (e.g. locale) picks up the correct translation.
        mMoreLabel.setText(R.string.load_more);
    }

    /** Hide every element and clear click handlers, ready for rebinding. */
    public void reset() {
        mMoreLabel.setVisibility(View.GONE);
        mDateText.setVisibility(View.GONE);
        mFileItemGroup.setVisibility(View.GONE);
        setOnClickListener(null);
        setOnLongClickListener(null);
    }

    /**
     * Bind a file entry: show its name and type icon, open the file (and
     * log a tracker event) on tap, and start a one-step drag of the file on
     * long-press.  mContext is the protected context field inherited from
     * android.view.View.
     */
    public void showItem(final FileInfo info) {
        mFileName.setText(new File(info.filePath).getName());
        mIcon.setImageResource(info.getIconId());
        mFileItemGroup.setVisibility(View.VISIBLE);
        setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                Utils.openFile(mContext, info);
                Tracker.onClick(Tracker.EVENT_OPEN_DOC);
            }
        });
        setOnLongClickListener(new View.OnLongClickListener() {
            @Override
            public boolean onLongClick(View v) {
                OneStepDragUtils.dragFile(v, mContext, new File(info.filePath), info.mimeType);
                return true;
            }
        });
    }

    /** Show the date header with the given string resource. */
    public void showDate(int resId) {
        mDateText.setText(resId);
        mDateText.setVisibility(View.VISIBLE);
    }

    /** Show the "load more" label and route row taps to the listener. */
    public void showMoreTag(View.OnClickListener listener) {
        mMoreLabel.setVisibility(View.VISIBLE);
        setOnClickListener(listener);
    }
}
| 1,331 |
1,934 | //
// JotTouchBezier.h
// jot
//
// Created by <NAME> on 4/30/15.
//
//
#import <UIKit/UIKit.h>
#import <Foundation/Foundation.h>
/**
* Private class to handle drawing variable-width cubic bezier paths in a JotDrawView.
*/
@interface JotTouchBezier : NSObject

/**
 *  The start point of the cubic bezier path.
 */
@property (nonatomic, assign) CGPoint startPoint;

/**
 *  The end point of the cubic bezier path.
 */
@property (nonatomic, assign) CGPoint endPoint;

/**
 *  The first control point of the cubic bezier path.
 */
@property (nonatomic, assign) CGPoint controlPoint1;

/**
 *  The second control point of the cubic bezier path.
 */
@property (nonatomic, assign) CGPoint controlPoint2;

/**
 *  The starting width of the cubic bezier path.
 */
@property (nonatomic, assign) CGFloat startWidth;

/**
 *  The ending width of the cubic bezier path.
 */
@property (nonatomic, assign) CGFloat endWidth;

/**
 *  The stroke color of the cubic bezier path.
 */
@property (nonatomic, strong) UIColor *strokeColor;

/**
 *  YES if the line is a constant width, NO if the width is interpolated
 *  between startWidth and endWidth along the curve.
 */
@property (nonatomic, assign) BOOL constantWidth;

/**
 *  Returns an instance of JotTouchBezier with the given stroke color.
 *
 *  @param color The color to use for drawing the bezier path.
 *
 *  @return An instance of JotTouchBezier
 */
+ (instancetype)withColor:(UIColor *)color;

/**
 *  Draws the JotTouchBezier in the current graphics context, using the
 *  strokeColor and transitioning from the start width to the end width
 *  along the length of the curve.
 */
- (void)jotDrawBezier;

/**
 *  Draws a single circle at the given point in the current graphics context,
 *  using the current fillColor of the context and the given width.
 *
 *  @param point The CGPoint to use as the center of the circle to be drawn.
 *  @param width The diameter of the circle to be drawn at the given point.
 */
+ (void)jotDrawBezierPoint:(CGPoint)point withWidth:(CGFloat)width;

@end
| 650 |
1,085 | <filename>lib/galaxy/model/migrate/versions/0091_add_tool_version_tables.py
"""
Migration script to create the tool_version and tool_version_association tables and drop the tool_id_guid_map table.
"""
import datetime
import logging
from json import loads
from sqlalchemy import (
Column,
DateTime,
ForeignKey,
Index,
Integer,
MetaData,
String,
Table,
TEXT
)
from galaxy.model.custom_types import (
_sniffnfix_pg9_hex,
TrimmedString
)
from galaxy.model.migrate.versions.util import (
localtimestamp,
nextval
)
log = logging.getLogger(__name__)
now = datetime.datetime.utcnow
metadata = MetaData()
ToolVersion_table = Table("tool_version", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, default=now, onupdate=now),
Column("tool_id", String(255)),
Column("tool_shed_repository_id", Integer, ForeignKey("tool_shed_repository.id"), index=True, nullable=True))
ToolVersionAssociation_table = Table("tool_version_association", metadata,
Column("id", Integer, primary_key=True),
Column("tool_id", Integer, ForeignKey("tool_version.id"), index=True, nullable=False),
Column("parent_id", Integer, ForeignKey("tool_version.id"), index=True, nullable=False))
def upgrade(migrate_engine):
    """Create the tool_version and tool_version_association tables, populate
    tool_version from the metadata of installed tool shed repositories, and
    drop the now-redundant tool_id_guid_map table.
    """
    print(__doc__)
    metadata.bind = migrate_engine
    metadata.reflect()
    # Create the tables.
    try:
        ToolVersion_table.create()
    except Exception:
        log.exception("Creating tool_version table failed.")
    try:
        ToolVersionAssociation_table.create()
    except Exception:
        log.exception("Creating tool_version_association table failed.")
    # Populate the tool table with tools included in installed tool shed repositories.
    cmd = "SELECT id, metadata FROM tool_shed_repository"
    result = migrate_engine.execute(cmd)
    count = 0
    for row in result:
        if row[1]:
            tool_shed_repository_id = row[0]
            # Repository metadata is stored as JSON (possibly hex-encoded on
            # PostgreSQL 9, hence _sniffnfix_pg9_hex).
            repository_metadata = loads(_sniffnfix_pg9_hex(str(row[1])))
            # Create a new row in the tool table for each tool included in repository.  We will NOT
            # handle tool_version_associations because we do not have the information we need to do so.
            tools = repository_metadata.get('tools', [])
            for tool_dict in tools:
                # NOTE(review): the guid is spliced into the SQL statement
                # unescaped; a quote character in a guid would break (or
                # inject into) this INSERT.  Guids come from tool shed
                # metadata, so they are presumably trusted -- confirm.
                # (nextval/localtimestamp return SQL fragments, so this
                # statement cannot be trivially parameterized.)
                cmd = "INSERT INTO tool_version VALUES (%s, %s, %s, '%s', %s)" % \
                    (nextval(migrate_engine, 'tool_version'), localtimestamp(migrate_engine), localtimestamp(migrate_engine), tool_dict['guid'], tool_shed_repository_id)
                migrate_engine.execute(cmd)
                count += 1
    print("Added %d rows to the new tool_version table." % count)
    # Drop the tool_id_guid_map table since the 2 new tables render it unnecessary.
    ToolIdGuidMap_table = Table("tool_id_guid_map", metadata, autoload=True)
    try:
        ToolIdGuidMap_table.drop()
    except Exception:
        log.exception("Dropping tool_id_guid_map table failed.")
def downgrade(migrate_engine):
    """Reverse upgrade(): drop the tool_version and tool_version_association
    tables and recreate an empty tool_id_guid_map table.  Data migrated into
    tool_version is NOT copied back.
    """
    metadata.bind = migrate_engine
    # Redefine the legacy table locally so it can be recreated below.
    ToolIdGuidMap_table = Table(
        "tool_id_guid_map", metadata,
        Column("id", Integer, primary_key=True),
        Column("create_time", DateTime, default=now),
        Column("update_time", DateTime, default=now, onupdate=now),
        Column("tool_id", String(255)),
        Column("tool_version", TEXT),
        Column("tool_shed", TrimmedString(255)),
        Column("repository_owner", TrimmedString(255)),
        Column("repository_name", TrimmedString(255)),
        Column("guid", TEXT),
        Index('ix_tool_id_guid_map_guid', 'guid', unique=True, mysql_length=200),
    )
    metadata.reflect()
    # Drop the association table first since it references tool_version.
    try:
        ToolVersionAssociation_table.drop()
    except Exception:
        log.exception("Dropping tool_version_association table failed.")
    try:
        ToolVersion_table.drop()
    except Exception:
        log.exception("Dropping tool_version table failed.")
    try:
        ToolIdGuidMap_table.create()
    except Exception:
        log.exception("Creating tool_id_guid_map table failed.")
| 1,604 |
335 | {
"word": "Pressing",
"definitions": [
"An act or instance of applying force or weight to something.",
"A record or other object made by the application of force or weight.",
"A series of objects pressed at one time."
],
"parts-of-speech": "Noun"
} | 105 |
301 | package org.iotivity.cloud.base.healthcheck;
/**
 * Holds health state for some monitored subject: {@link #pingAccepted()}
 * records observed activity and {@link #isHealthy()} reports the current
 * verdict based on it.
 */
public interface HealthHolder {
    /** Returns whether the subject is currently considered healthy. */
    boolean isHealthy();
    /** Records that a ping was accepted, i.e. recent activity was observed. */
    void pingAccepted();
}
| 43 |
5,079 | <reponame>Yurzs/boto<gh_stars>1000+
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from tests.compat import unittest
from tests.unit import AWSMockServiceTestCase
from tests.unit import MockServiceWithConfigTestCase
from boto.s3.connection import S3Connection, HostRequiredError
from boto.s3.connection import S3ResponseError, Bucket
class TestSignatureAlteration(AWSMockServiceTestCase):
    """Auth capability should switch to SigV4 for SigV4-only endpoints
    (e.g. the cn-north-1 region)."""
    connection_class = S3Connection

    def test_unchanged(self):
        # Default connections keep the historical 's3' auth capability.
        self.assertEqual(
            self.service_connection._required_auth_capability(),
            ['s3']
        )

    def test_switched(self):
        # Pointing at a SigV4-only host flips the capability to hmac-v4-s3.
        conn = self.connection_class(
            aws_access_key_id='less',
            aws_secret_access_key='more',
            host='s3.cn-north-1.amazonaws.com.cn'
        )
        self.assertEqual(
            conn._required_auth_capability(),
            ['hmac-v4-s3']
        )
class TestPresigned(MockServiceWithConfigTestCase):
    """generate_url must honor the query_auth flag under SigV2."""
    connection_class = S3Connection

    def test_presign_respect_query_auth(self):
        # With SigV4 disabled, query_auth decides whether a Signature
        # query parameter is embedded in the generated URL.
        self.config = {
            's3': {
                'use-sigv4': False,
            }
        }
        conn = self.connection_class(
            aws_access_key_id='less',
            aws_secret_access_key='more',
            host='s3.amazonaws.com'
        )
        url_enabled = conn.generate_url(86400, 'GET', bucket='examplebucket',
                                        key='test.txt', query_auth=True)
        url_disabled = conn.generate_url(86400, 'GET', bucket='examplebucket',
                                         key='test.txt', query_auth=False)
        self.assertIn('Signature=', url_enabled)
        self.assertNotIn('Signature=', url_disabled)
class TestSigV4HostError(MockServiceWithConfigTestCase):
    """Opting into SigV4 via config requires an explicit ``host``."""
    connection_class = S3Connection

    def test_historical_behavior(self):
        # Without the config flag nothing changes: SigV2 auth, default host.
        self.assertEqual(
            self.service_connection._required_auth_capability(),
            ['s3']
        )
        self.assertEqual(self.service_connection.host, 's3.amazonaws.com')

    def test_sigv4_opt_in(self):
        # Switch it at the config, so we can check to see how the host is
        # handled.
        self.config = {
            's3': {
                'use-sigv4': True,
            }
        }
        with self.assertRaises(HostRequiredError):
            # No host+SigV4 == KABOOM
            self.connection_class(
                aws_access_key_id='less',
                aws_secret_access_key='more'
            )
        # Ensure passing a ``host`` still works.
        conn = self.connection_class(
            aws_access_key_id='less',
            aws_secret_access_key='more',
            host='s3.cn-north-1.amazonaws.com.cn'
        )
        self.assertEqual(
            conn._required_auth_capability(),
            ['hmac-v4-s3']
        )
        self.assertEqual(
            conn.host,
            's3.cn-north-1.amazonaws.com.cn'
        )
class TestSigV4Presigned(MockServiceWithConfigTestCase):
    """Presigned-URL generation behavior when SigV4 is enabled via config."""
    connection_class = S3Connection

    def test_sigv4_presign(self):
        self.config = {
            's3': {
                'use-sigv4': True,
            }
        }
        conn = self.connection_class(
            aws_access_key_id='less',
            aws_secret_access_key='more',
            host='s3.amazonaws.com'
        )
        # Here we force an input iso_date to ensure we always get the
        # same signature.
        url = conn.generate_url_sigv4(86400, 'GET', bucket='examplebucket',
                                      key='test.txt',
                                      iso_date='20140625T000000Z')
        self.assertIn(
            'a937f5fbc125d98ac8f04c49e0204ea1526a7b8ca058000a54c192457be05b7d',
            url)

    def test_sigv4_presign_respects_is_secure(self):
        # The scheme of the generated URL must follow the is_secure flag.
        self.config = {
            's3': {
                'use-sigv4': True,
            }
        }
        conn = self.connection_class(
            aws_access_key_id='less',
            aws_secret_access_key='more',
            host='s3.amazonaws.com',
            is_secure=True,
        )
        url = conn.generate_url_sigv4(86400, 'GET', bucket='examplebucket',
                                      key='test.txt')
        self.assertTrue(url.startswith(
            'https://examplebucket.s3.amazonaws.com/test.txt?'))
        conn = self.connection_class(
            aws_access_key_id='less',
            aws_secret_access_key='more',
            host='s3.amazonaws.com',
            is_secure=False,
        )
        url = conn.generate_url_sigv4(86400, 'GET', bucket='examplebucket',
                                      key='test.txt')
        self.assertTrue(url.startswith(
            'http://examplebucket.s3.amazonaws.com/test.txt?'))

    def test_sigv4_presign_optional_params(self):
        # version_id and the security token must appear as query params.
        self.config = {
            's3': {
                'use-sigv4': True,
            }
        }
        conn = self.connection_class(
            aws_access_key_id='less',
            aws_secret_access_key='more',
            security_token='token',
            host='s3.amazonaws.com'
        )
        url = conn.generate_url_sigv4(86400, 'GET', bucket='examplebucket',
                                      key='test.txt', version_id=2)
        self.assertIn('VersionId=2', url)
        self.assertIn('X-Amz-Security-Token=token', url)

    def test_sigv4_presign_respect_query_auth(self):
        # query_auth=False must suppress the signature, same as SigV2.
        self.config = {
            's3': {
                'use-sigv4': True,
            }
        }
        conn = self.connection_class(
            aws_access_key_id='less',
            aws_secret_access_key='more',
            host='s3.amazonaws.com'
        )
        url_enabled = conn.generate_url(86400, 'GET', bucket='examplebucket',
                                        key='test.txt', query_auth=True)
        url_disabled = conn.generate_url(86400, 'GET', bucket='examplebucket',
                                        key='test.txt', query_auth=False)
        self.assertIn('Signature=', url_enabled)
        self.assertNotIn('Signature=', url_disabled)

    def test_sigv4_presign_headers(self):
        # Extra headers become part of the signed request.
        self.config = {
            's3': {
                'use-sigv4': True,
            }
        }
        conn = self.connection_class(
            aws_access_key_id='less',
            aws_secret_access_key='more',
            host='s3.amazonaws.com'
        )
        headers = {'x-amz-meta-key': 'val'}
        url = conn.generate_url_sigv4(86400, 'GET', bucket='examplebucket',
                                      key='test.txt', headers=headers)
        self.assertIn('host', url)
        self.assertIn('x-amz-meta-key', url)

    def test_sigv4_presign_response_headers(self):
        # Response-override headers must be carried in the query string.
        self.config = {
            's3': {
                'use-sigv4': True,
            }
        }
        conn = self.connection_class(
            aws_access_key_id='less',
            aws_secret_access_key='more',
            host='s3.amazonaws.com'
        )
        response_headers = {'response-content-disposition': 'attachment; filename="file.ext"'}
        url = conn.generate_url_sigv4(86400, 'GET', bucket='examplebucket',
                                      key='test.txt', response_headers=response_headers)
        self.assertIn('host', url)
        self.assertIn('response-content-disposition', url)
class TestUnicodeCallingFormat(AWSMockServiceTestCase):
    """A unicode ``calling_format`` dotted path must be importable."""
    connection_class = S3Connection

    def default_body(self):
        return """<?xml version="1.0" encoding="UTF-8"?>
<ListAllMyBucketsResult xmlns="http://doc.s3.amazonaws.com/2006-03-01">
  <Owner>
    <ID>bcaf1ffd86f461ca5fb16fd081034f</ID>
    <DisplayName>webfile</DisplayName>
  </Owner>
  <Buckets>
    <Bucket>
      <Name>quotes</Name>
      <CreationDate>2006-02-03T16:45:09.000Z</CreationDate>
    </Bucket>
    <Bucket>
      <Name>samples</Name>
      <CreationDate>2006-02-03T16:41:58.000Z</CreationDate>
    </Bucket>
  </Buckets>
</ListAllMyBucketsResult>"""

    def create_service_connection(self, **kwargs):
        # Force the calling_format to be passed as a unicode string.
        kwargs['calling_format'] = u'boto.s3.connection.OrdinaryCallingFormat'
        return super(TestUnicodeCallingFormat,
                     self).create_service_connection(**kwargs)

    def test_unicode_calling_format(self):
        # Must not raise while resolving the unicode dotted path.
        self.set_http_response(status_code=200)
        self.service_connection.get_all_buckets()
class TestHeadBucket(AWSMockServiceTestCase):
    """head_bucket must map HTTP statuses to Bucket / S3ResponseError."""
    connection_class = S3Connection

    def default_body(self):
        # HEAD requests always have an empty body.
        return ""

    def test_head_bucket_success(self):
        self.set_http_response(status_code=200)
        buck = self.service_connection.head_bucket('my-test-bucket')
        self.assertTrue(isinstance(buck, Bucket))
        self.assertEqual(buck.name, 'my-test-bucket')

    def test_head_bucket_forbidden(self):
        # 403 is translated to a synthesized AccessDenied error, since a
        # HEAD response carries no error body.
        self.set_http_response(status_code=403)
        with self.assertRaises(S3ResponseError) as cm:
            self.service_connection.head_bucket('cant-touch-this')
        err = cm.exception
        self.assertEqual(err.status, 403)
        self.assertEqual(err.error_code, 'AccessDenied')
        self.assertEqual(err.message, 'Access Denied')

    def test_head_bucket_notfound(self):
        # 404 is translated to a synthesized NoSuchBucket error.
        self.set_http_response(status_code=404)
        with self.assertRaises(S3ResponseError) as cm:
            self.service_connection.head_bucket('totally-doesnt-exist')
        err = cm.exception
        self.assertEqual(err.status, 404)
        self.assertEqual(err.error_code, 'NoSuchBucket')
        self.assertEqual(err.message, 'The specified bucket does not exist')

    def test_head_bucket_other(self):
        self.set_http_response(status_code=405)
        with self.assertRaises(S3ResponseError) as cm:
            self.service_connection.head_bucket('you-broke-it')
        err = cm.exception
        self.assertEqual(err.status, 405)
        # We don't have special-cases for this error status.
        self.assertEqual(err.error_code, None)
        self.assertEqual(err.message, '')
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
| 5,408 |
521 | /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
/* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is mozilla.org code.
*
* The Initial Developer of the Original Code is
* Netscape Communications Corporation.
* Portions created by the Initial Developer are Copyright (C) 1998
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
*
* Alternatively, the contents of this file may be used under the terms of
* either the GNU General Public License Version 2 or later (the "GPL"), or
* the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
#ifndef nsFileStreams_h__
#define nsFileStreams_h__
#include "nsIFileStreams.h"
#include "nsIFile.h"
#include "nsIInputStream.h"
#include "nsIOutputStream.h"
#include "nsISeekableStream.h"
#include "nsILineInputStream.h"
#include "nsCOMPtr.h"
#include "prlog.h"
#include "prio.h"
template<class CharType> class nsLineBuffer;
////////////////////////////////////////////////////////////////////////////////
/**
 * Base class for streams backed by an NSPR file descriptor.  Provides
 * seeking plus Close()/InitWithFileDescriptor() lifetime management of
 * the underlying PRFileDesc.
 */
class nsFileStream : public nsISeekableStream
{
public:
    NS_DECL_ISUPPORTS
    NS_DECL_NSISEEKABLESTREAM
    nsFileStream();
    virtual ~nsFileStream();
    nsresult Close();
    // Adopts |fd|; |parent| is held alive so the descriptor stays valid.
    nsresult InitWithFileDescriptor(PRFileDesc* fd, nsISupports* parent);
protected:
    PRFileDesc* mFD;
    nsCOMPtr<nsISupports> mParent; // strong reference to parent nsFileIO,
                                   // which ensures mFD remains valid.
    PRBool mCloseFD; // whether Close() should close mFD -- presumably set
                     // by InitWithFileDescriptor; confirm in the .cpp
};
////////////////////////////////////////////////////////////////////////////////
/**
 * Input stream over a local file, with optional line-based reading and
 * behavior flags such as DELETE_ON_CLOSE / REOPEN_ON_REWIND (see the IDL).
 */
class nsFileInputStream : public nsFileStream,
                          public nsIFileInputStream,
                          public nsILineInputStream
{
public:
    NS_DECL_ISUPPORTS_INHERITED
    NS_DECL_NSIINPUTSTREAM
    NS_DECL_NSIFILEINPUTSTREAM
    NS_DECL_NSILINEINPUTSTREAM
    // Overridden from nsFileStream
    NS_IMETHOD Seek(PRInt32 aWhence, PRInt64 aOffset);
    nsFileInputStream() : nsFileStream()
    {
        mBehaviorFlags = 0;
    }
    virtual ~nsFileInputStream()
    {
        Close();
    }
    static NS_METHOD
    Create(nsISupports *aOuter, REFNSIID aIID, void **aResult);
protected:
    /**
     * The file being opened. Only stored when DELETE_ON_CLOSE or
     * REOPEN_ON_REWIND are true.
     */
    nsCOMPtr<nsIFile> mFile;
    /**
     * The IO flags passed to Init() for the file open.
     * Only set for REOPEN_ON_REWIND.
     */
    PRInt32 mIOFlags;
    /**
     * The permissions passed to Init() for the file open.
     * Only set for REOPEN_ON_REWIND.
     */
    PRInt32 mPerm;
    /**
     * Flags describing our behavior. See the IDL file for possible values.
     */
    PRInt32 mBehaviorFlags;
protected:
    /**
     * Internal, called to open a file. Parameters are the same as their
     * Init() analogues.
     */
    nsresult Open(nsIFile* file, PRInt32 ioFlags, PRInt32 perm);
    /**
     * Reopen the file (for OPEN_ON_READ only!)
     */
    nsresult Reopen() { return Open(mFile, mIOFlags, mPerm); }
};
////////////////////////////////////////////////////////////////////////////////
/**
 * Output stream writing to a local file; closes the descriptor on
 * destruction via the explicitly-qualified Close() call.
 */
class nsFileOutputStream : public nsFileStream,
                           public nsIFileOutputStream
{
public:
    NS_DECL_ISUPPORTS_INHERITED
    NS_DECL_NSIOUTPUTSTREAM
    NS_DECL_NSIFILEOUTPUTSTREAM
    nsFileOutputStream() : nsFileStream() {}
    // Qualified call: virtual dispatch is unavailable during destruction.
    virtual ~nsFileOutputStream() { nsFileOutputStream::Close(); }
    static NS_METHOD
    Create(nsISupports *aOuter, REFNSIID aIID, void **aResult);
};
////////////////////////////////////////////////////////////////////////////////
#endif // nsFileStreams_h__
| 1,677 |
835 | <gh_stars>100-1000
package ai.verta.modeldb.config;
import ai.verta.modeldb.common.config.ArtifactStoreConfig;
import ai.verta.modeldb.common.config.Config;
import ai.verta.modeldb.common.config.InvalidConfigException;
import ai.verta.modeldb.common.config.S3Config;
import ai.verta.modeldb.common.exceptions.ModelDBException;
import com.google.rpc.Code;
/**
 * ModelDB artifact-store configuration supporting the "S3" and "NFS"
 * backends. Capitalized method/field names follow the codebase's
 * config-binding convention.
 */
public class MDBArtifactStoreConfig extends ArtifactStoreConfig {
  // Populated from the "S3" config section; null when the NFS backend is used.
  public S3Config S3;

  /**
   * Validates this section of the configuration.
   *
   * @param base dotted path of this section, used to build error locations
   * @throws InvalidConfigException if the store type is missing/unknown or
   *         the matching backend sub-section is absent or invalid
   */
  public void Validate(String base) throws InvalidConfigException {
    if (getArtifactStoreType() == null || getArtifactStoreType().isEmpty())
      throw new InvalidConfigException(base + ".artifactStoreType", Config.MISSING_REQUIRED);
    switch (getArtifactStoreType()) {
      case "S3":
        if (S3 == null) throw new InvalidConfigException(base + ".S3", Config.MISSING_REQUIRED);
        S3.Validate(base + ".S3");
        break;
      case "NFS":
        if (getNFS() == null)
          throw new InvalidConfigException(base + ".NFS", Config.MISSING_REQUIRED);
        getNFS().Validate(base + ".NFS");
        break;
      default:
        throw new InvalidConfigException(
            base + ".artifactStoreType", "unknown type " + getArtifactStoreType());
    }
    if (getArtifactEndpoint() != null) {
      getArtifactEndpoint().Validate(base + ".artifactEndpoint");
    }
  }

  /** Delegates to the active backend; throws INTERNAL for unknown types. */
  @Override
  public String storeTypePathPrefix() {
    switch (getArtifactStoreType()) {
      case "S3":
        return S3.storeTypePathPrefix();
      case "NFS":
        return getNFS().storeTypePathPrefix();
      default:
        throw new ModelDBException("Unknown artifact store type", Code.INTERNAL);
    }
  }

  /** Backend-specific path prefix; empty string for unknown types. */
  @Override
  public String getPathPrefixWithSeparator() {
    switch (getArtifactStoreType()) {
      case "S3":
        return S3.getCloudBucketPrefix();
      case "NFS":
        return getNFS().getNfsPathPrefix();
      default:
        return "";
    }
  }
}
| 764 |
1,756 | #include "com_libmailcore_AbstractMessagePart.h"
#include "TypesUtils.h"
#include "JavaHandle.h"
#include "MCMessageHeader.h"
#include "MCAbstractMessagePart.h"
using namespace mailcore;
#define nativeType AbstractMessagePart
#define javaType nativeType
MC_JAVA_SYNTHESIZE(MessageHeader, setHeader, header)
MC_JAVA_SYNTHESIZE(AbstractPart, setMainPart, mainPart)
| 125 |
764 | <filename>erc20/0x63D0eEa1D7C0d1e89d7e665708d7e8997C0a9eD6.json
{"symbol": "ENOL","address": "0x63D0eEa1D7C0d1e89d7e665708d7e8997C0a9eD6","overview":{"en": ""},"email": "","website": "https://ethanoltoken.com","state": "NORMAL","links": {"blog": "","twitter": "https://twitter.com/TokenEthanol","telegram": "","github": ""}} | 149 |
521 | /* $Id: QIArrowButtonSwitch.cpp $ */
/** @file
* VBox Qt GUI - QIArrowButtonSwitch class implementation.
*/
/*
* Copyright (C) 2006-2017 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*/
#ifdef VBOX_WITH_PRECOMPILED_HEADERS
# include <precomp.h>
#else /* !VBOX_WITH_PRECOMPILED_HEADERS */
/* Qt includes: */
# include <QKeyEvent>
/* GUI includes: */
# include "QIArrowButtonSwitch.h"
#endif /* !VBOX_WITH_PRECOMPILED_HEADERS */
/* Constructs the switch button in the collapsed state: */
QIArrowButtonSwitch::QIArrowButtonSwitch(QWidget *pParent /* = 0 */)
    : QIRichToolButton(pParent)
    , m_fExpanded(false)
{
    /* Update icon: */
    updateIcon();
}
/* Defines the icons shown for the collapsed/expanded states and refreshes
 * the currently visible one: */
void QIArrowButtonSwitch::setIcons(const QIcon &iconCollapsed, const QIcon &iconExpanded)
{
    /* Assign icons: */
    m_iconCollapsed = iconCollapsed;
    m_iconExpanded = iconExpanded;
    /* Update icon: */
    updateIcon();
}
/* Forces the switch into the given state and refreshes the icon: */
void QIArrowButtonSwitch::setExpanded(bool fExpanded)
{
    /* Set button state: */
    m_fExpanded = fExpanded;
    /* Update icon: */
    updateIcon();
}
void QIArrowButtonSwitch::sltButtonClicked()
{
/* Toggle button state: */
m_fExpanded = !m_fExpanded;
/* Update icon: */
updateIcon();
}
/* Keyboard handling: Minus collapses, Plus expands: */
void QIArrowButtonSwitch::keyPressEvent(QKeyEvent *pEvent)
{
    const int iKey = pEvent->key();
    /* Emulate a click when the requested state differs from the current: */
    if (   (iKey == Qt::Key_Minus && m_fExpanded)
        || (iKey == Qt::Key_Plus && !m_fExpanded))
        return animateClick();
    /* Call to base-class: */
    QIRichToolButton::keyPressEvent(pEvent);
}
| 744 |
685 | <reponame>adi-g15/LemonOS
#pragma once
#include <Video/VideoConsole.h>
#include <stdarg.h>
#include <Debug.h>
/* Kernel logging facility: writes to an optional VideoConsole and exposes
 * leveled helpers (Print/Warning/Error/Info) plus a compile-time-gated
 * Debug(). */
namespace Log{
    extern VideoConsole* console;
    void LateInitialize();
    void SetVideoConsole(VideoConsole* con);
    void DisableBuffer();
    void EnableBuffer();
    // printf-style formatting into the log; colour defaults to white.
    void WriteF(const char* __restrict format, va_list args);
    void Write(const char* str, uint8_t r = 255, uint8_t g = 255, uint8_t b = 255);
    void Write(unsigned long long num, bool hex = true, uint8_t r = 255, uint8_t g = 255, uint8_t b = 255);
    void Print(const char* __restrict fmt, ...);
    //void Warning(const char* str);
    void Warning(unsigned long long num);
    void Warning(const char* __restrict fmt, ...);
    //void Error(const char* str);
    void Error(unsigned long long num, bool hex = true);
    void Error(const char* __restrict fmt, ...);
    //void Info(const char* str);
    void Info(unsigned long long num, bool hex = true);
    void Info(const char* __restrict fmt, ...);
    #ifdef KERNEL_DEBUG
    // Logs only when the module's debug level |var| is at least |lvl|.
    // NOTE(review): the prefix says "[INFO]" although this is the debug
    // channel -- confirm whether "[DEBUG]" was intended.
    __attribute__((always_inline)) inline static void Debug(const int& var, const int lvl, const char* __restrict fmt, ...){
        if(var >= lvl){
            Write("\r\n[INFO] ");
            va_list args;
            va_start(args, fmt);
            WriteF(fmt, args);
            va_end(args);
        }
    }
    #else
    // Non-debug builds: swallow all arguments with zero overhead.
    __attribute__ ((unused, always_inline)) inline static void Debug(...){}
    #endif
}
| 571 |
676 | package com.alorma.github.ui.fragment.donate;
import android.os.Parcel;
import android.os.Parcelable;
import java.text.DecimalFormat;
/**
 * Parcelable value object describing an in-app donation option:
 * a billing SKU plus its price.
 */
public class DonateItem implements Parcelable {
  // Billing product identifier.
  private String sku;
  // Price amount; rendered in euros by toString().
  private double quantity;

  public DonateItem(String sku, double quantity) {
    this.sku = sku;
    this.quantity = quantity;
  }

  public String getSku() {
    return sku;
  }

  public double getQuantity() {
    return quantity;
  }

  /**
   * Renders the amount followed by the euro sign, e.g. "1.50 €".
   * NOTE(review): the "#.00" pattern is default-locale-sensitive (decimal
   * separator) and drops the leading zero for amounts below 1 -- confirm
   * this is intended for display.
   */
  @Override
  public String toString() {
    String euro = "\u20ac";
    DecimalFormat df = new DecimalFormat("#.00");
    return df.format(quantity) + " " + euro;
  }

  @Override
  public int describeContents() {
    return 0;
  }

  // Parcel layout: sku (String), quantity (double) -- must match the
  // read order in the Parcel constructor below.
  @Override
  public void writeToParcel(Parcel dest, int flags) {
    dest.writeString(this.sku);
    dest.writeDouble(this.quantity);
  }

  protected DonateItem(Parcel in) {
    this.sku = in.readString();
    this.quantity = in.readDouble();
  }

  public static final Parcelable.Creator<DonateItem> CREATOR = new Parcelable.Creator<DonateItem>() {
    @Override
    public DonateItem createFromParcel(Parcel source) {
      return new DonateItem(source);
    }

    @Override
    public DonateItem[] newArray(int size) {
      return new DonateItem[size];
    }
  };
}
| 455 |
4,054 | <gh_stars>1000+
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#pragma once
#include <memory>
namespace proton {

class ProtonConfigSnapshot;

/*
 * Interface class to handle config changes to proton using config
 * snapshots spanning all document types.
 */
class IProtonConfigurer
{
public:
    // Defaulted virtual destructor: required for deletion through the
    // interface pointer (modernized from an empty user-provided body).
    virtual ~IProtonConfigurer() = default;

    /*
     * Apply the given config snapshot covering all document types.
     */
    virtual void reconfigure(std::shared_ptr<ProtonConfigSnapshot> configSnapshot) = 0;
};

} // namespace proton
| 152 |
2,824 | <filename>spine-libgdx/spine-libgdx/src/com/esotericsoftware/spine/ConstraintData.java
/******************************************************************************
* Spine Runtimes License Agreement
* Last updated January 1, 2020. Replaces all prior versions.
*
* Copyright (c) 2013-2020, Esoteric Software LLC
*
* Integration of the Spine Runtimes into software or otherwise creating
* derivative works of the Spine Runtimes is permitted under the terms and
* conditions of Section 2 of the Spine Editor License Agreement:
* http://esotericsoftware.com/spine-editor-license
*
* Otherwise, it is permitted to integrate the Spine Runtimes into software
* or otherwise create derivative works of the Spine Runtimes (collectively,
* "Products"), provided that each user of the Products must obtain their own
* Spine Editor license and redistribution of the Products in any form must
* include this license and copyright notice.
*
* THE SPINE RUNTIMES ARE PROVIDED BY ESOTERIC SOFTWARE LLC "AS IS" AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL ESOTERIC SOFTWARE LLC BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES,
* BUSINESS INTERRUPTION, OR LOSS OF USE, DATA, OR PROFITS) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THE SPINE RUNTIMES, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
package com.esotericsoftware.spine;
/** Base class holding the setup-pose data shared by every constraint type. */
abstract public class ConstraintData {
	final String name;
	int order;
	boolean skinRequired;

	public ConstraintData (String name) {
		if (name == null) throw new IllegalArgumentException("name cannot be null.");
		this.name = name;
	}

	/** The constraint's name, which is unique across all constraints in the skeleton of the same type. */
	public String getName () {
		return this.name;
	}

	/** The ordinal of this constraint for the order a skeleton's constraints will be applied by
	 * {@link Skeleton#updateWorldTransform()}. */
	public int getOrder () {
		return this.order;
	}

	public void setOrder (int order) {
		this.order = order;
	}

	/** When true, {@link Skeleton#updateWorldTransform()} only updates this constraint if the {@link Skeleton#getSkin()} contains
	 * this constraint.
	 * <p>
	 * See {@link Skin#getConstraints()}. */
	public boolean getSkinRequired () {
		return this.skinRequired;
	}

	public void setSkinRequired (boolean skinRequired) {
		this.skinRequired = skinRequired;
	}

	/** Returns the constraint's name. */
	public String toString () {
		return this.name;
	}
}
| 818 |
/* Compiler conformance test for struct/union declaration and member access.
 * Each failed check returns a distinct nonzero code; success returns 0
 * (implicit from main in C99+).  NOTE(review): several checks depend on a
 * specific target layout -- the `8*4` below assumes sizeof(struct A) == 32,
 * and the empty-struct check assumes size 0 (a GNU C extension). */
int main() {
    struct A *q;
    struct A {
        int a_int_one;
        struct B {
            int b_int_one;
            long b_long;
            int b_int_two;
        } b_struct;
        int a_int_two, *a_ptr;
        int;
    } a;
    q = &a;
    void* p1 = q + 1;
    char* p2 = p1;
    // this is a hacky test to check sizeof(struct A)
    void* p3 = p2 - 8*4;
    if(p3 != q) return 1;
    // Empty struct declarations; &b == &b + 1 only if sizeof(struct I) == 0.
    struct {};
    struct I {} b;
    if(&b != (&b + 1)) return 2;
    //////////////////////////
    // Direct member assignment and access through a member pointer.
    a.a_int_one = 10;
    if(a.a_int_one != 10) return 3;
    a.a_ptr = &a.a_int_one;
    *a.a_ptr = 20;
    if(a.a_int_one != 20) return 4;
    // Access through a struct pointer: (*q).x and q->x must agree.
    q = &a;
    (*q).a_int_two = 15;
    if(a.a_int_two != 15) return 5;
    if(q->a_int_two != 15) return 11;
    // A struct's address equals the address of its first member.
    p1 = q;
    p3 = &a.a_int_one;
    if(p1 != p3) return 6;
    // Nested struct member access in all equivalent spellings.
    a.b_struct.b_long = 10;
    if(a.b_struct.b_long != 10) return 7;
    if((*(&a.b_struct)).b_long != 10) return 8;
    if((&a.b_struct)->b_long != 10) return 12;
    long* p_val = &a.b_struct.b_long;
    if(*p_val != 10) return 9;
    *p_val = 20;
    if(a.b_struct.b_long != 20) return 10;
    // Arrays of structs and pointer arithmetic over them.
    struct A array[10];
    array[3].b_struct.b_int_one = 3;
    if(array[3].b_struct.b_int_one != 3) return 13;
    if((&array[0] + 3)->b_struct.b_int_one != 3) return 14;
    // Check with array members
    struct F {
        int array[10];
    };
    struct F array2[10];
    array2[5].array[5] = 3;
    if(array2[5].array[5] != 3) return 15;
    // Check anonymous struct
    struct {
        int a;
    } s;
    s.a = 3;
    if(s.a != 3) return 16;
    // Check with union members
    struct C {
        int c_int;
        union D {
            int d_int;
            long d_long;
        } nested_union_d;
        union E {
            int e_int;
        } nested_union_e;
    };
}
| 789 |
348 | <reponame>chamberone/Leaflet.PixiOverlay<gh_stars>100-1000
{"nom":"Visan","circ":"4ème circonscription","dpt":"Vaucluse","inscrits":1538,"abs":793,"votants":745,"blancs":11,"nuls":5,"exp":729,"res":[{"nuance":"REM","nom":"Mme <NAME>","voix":175},{"nuance":"FN","nom":"Mme <NAME>","voix":129},{"nuance":"EXD","nom":"M. <NAME>","voix":94},{"nuance":"LR","nom":"M. <NAME>","voix":80},{"nuance":"FI","nom":"M. <NAME>","voix":69},{"nuance":"SOC","nom":"Mme <NAME>","voix":65},{"nuance":"DVD","nom":"M. <NAME>","voix":48},{"nuance":"ECO","nom":"M. <NAME>","voix":18},{"nuance":"DLF","nom":"Mme <NAME>","voix":14},{"nuance":"ECO","nom":"Mme <NAME>","voix":12},{"nuance":"COM","nom":"Mme <NAME>","voix":12},{"nuance":"EXG","nom":"Mme <NAME>","voix":8},{"nuance":"DIV","nom":"Mme <NAME>","voix":4},{"nuance":"REG","nom":"Mme <NAME>","voix":1},{"nuance":"DVD","nom":"M. <NAME>","voix":0}]} | 361 |
3,442 | <filename>src/net/java/sip/communicator/impl/protocol/sip/OperationSetTypingNotificationsSipImpl.java
/*
* Jitsi, the OpenSource Java VoIP and Instant Messaging client.
*
* Copyright @ 2015 Atlassian Pty Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.java.sip.communicator.impl.protocol.sip;
import java.text.*;
import java.util.*;
import javax.sip.*;
import javax.sip.header.*;
import javax.sip.message.*;
import net.java.sip.communicator.service.protocol.*;
import net.java.sip.communicator.service.protocol.Message;
import net.java.sip.communicator.service.protocol.event.*;
import net.java.sip.communicator.util.*;
import org.jitsi.util.xml.XMLUtils;
import org.w3c.dom.*;
/**
* A implementation of the typing notification operation
* set.
*
* rfc3994
*
* @author <NAME>
*/
public class OperationSetTypingNotificationsSipImpl
extends AbstractOperationSetTypingNotifications<ProtocolProviderServiceSipImpl>
implements SipMessageProcessor,
MessageListener
{
/**
* The logger.
*/
private static final Logger logger =
Logger.getLogger(OperationSetTypingNotificationsSipImpl.class);
/**
* A reference to the persistent presence operation set that we use
* to match incoming messages to <tt>Contact</tt>s and vice versa.
*/
private OperationSetPresenceSipImpl opSetPersPresence = null;
/**
* A reference to the persistent presence operation set that we use
* to match incoming messages to <tt>Contact</tt>s and vice versa.
*/
private OperationSetBasicInstantMessagingSipImpl opSetBasicIm = null;
/**
* Registration listener instance.
*/
private final RegistrationStateListener registrationListener;
// XML documents types
/**
* The content type of the sent message.
*/
private static final String CONTENT_TYPE = "application/im-iscomposing+xml";
/**
* The subtype of the message.
*/
private static final String CONTENT_SUBTYPE = "im-iscomposing+xml";
// isComposing elements and attributes
/**
* IsComposing body name space.
*/
private static final String NS_VALUE = "urn:ietf:params:xml:ns:im-iscomposing";
/**
* The state element.
*/
private static final String STATE_ELEMENT= "state";
/**
* The refresh element.
*/
private static final String REFRESH_ELEMENT= "refresh";
/**
* Default refresh time for incoming events.
*/
private static final int REFRESH_DEFAULT_TIME = 120;
/**
* The minimum refresh time used to be sent.
*/
private static final int REFRESH_TIME = 60;
/**
* The state active when composing.
*/
private static final String COMPOSING_STATE_ACTIVE = "active";
/**
* The state idle when composing is finished.
*/
private static final String COMPOSING_STATE_IDLE = "idle";
/**
* The global timer managing the tasks.
*/
private Timer timer = new Timer();
/**
* The timer tasks for received events, it timer time is reached this
* means the user has gone idle.
*/
private final List<TypingTask> typingTasks = new Vector<TypingTask>();
    /**
     * Creates an instance of this operation set.
     * @param provider a ref to the <tt>ProtocolProviderServiceImpl</tt>
     * that created us and that we'll use for retrieving the underlying sip
     * connection.
     * @param opSetBasicIm the parent instant messaging operation set.
     */
    OperationSetTypingNotificationsSipImpl(
        ProtocolProviderServiceSipImpl provider,
        OperationSetBasicInstantMessagingSipImpl opSetBasicIm)
    {
        super(provider);
        this.registrationListener = new RegistrationStateListener();
        provider.addRegistrationStateChangeListener(registrationListener);
        this.opSetBasicIm = opSetBasicIm;
        // Register so incoming SIP MESSAGE requests are routed through
        // processMessage() below.
        opSetBasicIm.addMessageProcessor(this);
    }
    /**
     * Our listener that will tell us when we're registered to
     */
    private class RegistrationStateListener
        implements RegistrationStateChangeListener
    {
        /**
         * The method is called by a ProtocolProvider implementation whenever
         * a change in the registration state of the corresponding provider had
         * occurred.
         * @param evt ProviderStatusChangeEvent the event describing the status
         * change.
         */
        public void registrationStateChanged(RegistrationStateChangeEvent evt)
        {
            if (logger.isDebugEnabled())
                logger.debug("The provider changed state from: "
                             + evt.getOldState()
                             + " to: " + evt.getNewState());
            // The presence operation set only becomes available once the
            // provider is registered; grab it here for later contact lookups.
            if (evt.getNewState() == RegistrationState.REGISTERED)
            {
                opSetPersPresence =
                    (OperationSetPresenceSipImpl) parentProvider
                        .getOperationSet(OperationSetPersistentPresence.class);
            }
        }
    }
    /**
     * Processes an incoming SIP MESSAGE request, handling it when it is an
     * im-iscomposing typing notification (rfc3994) and firing the matching
     * typing event.
     * @param requestEvent the incoming event holding the message
     * @return whether this message needs further processing(true) or no(false)
     */
    public boolean processMessage(RequestEvent requestEvent)
    {
        // get the content
        String content = null;
        Request req = requestEvent.getRequest();
        ContentTypeHeader ctheader =
            (ContentTypeHeader)req.getHeader(ContentTypeHeader.NAME);
        // ignore messages which are not typing
        // notifications and continue processing
        if (ctheader == null || !ctheader.getContentSubType()
            .equalsIgnoreCase(CONTENT_SUBTYPE))
            return true;
        content = new String(req.getRawContent());
        // NOTE(review): content cannot be null here (new String never is);
        // only the emptiness check is effective.
        if(content == null || content.length() == 0)
        {
            // send error
            sendResponse(requestEvent, Response.BAD_REQUEST);
            return false;
        }
        // who sent this request?
        FromHeader fromHeader = (FromHeader)
            requestEvent.getRequest().getHeader(FromHeader.NAME);
        if (fromHeader == null)
        {
            logger.error("received a request without a from header");
            return true;
        }
        Contact from = opSetPersPresence.resolveContactID(
            fromHeader.getAddress().getURI().toString());
        // Create a volatile contact if the sender is not in the contact list.
        if (from == null)
        {
            //create the volatile contact
            if (fromHeader.getAddress().getDisplayName() != null)
            {
                from = opSetPersPresence.createVolatileContact(
                    fromHeader.getAddress().getURI().toString(),
                    fromHeader.getAddress().getDisplayName().toString());
            }
            else
            {
                from = opSetPersPresence.createVolatileContact(
                    fromHeader.getAddress().getURI().toString());
            }
        }
        // parse content
        Document doc = null;
        try
        {
            // parse content; parse failures are deliberately swallowed --
            // the null check below reports them as BAD_REQUEST.
            doc = opSetPersPresence.convertDocument(content);
        }
        catch(Exception e){}
        if (doc == null)
        {
            // send error
            sendResponse(requestEvent, Response.BAD_REQUEST);
            return false;
        }
        if (logger.isDebugEnabled())
            logger.debug("parsing:\n" + content);
        // <state> -- mandatory element per the im-iscomposing schema.
        NodeList stateList = doc.getElementsByTagNameNS(NS_VALUE,
            STATE_ELEMENT);
        if (stateList.getLength() == 0)
        {
            logger.error("no state element in this document");
            // send error
            sendResponse(requestEvent, Response.BAD_REQUEST);
            return false;
        }
        Node stateNode = stateList.item(0);
        if (stateNode.getNodeType() != Node.ELEMENT_NODE)
        {
            logger.error("the state node is not an element");
            // send error
            sendResponse(requestEvent, Response.BAD_REQUEST);
            return false;
        }
        String state = XMLUtils.getText((Element)stateNode);
        if(state == null || state.length() == 0)
        {
            logger.error("the state element without value");
            // send error
            sendResponse(requestEvent, Response.BAD_REQUEST);
            return false;
        }
        // <refresh> -- optional; fall back to the default interval on
        // absence or unparsable content.
        NodeList refreshList = doc.getElementsByTagNameNS(NS_VALUE,
            REFRESH_ELEMENT);
        int refresh = REFRESH_DEFAULT_TIME;
        if (refreshList.getLength() != 0)
        {
            Node refreshNode = refreshList.item(0);
            if (refreshNode.getNodeType() == Node.ELEMENT_NODE)
            {
                String refreshStr = XMLUtils.getText((Element)refreshNode);
                try
                {
                    refresh = Integer.parseInt(refreshStr);
                }
                catch (Exception e)
                {
                    logger.error("Wrong content for refresh", e);
                }
            }
        }
        // process the typing info we have gathered
        if(state.equals(COMPOSING_STATE_ACTIVE))
        {
            // Cancel any pending idle-timeout task for this sender.
            TypingTask task = findTypingTask(from);
            if(task != null)
            {
                typingTasks.remove(task);
                task.cancel();
            }
            // A canceled TimerTask cannot be rescheduled, so a fresh task
            // is created for every schedule.
            task = new TypingTask(from, true);
            typingTasks.add(task);
            // refresh is in seconds; Timer.schedule expects milliseconds.
            timer.schedule(task, refresh * 1000);
            fireTypingNotificationsEvent(from, STATE_TYPING);
        }
        else
        if(state.equals(COMPOSING_STATE_IDLE))
        {
            fireTypingNotificationsEvent(from, STATE_PAUSED);
        }
        // send ok
        sendResponse(requestEvent, Response.OK);
        return false;
    }
/**
 * Process the responses of sent messages
 * @param responseEvent the incoming event holding the response
 * @param sentMsg map containing sent messages
 * @return whether this message needs further processing(true) or no(false)
 */
public boolean processResponse(ResponseEvent responseEvent,
                               Map<String, Message> sentMsg)
{
    Request request = responseEvent.getClientTransaction().getRequest();
    ContentTypeHeader contentTypeHeader = (ContentTypeHeader)
        request.getHeader(ContentTypeHeader.NAME);

    // only typing notifications are handled here; anything else is left
    // for the next processor in the chain
    if (contentTypeHeader == null
        || !contentTypeHeader.getContentSubType()
                .equalsIgnoreCase(CONTENT_SUBTYPE))
        return true;

    // the call id identifies the original message in the sent map
    String callId = ((CallIdHeader)request.getHeader(CallIdHeader.NAME))
        .getCallId();

    int statusCode = responseEvent.getResponse().getStatusCode();

    if (statusCode >= 200 && statusCode < 300)
    {
        if (logger.isDebugEnabled())
            logger.debug("Ack received from the network : "
                + responseEvent.getResponse().getReasonPhrase());

        // delivered successfully, forget the original message
        sentMsg.remove(callId);
        return false;
    }

    if (statusCode >= 400 && statusCode != 401 && statusCode != 407)
    {
        logger.warn("Error received : "
            + responseEvent.getResponse().getReasonPhrase());

        // non-recoverable failure, forget the original message
        sentMsg.remove(callId);
        return false;
    }

    // 401/407 (and 1xx/3xx) responses still need e.g. authentication
    // handling further down the chain
    return true;
}
/**
 * Process the timeouts of sent messages
 * @param timeoutEvent the event holding the request
 * @param sentMessages map containing sent messages
 * @return whether this message needs further processing(true) or no(false)
 */
public boolean processTimeout(TimeoutEvent timeoutEvent,
                              Map<String, Message> sentMessages)
{
    Request request = timeoutEvent.getClientTransaction().getRequest();
    ContentTypeHeader contentType = (ContentTypeHeader)
        request.getHeader(ContentTypeHeader.NAME);

    // typing notifications are best effort: when one times out we simply
    // drop it (false); any other request needs further processing (true)
    if (contentType == null)
        return true;

    return !CONTENT_SUBTYPE.equalsIgnoreCase(contentType.getContentSubType());
}
/**
 * Finds the pending typing task, if any, associated with a given contact.
 * @param contact the contact whose task we are looking for.
 * @return the matching typing task, or null when none is scheduled.
 */
private TypingTask findTypingTask(Contact contact)
{
    for (TypingTask candidate : typingTasks)
    {
        // first match wins; at most one task is scheduled per contact
        if (candidate.getContact().equals(contact))
            return candidate;
    }

    return null;
}
/**
 * Builds an "isComposing" XML document describing the given typing state
 * and sends it to the specified contact as a SIP MESSAGE request. Only
 * STATE_TYPING and STATE_STOPPED are sent; all other states are ignored.
 * Send failures are logged and swallowed.
 *
 * @param to the contact to notify; must be a ContactSipImpl
 * @param typingState one of the STATE_* typing constants
 * @throws IllegalStateException if the provider is not connected
 *         (from assertConnected())
 * @throws IllegalArgumentException if to is not a Sip contact
 */
public void sendTypingNotification(Contact to, int typingState)
    throws IllegalStateException, IllegalArgumentException
{
    assertConnected();

    if( !(to instanceof ContactSipImpl) )
        throw new IllegalArgumentException(
            "The specified contact is not a Sip contact."
            + to);

    // build the isComposing document
    Document doc = opSetPersPresence.createDocument();

    Element rootEl = doc.createElementNS(NS_VALUE, "isComposing");
    rootEl.setAttribute("xmlns:xsi", "http://www.w3.org/2001/XMLSchema-instance");
    doc.appendChild(rootEl);

    /*
    Element contentType = doc.createElement("contenttype");
    Node contentTypeValue =
        doc.createTextNode(OperationSetBasicInstantMessaging.DEFAULT_MIME_TYPE);
    contentType.appendChild(contentTypeValue);
    rootEl.appendChild(contentType);*/

    if(typingState == STATE_TYPING)
    {
        // active typing also carries a <refresh> interval after which the
        // peer may consider typing stopped
        Element state = doc.createElement("state");
        Node stateValue =
            doc.createTextNode(COMPOSING_STATE_ACTIVE);

        state.appendChild(stateValue);
        rootEl.appendChild(state);

        Element refresh = doc.createElement("refresh");
        Node refreshValue = doc.createTextNode(String.valueOf(REFRESH_TIME));

        refresh.appendChild(refreshValue);
        rootEl.appendChild(refresh);
    }
    else if(typingState == STATE_STOPPED)
    {
        Element state = doc.createElement("state");
        Node stateValue =
            doc.createTextNode(COMPOSING_STATE_IDLE);

        state.appendChild(stateValue);
        rootEl.appendChild(state);
    }
    else // ignore other events
        return;

    // wrap the serialized document in an instant message
    Message message =
        opSetBasicIm.createMessage(opSetPersPresence.convertDocument(doc),
            CONTENT_TYPE,
            OperationSetBasicInstantMessaging.DEFAULT_MIME_ENCODING, null);

    //create the message
    Request messageRequest;
    try
    {
        messageRequest = opSetBasicIm.createMessageRequest(to, message);
    }
    catch (OperationFailedException ex)
    {
        logger.error(
            "Failed to create the message."
            , ex);
        return;
    }

    try
    {
        opSetBasicIm.sendMessageRequest(messageRequest, to, message);
    }
    catch(TransactionUnavailableException ex)
    {
        logger.error(
            "Failed to create messageTransaction.\n"
            + "This is most probably a network connection error."
            , ex);
        return;
    }
    catch(SipException ex)
    {
        logger.error(
            "Failed to send the message."
            , ex);
        return;
    }
}
/**
 * Builds and sends a response with the given status code for the given
 * request. Failures are logged but never propagated to the caller.
 *
 * @param requestEvent the request being answered
 * @param response the SIP status code of the answer.
 */
private void sendResponse(RequestEvent requestEvent, int response)
{
    // answer
    try
    {
        Response answer = parentProvider.getMessageFactory()
            .createResponse(response, requestEvent.getRequest());

        SipStackSharing.getOrCreateServerTransaction(requestEvent)
            .sendResponse(answer);
    }
    catch (ParseException exc)
    {
        logger.error("failed to build the response", exc);
    }
    catch (SipException exc)
    {
        logger.error("failed to send the response : " + exc.getMessage(),
            exc);
    }
    catch (InvalidArgumentException exc)
    {
        if (logger.isDebugEnabled())
            logger.debug("Invalid argument for createResponse : "
                + exc.getMessage(), exc);
    }
}
/**
 * When a message is delivered fire that typing has stopped.
 * @param evt the received message event
 */
public void messageReceived(MessageReceivedEvent evt)
{
    Contact from = evt.getSourceContact();
    TypingTask task = findTypingTask(from);

    if(task != null)
    {
        // also drop the task from the pending list: a canceled TimerTask
        // never runs, so without this it would linger in typingTasks and
        // keep being found (and re-fired) for every subsequent message
        // (processMessage and TypingTask.run both perform this removal)
        typingTasks.remove(task);
        task.cancel();
        fireTypingNotificationsEvent(from, STATE_STOPPED);
    }
}
/**
 * Not used by the typing notification implementation; typing state is
 * only updated on received messages.
 * @param evt the delivery event (ignored)
 */
public void messageDelivered(MessageDeliveredEvent evt)
{}
/**
 * Not used by the typing notification implementation.
 * @param evt the delivery failure event (ignored)
 */
public void messageDeliveryFailed(MessageDeliveryFailedEvent evt)
{}
/**
 * Frees allocated resources.
 */
void shutdown()
{
    // detach our registration state listener from the provider so it no
    // longer calls back into this operation set
    parentProvider.removeRegistrationStateChangeListener(
        registrationListener);
}
/**
 * Task that will fire typing stopped when refresh time expires.
 */
private class TypingTask
    extends TimerTask
{
    /**
     * The contact that is typing in case of receiving the event and
     * the contact to which we are sending notifications in case of
     * sending events.
     */
    private final Contact contact;

    /**
     * Create typing task.
     * @param contact the contact.
     * @param receiving the direction.
     * NOTE(review): receiving is currently unused -- the direction is
     * never stored; confirm whether callers rely on it before removing.
     */
    TypingTask(Contact contact, boolean receiving)
    {
        this.contact = contact;
    }

    /**
     * Fired by the timer once the refresh interval elapses: removes this
     * task from the pending list and signals that typing has stopped.
     */
    @Override
    public void run()
    {
        typingTasks.remove(this);

        fireTypingNotificationsEvent(contact, STATE_STOPPED);
    }

    /**
     * @return the contact
     */
    public Contact getContact()
    {
        return contact;
    }
}
}
| 8,197 |
1,272 | <reponame>AndersSpringborg/avs-device-sdk
/*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
#ifndef ACSDKEQUALIZERINTERFACES_EQUALIZERCONTROLLERLISTENERINTERFACE_H_
#define ACSDKEQUALIZERINTERFACES_EQUALIZERCONTROLLERLISTENERINTERFACE_H_
#include "EqualizerTypes.h"
namespace alexaClientSDK {
namespace acsdkEqualizerInterfaces {
/**
 * An interface to listen for @c EqualizerController state changes.
 */
class EqualizerControllerListenerInterface {
public:
    /**
     * Destructor.
     */
    virtual ~EqualizerControllerListenerInterface() = default;

    /**
     * Receives the new state of the @c EqualizerController. This callback is called after all changes have been
     * applied.
     *
     * @param newState New state of the @c EqualizerController.
     */
    virtual void onEqualizerStateChanged(const EqualizerState& newState) = 0;

    /**
     * Receives the same state of the @c EqualizerController when an equalizer setting is changed to a state
     * identical to the current one. This callback is called after all changes have been applied.
     *
     * @param newState New state of the @c EqualizerController.
     */
    virtual void onEqualizerSameStateChanged(const EqualizerState& newState) = 0;
};
} // namespace acsdkEqualizerInterfaces
} // namespace alexaClientSDK
#endif // ACSDKEQUALIZERINTERFACES_EQUALIZERCONTROLLERLISTENERINTERFACE_H_
| 609 |
945 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iotdb.db.mpp.plan.expression.leaf;
import org.apache.iotdb.db.mpp.plan.expression.Expression;
import org.apache.iotdb.db.mpp.transformation.dag.udf.UDTFExecutor;
import java.time.ZoneId;
import java.util.Collections;
import java.util.List;
import java.util.Map;
/** Base class for expressions that form the leaves of an expression tree. */
public abstract class LeafOperand extends Expression {

  /** A leaf never has sub-expressions, so this is always an empty list. */
  @Override
  public final List<Expression> getExpressions() {
    return Collections.emptyList();
  }

  /** Nothing to construct for a leaf operand. */
  @Override
  public final void constructUdfExecutors(
      Map<String, UDTFExecutor> expressionName2Executor, ZoneId zoneId) {
    // nothing to do
  }
}
| 402 |
import os, psutil

from aes import AES

# Pin the Python process to the last CPU so that timing measurements are
# not disturbed by the scheduler migrating the process between cores.
# Note: Process.cpu_affinity() as a combined getter/setter is the
# psutil >= 2.0 API (1.2.x used get_cpu_affinity()/set_cpu_affinity()).
p = psutil.Process(os.getpid())
proc_list = p.cpu_affinity()
p.cpu_affinity([proc_list[-1]])

# Perform several encryption / decryption operations over fixed test data.
random_iv = bytearray(os.urandom(16))
random_key = bytearray(os.urandom(16))
data = bytearray(list(range(256)))
# Split the plaintext at a non-block-aligned offset to exercise streaming
# encryption across chunk boundaries.
data1 = data[:151]
data2 = data[151:]

# Note: __PROFILE_AES__ must be defined when building the native
# module in order for the print statements below to work
aes_ctr = AES(mode='ctr', key=random_key, iv=random_iv)
result = aes_ctr.encrypt(data1)
if result:
    print('Encrypted data1 in: %5d cycles' % result)
result = aes_ctr.encrypt(data2)
if result:
    print('Encrypted data2 in: %5d cycles' % result)

# Concatenate the two encrypted chunks and decrypt in one pass. The
# bytearray is transformed in place (encrypt/decrypt return a cycle
# count, not the ciphertext), so a successful round trip restores the
# original plaintext.
data_new = data1 + data2
aes_ctr = AES(mode='ctr', key=random_key, iv=random_iv)
result = aes_ctr.decrypt(data_new)
if result:
    print('Decrypted data in: %5d cycles' % result)

assert data == data_new, "The demo has failed."
| 391 |
1,178 | /*
* Copyright 2020 Makani Technologies LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef CONTROL_ESTIMATOR_ESTIMATOR_PERCH_AZI_H_
#define CONTROL_ESTIMATOR_ESTIMATOR_PERCH_AZI_H_

#include "control/estimator/estimator_types.h"
#include "control/fault_detection/fault_detection_types.h"

#ifdef __cplusplus
extern "C" {
#endif

// Initialize the perch azimuth estimator state.
void EstimatorPerchAziInit(EstimatorPerchAziState *state);

// Populate the perch azimuth and azimuth angular rate estimate from a raw
// encoder reading.
//
// If a fault is declared, the last valid angle is held.
//
// Args:
//   perch_azi_encoder: Raw perch azimuth encoder angle (presumably
//       radians -- confirm against the implementation).
//   perch_azi_encoder_valid: Whether the encoder reading may be used.
//   params: Estimator tuning parameters.
//   state: Estimator state, updated in place.
//   perch_azi: Output estimate.
void EstimatorPerchAziStep(double perch_azi_encoder,
                           bool perch_azi_encoder_valid,
                           const EstimatorPerchAziParams *params,
                           EstimatorPerchAziState *state,
                           PerchAziEstimate *perch_azi);

#ifdef __cplusplus
}  // extern "C"
#endif

#endif  // CONTROL_ESTIMATOR_ESTIMATOR_PERCH_AZI_H_
| 553 |
691 | #pragma once
//#include "taskflow.hpp"
namespace tf {
// ----------------------------------------------------------------------------
// class: Topology
class Topology {

  friend class Taskflow;
  friend class Executor;

  public:

    template <typename P, typename C>
    Topology(Taskflow&, P&&, C&&);

  private:

    // The taskflow whose graph this topology executes.
    Taskflow& _taskflow;

    // Completion promise (NOTE(review): presumably fulfilled by the
    // executor when the run finishes -- confirm in Executor).
    std::promise<void> _promise;

    // Nodes with no dependents, i.e. the entry points of the graph
    // (rebuilt by _bind).
    PassiveVector<Node*> _sources;

    // Count of nodes with no successors; decremented elsewhere and
    // re-armed from _cached_num_sinks via _recover_num_sinks.
    std::atomic<int> _num_sinks {0};

    // Snapshot of the sink count taken by _bind.
    int _cached_num_sinks {0};

    // Repeat predicate and completion callback supplied at construction.
    std::function<bool()> _pred;
    std::function<void()> _call;

    void _bind(Graph& g);
    void _recover_num_sinks();
};
// Constructor
// Stores the repeat predicate p and the completion callback c for the
// given taskflow. The graph itself is attached later via _bind.
template <typename P, typename C>
inline Topology::Topology(Taskflow& tf, P&& p, C&& c):
  _taskflow(tf),
  _pred {std::forward<P>(p)},
  _call {std::forward<C>(c)} {
}
// Procedure: _bind
// Attaches this topology to a task graph: resets the source list and the
// sink counter, then rebuilds both from a single scan over the nodes.
inline void Topology::_bind(Graph& g) {

  _sources.clear();
  _num_sinks = 0;

  for(auto n : g.nodes()) {

    n->_topology = this;

    // no dependents -> an entry point of this run
    if(n->num_dependents() == 0) {
      _sources.push_back(n);
    }

    // no successors -> a sink whose completion must be counted
    if(n->num_successors() == 0) {
      ++_num_sinks;
    }
  }

  // remember the initial sink count so it can be restored for reruns
  _cached_num_sinks = _num_sinks;
}
// Procedure: _recover_num_sinks
// Restores the sink counter to the value snapshotted by _bind so the
// topology can be executed again.
inline void Topology::_recover_num_sinks() {
  _num_sinks = _cached_num_sinks;
}
} // end of namespace tf. ----------------------------------------------------
| 577 |
507 | <reponame>mjuenema/python-terrascript
# terrascript/data/vmware/vcd.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:30:19 UTC)
import terrascript
# --- Data source classes -----------------------------------------------------
# One subclass of terrascript.Data per VMware vCloud Director (vcd) provider
# data source. The class name doubles as the Terraform data source name; the
# bodies are intentionally empty (all behavior lives in terrascript.Data).
# This file is auto-generated -- do not hand-edit individual classes.
class vcd_catalog(terrascript.Data):
    pass

class vcd_catalog_item(terrascript.Data):
    pass

class vcd_catalog_media(terrascript.Data):
    pass

class vcd_edgegateway(terrascript.Data):
    pass

class vcd_external_network(terrascript.Data):
    pass

class vcd_external_network_v2(terrascript.Data):
    pass

class vcd_global_role(terrascript.Data):
    pass

class vcd_independent_disk(terrascript.Data):
    pass

class vcd_lb_app_profile(terrascript.Data):
    pass

class vcd_lb_app_rule(terrascript.Data):
    pass

class vcd_lb_server_pool(terrascript.Data):
    pass

class vcd_lb_service_monitor(terrascript.Data):
    pass

class vcd_lb_virtual_server(terrascript.Data):
    pass

class vcd_network_direct(terrascript.Data):
    pass

class vcd_network_isolated(terrascript.Data):
    pass

class vcd_network_isolated_v2(terrascript.Data):
    pass

class vcd_network_routed(terrascript.Data):
    pass

class vcd_network_routed_v2(terrascript.Data):
    pass

class vcd_nsxt_app_port_profile(terrascript.Data):
    pass

class vcd_nsxt_edge_cluster(terrascript.Data):
    pass

class vcd_nsxt_edgegateway(terrascript.Data):
    pass

class vcd_nsxt_firewall(terrascript.Data):
    pass

class vcd_nsxt_ip_set(terrascript.Data):
    pass

class vcd_nsxt_ipsec_vpn_tunnel(terrascript.Data):
    pass

class vcd_nsxt_manager(terrascript.Data):
    pass

class vcd_nsxt_nat_rule(terrascript.Data):
    pass

class vcd_nsxt_network_dhcp(terrascript.Data):
    pass

class vcd_nsxt_network_imported(terrascript.Data):
    pass

class vcd_nsxt_security_group(terrascript.Data):
    pass

class vcd_nsxt_tier0_router(terrascript.Data):
    pass

class vcd_nsxv_dhcp_relay(terrascript.Data):
    pass

class vcd_nsxv_dnat(terrascript.Data):
    pass

class vcd_nsxv_firewall_rule(terrascript.Data):
    pass

class vcd_nsxv_ip_set(terrascript.Data):
    pass

class vcd_nsxv_snat(terrascript.Data):
    pass

class vcd_org(terrascript.Data):
    pass

class vcd_org_user(terrascript.Data):
    pass

class vcd_org_vdc(terrascript.Data):
    pass

class vcd_portgroup(terrascript.Data):
    pass

class vcd_resource_list(terrascript.Data):
    pass

class vcd_resource_schema(terrascript.Data):
    pass

class vcd_right(terrascript.Data):
    pass

class vcd_rights_bundle(terrascript.Data):
    pass

class vcd_role(terrascript.Data):
    pass

class vcd_storage_profile(terrascript.Data):
    pass

class vcd_vapp(terrascript.Data):
    pass

class vcd_vapp_network(terrascript.Data):
    pass

class vcd_vapp_org_network(terrascript.Data):
    pass

class vcd_vapp_vm(terrascript.Data):
    pass

class vcd_vcenter(terrascript.Data):
    pass

class vcd_vm(terrascript.Data):
    pass

class vcd_vm_affinity_rule(terrascript.Data):
    pass

class vcd_vm_sizing_policy(terrascript.Data):
    pass

# Public export list; kept in sync with the class definitions above by the
# generator.
__all__ = [
    "vcd_catalog",
    "vcd_catalog_item",
    "vcd_catalog_media",
    "vcd_edgegateway",
    "vcd_external_network",
    "vcd_external_network_v2",
    "vcd_global_role",
    "vcd_independent_disk",
    "vcd_lb_app_profile",
    "vcd_lb_app_rule",
    "vcd_lb_server_pool",
    "vcd_lb_service_monitor",
    "vcd_lb_virtual_server",
    "vcd_network_direct",
    "vcd_network_isolated",
    "vcd_network_isolated_v2",
    "vcd_network_routed",
    "vcd_network_routed_v2",
    "vcd_nsxt_app_port_profile",
    "vcd_nsxt_edge_cluster",
    "vcd_nsxt_edgegateway",
    "vcd_nsxt_firewall",
    "vcd_nsxt_ip_set",
    "vcd_nsxt_ipsec_vpn_tunnel",
    "vcd_nsxt_manager",
    "vcd_nsxt_nat_rule",
    "vcd_nsxt_network_dhcp",
    "vcd_nsxt_network_imported",
    "vcd_nsxt_security_group",
    "vcd_nsxt_tier0_router",
    "vcd_nsxv_dhcp_relay",
    "vcd_nsxv_dnat",
    "vcd_nsxv_firewall_rule",
    "vcd_nsxv_ip_set",
    "vcd_nsxv_snat",
    "vcd_org",
    "vcd_org_user",
    "vcd_org_vdc",
    "vcd_portgroup",
    "vcd_resource_list",
    "vcd_resource_schema",
    "vcd_right",
    "vcd_rights_bundle",
    "vcd_role",
    "vcd_storage_profile",
    "vcd_vapp",
    "vcd_vapp_network",
    "vcd_vapp_org_network",
    "vcd_vapp_vm",
    "vcd_vcenter",
    "vcd_vm",
    "vcd_vm_affinity_rule",
    "vcd_vm_sizing_policy",
]
| 2,004 |
678 | <reponame>bzxy/cydia
/**
* This header is generated by class-dump-z 0.2b.
*
* Source: /System/Library/PrivateFrameworks/iWorkImport.framework/iWorkImport
*/
#import <iWorkImport/XXUnknownSuperclass.h>
// Interface reconstructed by class-dump-z (see file header); the trailing
// comments on the ivars are the byte offsets reported by class-dump.
__attribute__((visibility("hidden")))
@interface GQGroupDisplayInfo : XXUnknownSuperclass {
@private
	int mDisplayType;	// 4 = 0x4
	BOOL mIsTypeVisible;	// 8 = 0x8
}
@end
| 138 |
423 | <gh_stars>100-1000
// clang-format off
/*
* Firebase UI Bindings iOS Library
*
* Copyright © 2015 Firebase - All Rights Reserved
* https://www.firebase.com
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
 * 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY FIREBASE AS IS AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL FIREBASE BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// clang-format on
#import <UIKit/UIKit.h>
#import <FirebaseUI/XCodeMacros.h>
#import "FirebaseDataSource.h"
@class Firebase;
/**
* FirebaseCollectionViewDataSource provides an class that conforms to the
* UICollcetionViewDataSource protocol which allows UICollectionViews to
* implement
* FirebaseCollectionViewDataSource in order to provide a UICollectionView
* synchronized to a
* Firebase reference or query. In addition to handling all Firebase child
* events (added, changed,
* removed, moved), FirebaseCollectionViewDataSource handles UITableViewCell
* creation, either with
* the default UICollectionViewCell, prototype cells, custom
* UICollectionViewCell subclasses, or
* custom XIBs, and provides a simple [FirebaseCollectionViewDataSource
* populateCellWithBlock:]
* method which allows developers to populate the cells created for them with
* desired data from
* Firebase.
*/
@interface FirebaseCollectionViewDataSource : FirebaseDataSource<UICollectionViewDataSource>
/**
* The model class to coerce FDataSnapshots to (if desired). For instance, if
* the modelClass is set
* to [Message class] in Obj-C or Message.self in Swift, then objects of type
* Message will be
* returned instead of type FDataSnapshot.
*/
@property(strong, nonatomic, __NON_NULL) Class modelClass;
/**
* The cell class to coerce UICollectionViewCells to (if desired). For instance,
* if the cellClass is
* set to [CustomCollectionViewCell class] in Obj-C or CustomCollectionViewCell
* in Swift, then
* objects of type CustomCollectionViewCell will be returned instead of type
* UICollectionViewCell.
*/
@property(strong, nonatomic, __NON_NULL) Class cellClass;
/**
* The reuse identifier for cells in the UICollectionView.
*/
@property(strong, nonatomic, __NON_NULL) NSString *reuseIdentifier;
/**
* The UICollectionView instance that operations (inserts, removals, moves,
* etc.) are performed
* against.
*/
@property(strong, nonatomic, __NON_NULL) UICollectionView *collectionView;
/**
* Property to keep track of prototype cell use, to not register a class for the
* UICollectionView or
* do similar book keeping.
*/
@property BOOL hasPrototypeCell;
/**
* The callback to populate a subclass of UICollectionViewCell with an object
* provided by the
* datasource.
*/
@property(strong, nonatomic, __NON_NULL) void (^populateCell)
(__KINDOF(UICollectionViewCell) __NON_NULL_PTR cell, __KINDOF(NSObject) __NON_NULL_PTR object);
/**
* Initialize an instance of FirebaseCollectionViewDataSource that populates
* UICollectionViewCells
* with FDataSnapshots.
* @param ref A Firebase reference to bind the datasource to
* @param identifier A string to use as a CellReuseIdentifier
* @param collectionView An instance of a UICollectionView to bind to
* @return An instance of FirebaseCollectionViewDataSource that populates
* UICollectionViewCells with
* FDataSnapshots
*/
- (__NON_NULL instancetype)initWithRef:(__NON_NULL Firebase *)ref
cellReuseIdentifier:(__NON_NULL NSString *)identifier
view:(__NON_NULL UICollectionView *)collectionView;
/**
* Initialize an instance of FirebaseCollectionViewDataSource that populates
* UICollectionViewCells
* with FDataSnapshots. Note that this method is used when using prototype
* cells, where the cells
* don't need to be registered in the class.
* @param ref A Firebase reference to bind the datasource to
* @param identifier A string to use as a CellReuseIdentifier
* @param collectionView An instance of a UICollectionView to bind to
* @return An instance of FirebaseCollectionViewDataSource that populates
* UICollectionViewCells with
* FDataSnapshots
*/
- (__NON_NULL instancetype)initWithRef:(__NON_NULL Firebase *)ref
prototypeReuseIdentifier:(__NON_NULL NSString *)identifier
view:(__NON_NULL UICollectionView *)collectionView;
/**
* Initialize an instance of FirebaseCollectionViewDataSource that populates a
* custom subclass of
* UICollectionViewCell with FDataSnapshots.
* @param ref A Firebase reference to bind the datasource to
* @param cell A subclass of UICollectionViewCell used to populate the
* UICollectionView, defaults to
* UICollectionViewCell if nil
* @param identifier A string to use as a CellReuseIdentifier
* @param collectionView An instance of a UICollectionView to bind to
* @return An instance of FirebaseCollectionViewDataSource that populates a
* custom subclass of
* UICollectionViewCell with FDataSnapshots
*/
- (__NON_NULL instancetype)initWithRef:(__NON_NULL Firebase *)ref
cellClass:(__NULLABLE Class)cell
cellReuseIdentifier:(__NON_NULL NSString *)identifier
view:(__NON_NULL UICollectionView *)collectionView;
/**
* Initialize an instance of FirebaseCollectionViewDataSource that populates a
* custom xib with
* FDataSnapshots.
* @param ref A Firebase reference to bind the datasource to
* @param nibName The name of a xib file to create the layout for a
* UICollectionViewCell
* @param identifier A string to use as a CellReuseIdentifier
* @param collectionView An instance of a UICollectionView to bind to
* @return An instance of FirebaseCollectionViewDataSource that populates a
* custom xib with
* FDataSnapshots
*/
- (__NON_NULL instancetype)initWithRef:(__NON_NULL Firebase *)ref
nibNamed:(__NON_NULL NSString *)nibName
cellReuseIdentifier:(__NON_NULL NSString *)identifier
view:(__NON_NULL UICollectionView *)collectionView;
/**
* Initialize an instance of FirebaseCollectionViewDataSource that populates
* UICollectionViewCells
* with a custom model class.
* @param ref A Firebase reference to bind the datasource to
* @param model A custom class that FDataSnapshots are coerced to, defaults to
* FDataSnapshot if nil
* @param identifier A string to use as a CellReuseIdentifier
* @param collectionView An instance of a UICollectionView to bind to
* @return An instance of FirebaseCollectionViewDataSource that populates
* UICollectionViewCells with
* a custom model class
*/
- (__NON_NULL instancetype)initWithRef:(__NON_NULL Firebase *)ref
modelClass:(__NULLABLE Class)model
cellReuseIdentifier:(__NON_NULL NSString *)identifier
view:(__NON_NULL UICollectionView *)collectionView;
/**
* Initialize an instance of FirebaseCollectionViewDataSource that populates
* UICollectionViewCells
* with a custom model class. Note that this method is used when using prototype
* cells, where the
* cells don't need to be registered in the class.
* @param ref A Firebase reference to bind the datasource to
* @param model A custom class that FDataSnapshots are coerced to, defaults to
* FDataSnapshot if nil
* @param identifier A string to use as a CellReuseIdentifier
* @param collectionView An instance of a UICollectionView to bind to
* @return An instance of FirebaseCollectionViewDataSource that populates
* UICollectionViewCells with
* a custom model class
*/
- (__NON_NULL instancetype)initWithRef:(__NON_NULL Firebase *)ref
modelClass:(__NULLABLE Class)model
prototypeReuseIdentifier:(__NON_NULL NSString *)identifier
view:(__NON_NULL UICollectionView *)collectionView;
/**
* Initialize an instance of FirebaseCollectionViewDataSource that populates a
* custom subclass of
* UICollectionViewCell with a custom model class.
* @param ref A Firebase reference to bind the datasource to
* @param model A custom class that FDataSnapshots are coerced to, defaults to
* FDataSnapshot if nil
* @param cell A subclass of UICollectionViewCell used to populate the
* UICollectionView, defaults to
* UICollectionViewCell if nil
* @param identifier A string to use as a CellReuseIdentifier
* @param collectionView An instance of a UICollectionView to bind to
* @return An instance of FirebaseCollectionViewDataSource that populates a
* custom subclass of
* UICollectionViewCell with a custom model class
*/
- (__NON_NULL instancetype)initWithRef:(__NON_NULL Firebase *)ref
modelClass:(__NULLABLE Class)model
cellClass:(__NULLABLE Class)cell
cellReuseIdentifier:(__NON_NULL NSString *)identifier
view:(__NON_NULL UICollectionView *)collectionView;
/**
* Initialize an instance of FirebaseCollectionViewDataSource that populates a
* custom xib with a
* custom model class.
* @param ref A Firebase reference to bind the datasource to
* @param model A custom class that FDataSnapshots are coerced to, defaults to
* FDataSnapshot if nil
* @param nibName The name of a xib file to create the layout for a
* UICollectionViewCell
* @param identifier A string to use as a CellReuseIdentifier
* @param collectionView An instance of a UICollectionView to bind to
* @return An instance of FirebaseCollectionViewDataSource that populates a
* custom xib with a custom
* model class
*/
- (__NON_NULL instancetype)initWithRef:(__NON_NULL Firebase *)ref
modelClass:(__NULLABLE Class)model
nibNamed:(__NON_NULL NSString *)nibName
cellReuseIdentifier:(__NON_NULL NSString *)identifier
view:(__NON_NULL UICollectionView *)collectionView;
/**
* Initialize an instance of FirebaseCollectionViewDataSource that populates
* UICollectionViewCells
* with FDataSnapshots.
* @param query A Firebase query to bind the datasource to
* @param identifier A string to use as a CellReuseIdentifier
* @param collectionView An instance of a UICollectionView to bind to
* @return An instance of FirebaseCollectionViewDataSource that populates
* UICollectionViewCells with
* FDataSnapshots
*/
- (__NON_NULL instancetype)initWithQuery:(__NON_NULL FQuery *)query
cellReuseIdentifier:(__NON_NULL NSString *)identifier
view:(__NON_NULL UICollectionView *)collectionView;
/**
* Initialize an instance of FirebaseCollectionViewDataSource that populates
* UICollectionViewCells
* with FDataSnapshots. Note that this method is used when using prototype
* cells, where the cells
* don't need to be registered in the class.
* @param query A Firebase query to bind the datasource to
* @param identifier A string to use as a CellReuseIdentifier
* @param collectionView An instance of a UICollectionView to bind to
* @return An instance of FirebaseCollectionViewDataSource that populates
* UICollectionViewCells with
* FDataSnapshots
*/
- (__NON_NULL instancetype)initWithQuery:(__NON_NULL FQuery *)query
prototypeReuseIdentifier:(__NON_NULL NSString *)identifier
view:(__NON_NULL UICollectionView *)collectionView;
/**
* Initialize an instance of FirebaseCollectionViewDataSource that populates a
* custom subclass of
* UICollectionViewCell with FDataSnapshots.
* @param query A Firebase query to bind the datasource to
* @param cell A subclass of UICollectionViewCell used to populate the
* UICollectionView, defaults to
* UICollectionViewCell if nil
* @param identifier A string to use as a CellReuseIdentifier
* @param collectionView An instance of a UICollectionView to bind to
* @return An instance of FirebaseCollectionViewDataSource that populates a
* custom subclass of
* UICollectionViewCell with FDataSnapshots
*/
- (__NON_NULL instancetype)initWithQuery:(__NON_NULL FQuery *)query
cellClass:(__NULLABLE Class)cell
cellReuseIdentifier:(__NON_NULL NSString *)identifier
view:(__NON_NULL UICollectionView *)collectionView;
/**
* Initialize an instance of FirebaseCollectionViewDataSource that populates a
* custom xib with
* FDataSnapshots.
* @param query A Firebase query to bind the datasource to
* @param nibName The name of a xib file to create the layout for a
* UICollectionViewCell
* @param identifier A string to use as a CellReuseIdentifier
* @param collectionView An instance of a UICollectionView to bind to
* @return An instance of FirebaseCollectionViewDataSource that populates a
* custom xib with
* FDataSnapshots
*/
- (__NON_NULL instancetype)initWithQuery:(__NON_NULL FQuery *)query
nibNamed:(__NON_NULL NSString *)nibName
cellReuseIdentifier:(__NON_NULL NSString *)identifier
view:(__NON_NULL UICollectionView *)collectionView;
/**
* Initialize an instance of FirebaseCollectionViewDataSource that populates
* UICollectionViewCells
* with a custom model class.
* @param query A Firebase query to bind the datasource to
* @param model A custom class that FDataSnapshots are coerced to, defaults to
* FDataSnapshot if nil
* @param identifier A string to use as a CellReuseIdentifier
* @param collectionView An instance of a UICollectionView to bind to
* @return An instance of FirebaseCollectionViewDataSource that populates
* UICollectionViewCells with
* a custom model class
*/
- (__NON_NULL instancetype)initWithQuery:(__NON_NULL FQuery *)query
modelClass:(__NULLABLE Class)model
cellReuseIdentifier:(__NON_NULL NSString *)identifier
view:(__NON_NULL UICollectionView *)collectionView;
/**
* Initialize an instance of FirebaseCollectionViewDataSource that populates
* UICollectionViewCells
* with a custom model class. Note that this method is used when using prototype
* cells, where the
* cells don't need to be registered in the class.
* @param query A Firebase query to bind the datasource to
* @param model A custom class that FDataSnapshots are coerced to, defaults to
* FDataSnapshot if nil
* @param identifier A string to use as a CellReuseIdentifier
* @param collectionView An instance of a UICollectionView to bind to
* @return An instance of FirebaseCollectionViewDataSource that populates
* UICollectionViewCells with
* a custom model class
*/
- (__NON_NULL instancetype)initWithQuery:(__NON_NULL FQuery *)query
modelClass:(__NULLABLE Class)model
prototypeReuseIdentifier:(__NON_NULL NSString *)identifier
view:(__NON_NULL UICollectionView *)collectionView;
/**
* Initialize an instance of FirebaseCollectionViewDataSource that populates a
* custom subclass of
* UICollectionViewCell with a custom model class.
* @param query A Firebase query to bind the datasource to
* @param model A custom class that FDataSnapshots are coerced to, defaults to
* FDataSnapshot if nil
* @param cell A subclass of UICollectionViewCell used to populate the
* UICollectionView, defaults to
* UICollectionViewCell if nil
* @param identifier A string to use as a CellReuseIdentifier
* @param collectionView An instance of a UICollectionView to bind to
* @return An instance of FirebaseCollectionViewDataSource that populates a
* custom subclass of
* UICollectionViewCell with a custom model class
*/
- (__NON_NULL instancetype)initWithQuery:(__NON_NULL FQuery *)query
modelClass:(__NULLABLE Class)model
cellClass:(__NULLABLE Class)cell
cellReuseIdentifier:(__NON_NULL NSString *)identifier
view:(__NON_NULL UICollectionView *)collectionView;
/**
* Initialize an instance of FirebaseCollectionViewDataSource that populates a
* custom xib with a
* custom model class.
* @param query A Firebase query to bind the datasource to
* @param model A custom class that FDataSnapshots are coerced to, defaults to
* FDataSnapshot if nil
* @param nibName The name of a xib file to create the layout for a
* UICollectionViewCell
* @param identifier A string to use as a CellReuseIdentifier
* @param collectionView An instance of a UICollectionView to bind to
* @return An instance of FirebaseCollectionViewDataSource that populates a
* custom xib with a custom
* model class
*/
- (__NON_NULL instancetype)initWithQuery:(__NON_NULL FQuery *)query
modelClass:(__NULLABLE Class)model
nibNamed:(__NON_NULL NSString *)nibName
cellReuseIdentifier:(__NON_NULL NSString *)identifier
view:(__NON_NULL UICollectionView *)collectionView;
/**
* This method populates the fields of a UICollectionViewCell or subclass given
* an FDataSnapshot (or
* custom model object).
* @param callback A block which returns an initialized UICollectionViewCell (or
* subclass) and the
* corresponding object to populate the cell with.
*/
- (void)populateCellWithBlock:
(__NON_NULL void (^)(__KINDOF(UICollectionViewCell)__NON_NULL_PTR cell,
__KINDOF(NSObject)__NON_NULL_PTR object))callback;
@end
| 6,202 |
304 | <gh_stars>100-1000
/*
* Copyright 2019-2021 CloudNetService team & contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.dytanic.cloudnet.ext.bridge.player;
import de.dytanic.cloudnet.common.concurrent.ITask;
import java.util.Collection;
import java.util.UUID;
import org.jetbrains.annotations.NotNull;
public interface PlayerProvider {

  /**
   * Gets all players known to this provider.
   *
   * @return a collection of {@link ICloudPlayer} instances, never null.
   */
  @NotNull
  Collection<? extends ICloudPlayer> asPlayers();

  /**
   * Gets the unique ids of all players known to this provider.
   *
   * @return a collection of player UUIDs, never null.
   */
  @NotNull
  Collection<UUID> asUUIDs();

  /**
   * Gets the names of all players known to this provider.
   *
   * @return a collection of player names, never null.
   */
  @NotNull
  Collection<String> asNames();

  /**
   * Counts the players known to this provider.
   *
   * @return the player count.
   */
  int count();

  /**
   * Async variant of {@link #asPlayers()}.
   *
   * @return a task completing with all known players.
   */
  @NotNull
  ITask<Collection<? extends ICloudPlayer>> asPlayersAsync();

  /**
   * Async variant of {@link #asUUIDs()}.
   *
   * @return a task completing with all known player UUIDs.
   */
  @NotNull
  ITask<Collection<UUID>> asUUIDsAsync();

  /**
   * Async variant of {@link #asNames()}.
   *
   * @return a task completing with all known player names.
   */
  @NotNull
  ITask<Collection<String>> asNamesAsync();

  /**
   * Async variant of {@link #count()}.
   *
   * @return a task completing with the player count.
   */
  @NotNull
  ITask<Integer> countAsync();
}
| 375 |
514 |
package no.priv.garshol.duke.utils;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import no.priv.garshol.duke.DukeException;
import no.priv.garshol.duke.LinkDatabase;
import org.junit.Test;
import static org.junit.Assert.fail;
public class LinkDatabaseUtilsTest {
private LinkDatabase db;
@Test
public void testOldStyle() throws IOException {
// tries to load a pre-1.2 format test file
try {
load("old-format.txt");
fail("accepted old-style test file");
} catch (DukeException e) {
// this is expected
}
}
private void load(String filename) throws IOException {
ClassLoader cloader = Thread.currentThread().getContextClassLoader();
InputStream istream = cloader.getResourceAsStream(filename);
db = LinkDatabaseUtils.loadTestFile(new InputStreamReader(istream));
}
} | 303 |
378 | <filename>test/integration/iam/opencypher/test_opencypher_query_with_iam.py
"""
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
"""
import pytest
from botocore.session import get_session
from test.integration.DataDrivenOpenCypherTest import DataDrivenOpenCypherTest
class TestOpenCypherQueryWithIam(DataDrivenOpenCypherTest):
    """Integration tests that run openCypher queries through an IAM-signed client."""

    def setUp(self) -> None:
        super().setUp()
        # Build a client that signs requests with the default botocore credentials.
        self.client = self.client_builder.with_iam(get_session()).build()

    @pytest.mark.neptune
    @pytest.mark.opencypher
    def test_do_opencypher_query(self):
        """The HTTP endpoint returns a JSON document shaped like:

        {
            "head": {"vars": ["l.name"]},
            "results": {
                "bindings": [
                    {"l.name": {"type": "string", "value": "English Premier League"}}
                ]
            }
        }
        """
        expected_league_name = 'English Premier League'
        query = 'MATCH (l:League) RETURN l.name'
        oc_res = self.client.opencypher_http(query)
        assert oc_res.status_code == 200
        res = oc_res.json()
        # isinstance instead of type(...) == dict (flake8 E721); .json() yields a dict here.
        assert isinstance(res, dict)
        assert expected_league_name == res['results']['bindings'][0]['l.name']['value']

    @pytest.mark.opencypher
    @pytest.mark.bolt
    def test_do_opencypher_bolt_query(self):
        """The Bolt endpoint honours the LIMIT clause of the query."""
        query = 'MATCH (p) RETURN p LIMIT 10'
        # NOTE: "opencyper_bolt" (sic) is the client's actual method name.
        res = self.client.opencyper_bolt(query)
        assert len(res) == 10
| 758 |
666 | <reponame>ryansunboy/spring-cloud-config-admin
package com.didispace.scca.rest.web;
import com.didispace.scca.rest.domain.User;
import com.didispace.scca.rest.dto.UserDto;
import com.didispace.scca.rest.dto.base.WebResp;
import com.didispace.scca.rest.exception.ServiceException;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiOperation;
import lombok.extern.slf4j.Slf4j;
import org.springframework.security.access.annotation.Secured;
import org.springframework.security.core.annotation.AuthenticationPrincipal;
import org.springframework.web.bind.annotation.*;
import java.util.List;
/**
* Created by Anoyi on 2018/8/1.
* <p>
* Blog: https://anoyi.com/
* Github: https://github.com/ChineseSilence
*/
@Api("Admin(管理员-用户管理)")
@Slf4j
@RestController
@RequestMapping("${scca.rest.context-path:}/admin")
@Secured("ROLE_ADMIN")
public class UserAdminController extends BaseController {
@ApiOperation("Get User List / 获取用户列表")
@RequestMapping(path = "/list", method = RequestMethod.GET)
public WebResp<List<UserDto>> getUserList() {
// 分页获取所有用户信息
List<UserDto> users = userService.getUsers();
return WebResp.success(users);
}
@ApiOperation("Save User / 添加新用户")
@RequestMapping(method = RequestMethod.POST)
public WebResp<String> saveUser(@RequestBody User user) {
// 管理员添加新用户
userService.createUser(user);
return WebResp.success("save new user success");
}
@ApiOperation("Update User / 修改用户信息")
@RequestMapping(method = RequestMethod.PUT)
public WebResp<String> updateUser(@RequestBody User user) {
// 管理员修改用户信息
User dbUser = userService.getByUsername(user.getUsername());
dbUser.setNickname(user.getNickname());
dbUser.setRole(user.getRole());
dbUser.setPassword(user.<PASSWORD>());
userService.updateUser(dbUser);
return WebResp.success("update user success : " + user.getUsername());
}
@ApiOperation("Delete User / 删除用户")
@RequestMapping(method = RequestMethod.DELETE)
public WebResp<String> deleteUser(@AuthenticationPrincipal org.springframework.security.core.userdetails.User principal,
@RequestParam("username") String username) {
// 管理员删除用户
if(principal.getUsername().equals(username)){
throw new ServiceException("不能删除自己");
}
userService.deleteUserByUsername(username);
return WebResp.success("save new user success");
}
}
| 1,103 |
1,085 | /*
* Copyright (C) 2017-2019 Dremio Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.dremio.plugins.elastic;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.lang.annotation.Annotation;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.ws.rs.client.ClientRequestContext;
import javax.ws.rs.core.MultivaluedHashMap;
import javax.ws.rs.core.MultivaluedMap;
import javax.ws.rs.ext.MessageBodyWriter;
import org.glassfish.jersey.message.MessageBodyWorkers;
import org.glassfish.jersey.uri.UriComponent;
import com.amazonaws.AmazonWebServiceRequest;
import com.amazonaws.ReadLimitInfo;
import com.amazonaws.Request;
import com.amazonaws.handlers.HandlerContextKey;
import com.amazonaws.http.HttpMethodName;
import com.amazonaws.util.AWSRequestMetrics;
import com.google.common.base.Joiner;
/**
* Used to wrap up ClientRequestContext and AWS4Signer will add headers to the request for signature.
*/
public class AWSRequest<T> implements Request<T> {
  /* the name of AWS service */
  private final String serviceName;
  /* original request context */
  private final ClientRequestContext clientRequestContext;
  /* Amazon web service request used to process content */
  private final AmazonWebServiceRequest originalAmazonWebServiceRequest = AmazonWebServiceRequest.NOOP;
  private final MessageBodyWorkers workers;

  /**
   * @param serviceName name of the AWS service used for signing
   * @param clientRequestContext the JAX-RS request being adapted
   * @param workers Jersey writers used to serialize the request entity
   */
  public AWSRequest(String serviceName, ClientRequestContext clientRequestContext, MessageBodyWorkers workers) {
    this.serviceName = serviceName;
    this.clientRequestContext = clientRequestContext;
    this.workers = workers;
  }

  // This adapter is read-only for the signer except for addHeader(); every
  // other mutator of the AWS Request interface is intentionally unsupported.

  @Override
  public void setHeaders(Map<String, String> headers) {
    throw new UnsupportedOperationException("Not supported by AWSRequest");
  }

  @Override
  public void setResourcePath(String path) {
    throw new UnsupportedOperationException("Not supported by AWSRequest");
  }

  @Override
  public Request<T> withParameter(String name, String value) {
    throw new UnsupportedOperationException("Not supported by AWSRequest");
  }

  @Override
  public void setParameters(Map<String, List<String>> parameters) {
    throw new UnsupportedOperationException("Not supported by AWSRequest");
  }

  @Override
  public void addParameters(String name, List<String> values) {
    throw new UnsupportedOperationException("Not supported by AWSRequest");
  }

  @Override
  public void setEndpoint(URI endpoint) {
    throw new UnsupportedOperationException("Not supported by AWSRequest");
  }

  @Override
  public void setHttpMethod(HttpMethodName httpMethod) {
    throw new UnsupportedOperationException("Not supported by AWSRequest");
  }

  @Override
  public String getServiceName() {
    return serviceName;
  }

  @Override
  public AmazonWebServiceRequest getOriginalRequest() {
    // Always the NOOP placeholder; there is no real AmazonWebServiceRequest.
    return originalAmazonWebServiceRequest;
  }

  @Override
  public void setTimeOffset(int timeOffset) {
    throw new UnsupportedOperationException("Not supported by AWSRequest");
  }

  @Override
  public Request<T> withTimeOffset(int timeOffset) {
    throw new UnsupportedOperationException("Not supported by AWSRequest");
  }

  @Override
  public AWSRequestMetrics getAWSRequestMetrics() {
    throw new UnsupportedOperationException("Not supported by AWSRequest");
  }

  @Override
  public void setAWSRequestMetrics(AWSRequestMetrics metrics) {
    throw new UnsupportedOperationException("Not supported by AWSRequest");
  }

  @Override
  public <X> void addHandlerContext(HandlerContextKey<X> key, X value) {
    throw new UnsupportedOperationException("Not supported by AWSRequest");
  }

  @Override
  public <X> X getHandlerContext(HandlerContextKey<X> key) {
    throw new UnsupportedOperationException("Not supported by AWSRequest");
  }

  /** Writes a header straight into the underlying JAX-RS request (used by the signer). */
  @Override
  public void addHeader(String name, String value) {
    clientRequestContext.getHeaders().putSingle(name, value);
  }

  @Override
  public void addParameter(String name, String value) {
    throw new UnsupportedOperationException("Not supported by AWSRequest");
  }

  @Override
  public void setContent(InputStream content) {
    throw new UnsupportedOperationException("Not supported by AWSRequest");
  }

  /** Returns the request headers flattened to single comma-joined values. */
  @Override
  public Map<String, String> getHeaders() {
    return getHeadersMap(clientRequestContext.getStringHeaders());
  }

  @Override
  public String getResourcePath() {
    return clientRequestContext.getUri().getPath();
  }

  @Override
  public Map<String, List<String>> getParameters() {
    return UriComponent.decodeQuery(clientRequestContext.getUri(), true);
  }

  /** Returns scheme://host:port of the request URI, with path/query/fragment stripped. */
  @Override
  public URI getEndpoint() {
    URI uri = clientRequestContext.getUri();
    try {
      return new URI(uri.getScheme(), null, uri.getHost(), uri.getPort(), null, null, null);
    } catch (URISyntaxException e) {
      throw new RuntimeException("failed to set endpoint for aws request, url is " + uri.toString(), e);
    }
  }

  @Override
  public HttpMethodName getHttpMethod() {
    return HttpMethodName.valueOf(clientRequestContext.getMethod());
  }

  @Override
  public int getTimeOffset() {
    return 0;
  }

  /**
   * Serializes the JAX-RS entity (if any) into a byte buffer via the matching
   * Jersey MessageBodyWriter and returns it as a replayable stream; returns
   * null when the request has no entity.
   */
  @Override
  public InputStream getContent() {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    final Object entity = clientRequestContext.getEntity();
    if (entity == null) {
      return null;
    } else {
      MessageBodyWriter messageBodyWriter = workers.getMessageBodyWriter(entity.getClass(), entity.getClass(),
        new Annotation[]{}, clientRequestContext.getMediaType());
      try {
        // use the MBW to serialize entity into baos
        messageBodyWriter.writeTo(entity,
          entity.getClass(), entity.getClass(), new Annotation[] {},
          clientRequestContext.getMediaType(), new MultivaluedHashMap<String, Object>(),
          baos);
      } catch (IOException e) {
        throw new RuntimeException(
          "Error while serializing entity.", e);
      }
      return new ByteArrayInputStream(baos.toByteArray());
    }
  }

  @Override
  public InputStream getContentUnwrapped() {
    // No wrapping is ever applied, so this is identical to getContent().
    return getContent();
  }

  @Override
  public ReadLimitInfo getReadLimitInfo() {
    return originalAmazonWebServiceRequest;
  }

  @Override
  public Object getOriginalRequestObject() {
    return null;
  }

  /** Flattens a multivalued header map, joining repeated values with commas. */
  private Map<String, String> getHeadersMap(MultivaluedMap<String, String> m) {
    Map<String, String> map = new HashMap<String, String>();
    if (m == null) {
      return map;
    }
    for (Map.Entry<String, List<String>> entry : m.entrySet()) {
      map.put(entry.getKey(), Joiner.on(",").join(entry.getValue()));
    }
    return map;
  }
}
| 2,185 |
507 | <reponame>wujiezero/SpringBoot<filename>springboot-security/springboot-security-jwt/src/main/java/top/lrshuai/security/handler/SelfAuthenticationProvider.java<gh_stars>100-1000
package top.lrshuai.security.handler;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.security.authentication.AuthenticationProvider;
import org.springframework.security.authentication.BadCredentialsException;
import org.springframework.security.authentication.UsernamePasswordAuthenticationToken;
import org.springframework.security.core.Authentication;
import org.springframework.security.core.AuthenticationException;
import org.springframework.security.core.userdetails.UserDetails;
import org.springframework.security.crypto.bcrypt.BCryptPasswordEncoder;
import org.springframework.stereotype.Component;
import top.lrshuai.security.config.security.SecurityUserService;
/**
 * Custom authentication logic: loads the user by account name and verifies the
 * submitted password against the stored BCrypt hash.
 */
//@Component
public class SelfAuthenticationProvider implements AuthenticationProvider {

  @Autowired
  private SecurityUserService securityUserService;

  @Autowired
  private BCryptPasswordEncoder bCryptPasswordEncoder;

  /**
   * Authenticates the given username/password token.
   *
   * @throws BadCredentialsException if the password does not match the stored hash
   */
  @Override
  public Authentication authenticate(Authentication authentication) throws AuthenticationException {
    // The submitted account name.
    String account = authentication.getName();
    // The raw password from the login request.
    String password = (String) authentication.getCredentials();
    UserDetails userDetails = securityUserService.loadUserByUsername(account);
    boolean checkPassword = bCryptPasswordEncoder.matches(password, userDetails.getPassword());
    if (!checkPassword) {
      throw new BadCredentialsException("密码不正确,请重新登录!");
    }
    return new UsernamePasswordAuthenticationToken(account, password, userDetails.getAuthorities());
  }

  @Override
  public boolean supports(Class<?> aClass) {
    // Only claim support for username/password tokens. The previous blanket
    // "return true" made this provider handle every Authentication type even
    // though authenticate() casts the credentials to String.
    return UsernamePasswordAuthenticationToken.class.isAssignableFrom(aClass);
  }
}
| 621 |
2,591 | <reponame>maketubo/liquibase<filename>liquibase-core/src/main/java/liquibase/precondition/Precondition.java
package liquibase.precondition;
import liquibase.changelog.ChangeSet;
import liquibase.changelog.DatabaseChangeLog;
import liquibase.changelog.visitor.ChangeExecListener;
import liquibase.database.Database;
import liquibase.exception.PreconditionErrorException;
import liquibase.exception.PreconditionFailedException;
import liquibase.exception.ValidationErrors;
import liquibase.exception.Warnings;
import liquibase.parser.core.ParsedNode;
import liquibase.parser.core.ParsedNodeException;
import liquibase.resource.ResourceAccessor;
import liquibase.serializer.LiquibaseSerializable;
/**
 * Marker interface for preconditions. May become an annotation in the future.
 */
public interface Precondition extends LiquibaseSerializable {

    // Note: the redundant "public" modifier on interface members was dropped;
    // interface methods are implicitly public.

    /** Returns the name identifying this precondition type. */
    String getName();

    /** Collects non-fatal warnings for this precondition against the given database. */
    Warnings warn(Database database);

    /** Validates that this precondition is correctly configured for the given database. */
    ValidationErrors validate(Database database);

    /**
     * Evaluates the precondition.
     *
     * @throws PreconditionFailedException if the precondition is not met
     * @throws PreconditionErrorException if evaluation itself errors out
     */
    void check(Database database, DatabaseChangeLog changeLog, ChangeSet changeSet, ChangeExecListener changeExecListener)
        throws PreconditionFailedException, PreconditionErrorException;

    /** Populates this precondition from a parsed changelog node. */
    void load(ParsedNode parsedNode, ResourceAccessor resourceAccessor) throws ParsedNodeException;
}
| 377 |
2,101 | <gh_stars>1000+
import time
from http import HTTPStatus
from itertools import count
from typing import Sequence
import gevent
import grequests
import pytest
import structlog
from eth_utils import to_canonical_address
from flask import url_for
from raiden.api.python import RaidenAPI
from raiden.api.rest import APIServer, RestAPI
from raiden.constants import RoutingMode
from raiden.message_handler import MessageHandler
from raiden.network.transport import MatrixTransport
from raiden.raiden_event_handler import RaidenEventHandler
from raiden.raiden_service import RaidenService
from raiden.settings import RestApiConfig
from raiden.tests.integration.api.utils import wait_for_listening_port
from raiden.tests.integration.fixtures.raiden_network import RestartNode
from raiden.tests.utils.detect_failure import raise_on_failure
from raiden.tests.utils.protocol import HoldRaidenEventHandler
from raiden.tests.utils.transfer import (
assert_synced_channel_state,
wait_assert,
watch_for_unlock_failures,
)
from raiden.transfer import views
from raiden.ui.startup import RaidenBundle
from raiden.utils.formatting import to_checksum_address
from raiden.utils.typing import (
Address,
BlockNumber,
Host,
Iterator,
List,
Port,
TokenAddress,
TokenAmount,
TokenNetworkAddress,
Tuple,
)
log = structlog.get_logger(__name__)
def iwait_and_get(items: Sequence[gevent.Greenlet]) -> None:
    """Wait on the greenlets as each finishes and ``get()`` its result.

    Calling ``get`` immediately re-raises any exception raised inside a
    greenlet, instead of waiting for the whole batch to complete first.
    """
    for finished in gevent.iwait(items):
        finished.get()
def _url_for(apiserver: APIServer, endpoint: str, **kwargs) -> str:
    """Build the REST URL for ``endpoint`` on ``apiserver``.

    url_for() expects binary addresses, so any "0x..." string argument is
    converted to its canonical binary form first.
    """
    converted = {
        key: to_canonical_address(val) if isinstance(val, str) and val.startswith("0x") else val
        for key, val in kwargs.items()
    }

    with apiserver.flask_app.app_context():
        return url_for(f"v1_resources.{endpoint}", **converted)
def start_apiserver(raiden_app: RaidenService, rest_api_port_number: Port) -> APIServer:
    """Start a REST API server for ``raiden_app`` on localhost and block until
    the port is actually accepting connections."""
    raiden_api = RaidenAPI(raiden_app)
    rest_api = RestAPI(raiden_api)
    api_server = APIServer(
        rest_api, config=RestApiConfig(host=Host("localhost"), port=rest_api_port_number)
    )

    # required for url_for
    api_server.flask_app.config["SERVER_NAME"] = f"localhost:{rest_api_port_number}"

    api_server.start()

    # Don't return before the server is reachable, or the first request races it.
    wait_for_listening_port(rest_api_port_number)

    return api_server
def start_apiserver_for_network(
    raiden_network: List[RaidenService], port_generator: Iterator[Port]
) -> List[APIServer]:
    """Start one REST API server per node, each on a freshly drawn port."""
    servers = []
    for node in raiden_network:
        servers.append(start_apiserver(node, next(port_generator)))
    return servers
def restart_app(app: RaidenService, restart_node: RestartNode) -> RaidenService:
    """Build a fresh RaidenService reusing ``app``'s config, RPC client and
    proxies, give it a new Matrix transport, and start it via ``restart_node``.

    Returns the new (started) service; the old ``app`` object is discarded.
    """
    new_transport = MatrixTransport(
        config=app.config.transport, environment=app.config.environment_type
    )
    raiden_event_handler = RaidenEventHandler()
    # HoldRaidenEventHandler wraps the real handler — presumably so tests can
    # intercept/hold events; TODO confirm against its definition.
    hold_handler = HoldRaidenEventHandler(raiden_event_handler)
    app = RaidenService(
        config=app.config,
        rpc_client=app.rpc_client,
        proxy_manager=app.proxy_manager,
        query_start_block=BlockNumber(0),  # re-scan the chain from genesis
        raiden_bundle=RaidenBundle(
            app.default_registry,
            app.default_secret_registry,
        ),
        services_bundle=app.default_services_bundle,
        transport=new_transport,
        raiden_event_handler=hold_handler,
        message_handler=MessageHandler(),
        routing_mode=RoutingMode.PRIVATE,
    )

    restart_node(app)

    return app
def restart_network(
    raiden_network: List[RaidenService], restart_node: RestartNode
) -> List[RaidenService]:
    """Stop every node, then restart all of them in parallel.

    Returns the restarted services in the same order as ``raiden_network``.
    """
    for app in raiden_network:
        app.stop()

    # Must be a list, not a generator expression: the generator was exhausted
    # by ``set(wait_network)`` below, so the subsequent list comprehension
    # iterated an empty generator and this function returned [].
    wait_network = [gevent.spawn(restart_app, app, restart_node) for app in raiden_network]
    gevent.joinall(set(wait_network), raise_error=True)

    new_network = [greenlet.get() for greenlet in wait_network]

    return new_network
def restart_network_and_apiservers(
    raiden_network: List[RaidenService],
    restart_node: RestartNode,
    api_servers: List[APIServer],
    port_generator: Iterator[Port],
) -> Tuple[List[RaidenService], List[APIServer]]:
    """Tear down the REST servers, restart every node, then serve them again."""
    for server in api_servers:
        server.stop()

    restarted_network = restart_network(raiden_network, restart_node)
    restarted_servers = start_apiserver_for_network(restarted_network, port_generator)
    return restarted_network, restarted_servers
def address_from_apiserver(apiserver: APIServer) -> Address:
    """Return the node address of the Raiden service behind ``apiserver``."""
    raiden_api = apiserver.rest_api.raiden_api
    return raiden_api.address
def transfer_and_assert(
    server_from: APIServer,
    server_to: APIServer,
    token_address: TokenAddress,
    identifier: int,
    amount: TokenAmount,
) -> None:
    """POST a payment from ``server_from`` to ``server_to`` through the REST
    API and assert it succeeded (HTTP 200, JSON response, no client error)."""
    url = _url_for(
        server_from,
        "token_target_paymentresource",
        token_address=to_checksum_address(token_address),
        target_address=to_checksum_address(address_from_apiserver(server_to)),
    )
    json = {"amount": amount, "identifier": identifier}

    log.debug("PAYMENT REQUEST", url=url, json=json)

    request = grequests.post(url, json=json)

    # Time only the send/response round-trip, not URL construction.
    start = time.monotonic()
    response = request.send().response
    duration = time.monotonic() - start

    log.debug("PAYMENT RESPONSE", url=url, json=json, response=response, duration=duration)

    # grequests stores a transport-level failure on request.exception instead
    # of raising it, so it must be checked explicitly.
    assert getattr(request, "exception", None) is None
    assert response is not None
    assert response.status_code == HTTPStatus.OK, f"Payment failed, reason: {response.content}"
    assert response.headers["Content-Type"] == "application/json"
def sequential_transfers(
    server_from: APIServer,
    server_to: APIServer,
    number_of_transfers: int,
    token_address: TokenAddress,
    identifier_generator: Iterator[int],
) -> None:
    """Send ``number_of_transfers`` payments of amount 1, one after another."""
    remaining = number_of_transfers
    while remaining > 0:
        transfer_and_assert(
            server_from=server_from,
            server_to=server_to,
            token_address=token_address,
            identifier=next(identifier_generator),
            amount=TokenAmount(1),
        )
        remaining -= 1
def stress_send_serial_transfers(
    rest_apis: List[APIServer],
    token_address: TokenAddress,
    identifier_generator: Iterator[int],
    deposit: TokenAmount,
) -> None:
    """Send `deposit` transfers of value `1` one at a time, without changing
    the initial capacity.
    """
    # Ring topology: each node pairs with its successor, last wraps to first.
    pairs = list(zip(rest_apis, rest_apis[1:] + [rest_apis[0]]))

    # deplete the channels in one direction
    for server_from, server_to in pairs:
        sequential_transfers(
            server_from=server_from,
            server_to=server_to,
            number_of_transfers=deposit,
            token_address=token_address,
            identifier_generator=identifier_generator,
        )

    # deplete the channels in the backwards direction (2x: the forward run
    # above doubled the capacity on this side)
    for server_to, server_from in pairs:
        sequential_transfers(
            server_from=server_from,
            server_to=server_to,
            number_of_transfers=deposit * 2,
            token_address=token_address,
            identifier_generator=identifier_generator,
        )

    # reset the balances by sending the "extra" deposit forward
    for server_from, server_to in pairs:
        sequential_transfers(
            server_from=server_from,
            server_to=server_to,
            number_of_transfers=deposit,
            token_address=token_address,
            identifier_generator=identifier_generator,
        )
def stress_send_parallel_transfers(
    rest_apis: List[APIServer],
    token_address: TokenAddress,
    identifier_generator: Iterator[int],
    deposit: TokenAmount,
) -> None:
    """Send `deposit` transfers in parallel, without changing the initial capacity."""
    # Ring topology: each node pairs with its successor, last wraps to first.
    pairs = list(zip(rest_apis, rest_apis[1:] + [rest_apis[0]]))

    # deplete the channels in one direction
    iwait_and_get(
        [
            gevent.spawn(
                sequential_transfers,
                server_from=server_from,
                server_to=server_to,
                number_of_transfers=deposit,
                token_address=token_address,
                identifier_generator=identifier_generator,
            )
            for server_from, server_to in pairs
        ]
    )

    # deplete the channels in the backwards direction (2x: the forward run
    # above doubled the capacity on this side)
    iwait_and_get(
        [
            gevent.spawn(
                sequential_transfers,
                server_from=server_from,
                server_to=server_to,
                number_of_transfers=deposit * 2,
                token_address=token_address,
                identifier_generator=identifier_generator,
            )
            for server_to, server_from in pairs
        ]
    )

    # reset the balances by sending the "extra" deposit forward
    iwait_and_get(
        [
            gevent.spawn(
                sequential_transfers,
                server_from=server_from,
                server_to=server_to,
                number_of_transfers=deposit,
                token_address=token_address,
                identifier_generator=identifier_generator,
            )
            for server_from, server_to in pairs
        ]
    )
def stress_send_and_receive_parallel_transfers(
    rest_apis: List[APIServer],
    token_address: TokenAddress,
    identifier_generator: Iterator[int],
    deposit: TokenAmount,
) -> None:
    """Send transfers of value one in parallel, in both directions at once,
    leaving the net channel balances unchanged."""
    # Ring topology: each node pairs with its successor, last wraps to first.
    pairs = list(zip(rest_apis, rest_apis[1:] + [rest_apis[0]]))

    forward_transfers = [
        gevent.spawn(
            sequential_transfers,
            server_from=server_from,
            server_to=server_to,
            number_of_transfers=deposit,
            token_address=token_address,
            identifier_generator=identifier_generator,
        )
        for server_from, server_to in pairs
    ]

    # Same pairs, swapped roles: each channel is used in both directions
    # concurrently.
    backwards_transfers = [
        gevent.spawn(
            sequential_transfers,
            server_from=server_from,
            server_to=server_to,
            number_of_transfers=deposit,
            token_address=token_address,
            identifier_generator=identifier_generator,
        )
        for server_to, server_from in pairs
    ]

    iwait_and_get(forward_transfers + backwards_transfers)
def assert_channels(
    raiden_network: List[RaidenService],
    token_network_address: TokenNetworkAddress,
    deposit: TokenAmount,
) -> None:
    """Assert every adjacent channel in the ring is synced, with ``deposit``
    capacity and no pending locks on both sides."""
    node_count = len(raiden_network)
    for position, first in enumerate(raiden_network):
        # Successor in the ring; the last node wraps around to the first.
        second = raiden_network[(position + 1) % node_count]
        wait_assert(
            assert_synced_channel_state,
            token_network_address,
            first,
            deposit,
            [],
            second,
            deposit,
            [],
        )
@pytest.mark.skip(reason="flaky, see https://github.com/raiden-network/raiden/issues/4803")
@raise_on_failure
@pytest.mark.parametrize("number_of_nodes", [3])
@pytest.mark.parametrize("number_of_tokens", [1])
@pytest.mark.parametrize("channels_per_node", [2])
@pytest.mark.parametrize("deposit", [2])
@pytest.mark.parametrize("reveal_timeout", [15])
@pytest.mark.parametrize("settle_timeout", [120])
def test_stress(
    raiden_network: List[RaidenService],
    restart_node: RestartNode,
    deposit: TokenAmount,
    token_addresses: List[TokenAddress],
    port_generator: Iterator[Port],
) -> None:
    """Stress test: run serial, parallel, and bidirectional transfer rounds
    over a 3-node ring, restarting the whole network (nodes + REST servers)
    after each round, twice over."""
    token_address = token_addresses[0]
    rest_apis = start_apiserver_for_network(raiden_network, port_generator)
    # Payment identifiers must be unique across the whole run.
    identifier_generator = count(start=1)

    token_network_address = views.get_token_network_address_by_token_address(
        views.state_from_raiden(raiden_network[0]),
        raiden_network[0].default_registry.address,
        token_address,
    )
    assert token_network_address

    for _ in range(2):
        # Channels must be back to their initial state before each round.
        assert_channels(raiden_network, token_network_address, deposit)

        with watch_for_unlock_failures(*raiden_network):
            stress_send_serial_transfers(rest_apis, token_address, identifier_generator, deposit)

        raiden_network, rest_apis = restart_network_and_apiservers(
            raiden_network, restart_node, rest_apis, port_generator
        )

        assert_channels(raiden_network, token_network_address, deposit)

        with watch_for_unlock_failures(*raiden_network):
            stress_send_parallel_transfers(rest_apis, token_address, identifier_generator, deposit)

        raiden_network, rest_apis = restart_network_and_apiservers(
            raiden_network, restart_node, rest_apis, port_generator
        )

        assert_channels(raiden_network, token_network_address, deposit)

        with watch_for_unlock_failures(*raiden_network):
            stress_send_and_receive_parallel_transfers(
                rest_apis, token_address, identifier_generator, deposit
            )

        raiden_network, rest_apis = restart_network_and_apiservers(
            raiden_network, restart_node, rest_apis, port_generator
        )

    # Final restart to make sure the network still comes back up cleanly.
    restart_network(raiden_network, restart_node)
| 5,439 |
1,131 | <gh_stars>1000+
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Tests for cpu resource limits - Maximum Limits
"""
# Import Local Modules
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.lib.base import (
Account,
ServiceOffering,
VirtualMachine,
Resources,
Domain,
Project
)
from marvin.lib.common import (get_domain,
get_zone,
get_template
)
from marvin.lib.utils import cleanup_resources
class TestMaxCPULimits(cloudstackTestCase):
    @classmethod
    def setUpClass(cls):
        """One-time setup: API client, zone/domain/template lookups shared by all tests."""
        cls.testClient = super(TestMaxCPULimits, cls).getClsTestClient()
        cls.api_client = cls.testClient.getApiClient()
        cls.testdata = cls.testClient.getParsedTestDataConfig()
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.api_client)
        cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
        cls.testdata["mode"] = cls.zone.networktype

        cls.template = get_template(
            cls.api_client,
            cls.zone.id,
            cls.testdata["ostype"]
        )

        cls.testdata["virtual_machine"]["zoneid"] = cls.zone.id
        # Resources appended here are released in tearDownClass.
        cls._cleanup = []
        return
@classmethod
def tearDownClass(cls):
try:
# Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
# Clean up, terminate the created instance, volumes and snapshots
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def createInstance(self, service_off, account=None,
project=None, networks=None, api_client=None):
"""Creates an instance in account"""
if api_client is None:
api_client = self.apiclient
self.debug("Deploying instance")
try:
if account:
vm = VirtualMachine.create(
api_client,
self.testdata["virtual_machine"],
templateid=self.template.id,
accountid=account.name,
domainid=account.domainid,
networkids=networks,
serviceofferingid=service_off.id)
elif project:
vm = VirtualMachine.create(
api_client,
self.testdata["virtual_machine"],
templateid=self.template.id,
projectid=project.id,
networkids=networks,
serviceofferingid=service_off.id)
vms = VirtualMachine.list(api_client, id=vm.id, listall=True)
self.assertIsInstance(vms,
list,
"List VMs should return a valid response")
self.assertEqual(vms[0].state, "Running",
"Vm state should be running after deployment")
return vm
except Exception as e:
self.fail("Failed to deploy an instance: %s" % e)
def setupAccounts(self, account_limit=2, domain_limit=2, project_limit=2):
self.debug("Creating a domain under: %s" % self.domain.name)
self.child_domain = Domain.create(self.apiclient,
services=self.testdata["domain"],
parentdomainid=self.domain.id)
self.debug("domain crated with domain id %s" % self.child_domain.id)
self.child_do_admin = Account.create(self.apiclient,
self.testdata["account"],
admin=True,
domainid=self.child_domain.id)
self.debug("domain admin created for domain id %s" %
self.child_do_admin.domainid)
# Create project as a domain admin
self.project = Project.create(self.apiclient,
self.testdata["project"],
account=self.child_do_admin.name,
domainid=self.child_do_admin.domainid)
# Cleanup created project at end of test
self.cleanup.append(self.project)
# Cleanup accounts created
self.cleanup.append(self.child_do_admin)
self.cleanup.append(self.child_domain)
self.debug("Updating the CPU resource count for domain: %s" %
self.child_domain.name)
# Update resource limits for account 1
responses = Resources.updateLimit(self.apiclient,
resourcetype=8,
max=account_limit,
account=self.child_do_admin.name,
domainid=self.child_do_admin.domainid)
self.debug("CPU Resource count for child domain admin account is now: %s" %
responses.max)
self.debug("Updating the CPU limit for project")
responses = Resources.updateLimit(self.apiclient,
resourcetype=8,
max=project_limit,
projectid=self.project.id)
self.debug("CPU Resource count for project is now")
self.debug(responses.max)
self.debug("Updating the CPU limit for domain only")
responses = Resources.updateLimit(self.apiclient,
resourcetype=8,
max=domain_limit,
domainid=self.child_domain.id)
self.debug("CPU Resource count for domain %s with id %s is now %s" %
(responses.domain, responses.domainid, responses.max))
return
@attr(tags=["advanced", "advancedns","simulator"], required_hardware="false")
def test_01_deploy_vm_domain_limit_reached(self):
"""Test Try to deploy VM with admin account where account has not used
the resources but @ domain they are not available"""
# Validate the following
# 1. Try to deploy VM with admin account where account has not used the
# resources but @ domain they are not available
# 2. Deploy VM should error out saying ResourceAllocationException
# with "resource limit exceeds"
self.debug("Creating service offering with 3 CPU cores")
so = self.testdata["service_offering"]
so["cpunumber"] = 3
self.service_offering = ServiceOffering.create(
self.apiclient,
so
)
# Adding to cleanup list after execution
self.cleanup.append(self.service_offering)
self.debug("Setting up account and domain hierarchy")
self.setupAccounts(account_limit=4, domain_limit=2)
api_client_admin = self.testClient.getUserApiClient(
UserName=self.child_do_admin.name,
DomainName=self.child_do_admin.domain)
with self.assertRaises(Exception):
self.createInstance(account=self.child_do_admin,
service_off=self.service_offering, api_client=api_client_admin)
return
@attr(tags=["advanced", "advancedns","simulator"], required_hardware="false")
def test_02_deploy_vm_account_limit_reached(self):
"""Test Try to deploy VM with admin account where account has used
the resources but @ domain they are available"""
# Validate the following
# 1. Try to deploy VM with admin account where account has used the
# resources but @ domain they are available
# 2. Deploy VM should error out saying ResourceAllocationException
# with "resource limit exceeds"
self.debug("Creating service offering with 4 CPU cores")
self.service_offering = ServiceOffering.create(
self.apiclient,
self.testdata["service_offering_multiple_cores"]
)
# Adding to cleanup list after execution
self.cleanup.append(self.service_offering)
self.debug("Setting up account and domain hierarchy")
self.setupAccounts(account_limit=6, domain_limit=8)
api_client_admin = self.testClient.getUserApiClient(
UserName=self.child_do_admin.name,
DomainName=self.child_do_admin.domain)
self.debug("Deploying instance with account: %s" %
self.child_do_admin.name)
self.createInstance(account=self.child_do_admin,
service_off=self.service_offering, api_client=api_client_admin)
self.debug("Deploying instance when CPU limit is reached in account")
with self.assertRaises(Exception):
self.createInstance(account=self.chid_do_admin,
service_off=self.service_offering, api_client=api_client_admin)
return
@attr(tags=["advanced", "advancedns","simulator"], required_hardware="false")
def test_03_deploy_vm_project_limit_reached(self):
"""Test TTry to deploy VM with admin account where account has not used
the resources but @ project they are not available"""
# Validate the following
# 1. Try to deploy VM with admin account where account has not used the
# resources but @ project they are not available
# 2. Deploy VM should error out saying ResourceAllocationException
# with "resource limit exceeds"
self.debug("Creating service offering with 3 CPU cores")
so = self.testdata["service_offering"]
so["cpunumber"] = 3
self.service_offering = ServiceOffering.create(
self.apiclient,
so
)
# Adding to cleanup list after execution
self.cleanup.append(self.service_offering)
self.debug("Setting up account and domain hierarchy")
self.setupAccounts(account_limit=4, domain_limit=4, project_limit=2)
api_client_admin = self.testClient.getUserApiClient(
UserName=self.child_do_admin.name,
DomainName=self.child_do_admin.domain)
self.debug("Deploying instance in account 2 when CPU limit is reached")
with self.assertRaises(Exception):
self.createInstance(project=self.project,
service_off=self.service_offering, api_client=api_client_admin)
return
@attr(tags=["advanced", "advancedns","simulator"], required_hardware="false")
def test_04_deployVm__account_limit_reached(self):
"""Test Try to deploy VM with admin account where account has used
the resources but @ project they are available"""
# Validate the following
# 1. Try to deploy VM with admin account where account has used the
# resources but @ project they are not available
# 2. Deploy VM should error out saying ResourceAllocationException
# with "resource limit exceeds"
self.debug("Creating service offering with 4 CPU cores")
self.service_offering = ServiceOffering.create(
self.apiclient,
self.testdata["service_offering_multiple_cores"]
)
# Adding to cleanup list after execution
self.cleanup.append(self.service_offering)
self.debug("Setting up account and domain hierarchy")
self.setupAccounts(account_limit=5, domain_limit=5, project_limit=5)
api_client_admin = self.testClient.getUserApiClient(
UserName=self.child_do_admin.name,
DomainName=self.child_do_admin.domain)
self.debug("Deploying instance with account: %s" %
self.child_do_admin.name)
self.createInstance(account=self.child_do_admin,
service_off=self.service_offering, api_client=api_client_admin)
self.debug("Deploying instance in project when CPU limit is reached in account")
with self.assertRaises(Exception):
self.createInstance(project=self.project,
service_off=self.service_offering)
return
| 5,782 |
649 | package net.thucydides.core.fixtureservices;
import org.openqa.selenium.remote.DesiredCapabilities;
/**
 * A {@link FixtureService} whose lifecycle hooks intentionally do nothing.
 * Given its name and package (a test source tree), this is presumably used
 * to verify fixture-service discovery/registration rather than to provide
 * behavior — TODO confirm against the tests that reference it.
 */
public class SampleFixtureService implements FixtureService {
    /** No-op: nothing to initialize. */
    @Override
    public void setup() {
    }
    /** No-op: nothing to release. */
    @Override
    public void shutdown() {
    }
    /** No-op: leaves the supplied {@link DesiredCapabilities} unchanged. */
    @Override
    public void addCapabilitiesTo(DesiredCapabilities capabilities) {
    }
}
| 120 |
2,177 | <gh_stars>1000+
import os
import alp
import re
from alp.item import Item as I
import codecs
import json
def find_projects():
    """Send Alfred feedback items for Sublime Text's recent projects.

    Reads the Sublime Text 3 (preferred) or 2 session file, converts each
    recent workspace path to its ``.sublime-project`` path, and filters the
    list by the user's query (case-insensitive substring/regex match).
    Emits an error item if no Sublime installation is found.
    """
    query = alp.args()[0] if len(alp.args()) else ""

    if os.path.exists("/Applications/Sublime Text.app"):
        session_path = os.path.expanduser(
            "~/Library/Application Support/Sublime Text 3/Local/Session.sublime_session")
    elif os.path.exists("/Applications/Sublime Text 2.app"):
        session_path = os.path.expanduser(
            "~/Library/Application Support/Sublime Text 2/Settings/Session.sublime_session")
    else:
        alp.feedback(I(title="No Sublime Installation",
                       subtitle="Sublime Text 2 or 3 is required.",
                       valid=False))
        return

    with codecs.open(session_path, "r", "utf-8") as f:
        workspaces = json.load(f)["workspaces"]["recent_workspaces"]

    project_names = []
    for workspace in workspaces:
        proj_dir, proj_file = os.path.split(workspace)
        proj_title, _ = proj_file.rsplit(".", 1)
        proj_path = os.path.join(proj_dir, proj_title + ".sublime-project")
        project_names.append((proj_path, proj_title))

    # Escape the query before embedding it in a regex: the original code
    # interpolated the raw query, so metacharacters typed by the user
    # (e.g. "(", "+", "[") raised re.error and crashed the workflow.
    pattern = re.compile(".*%s.*" % re.escape(query), re.IGNORECASE)

    items = []
    for path, title in project_names:
        if len(query) == 0 or pattern.match(path) or pattern.match(title):
            items.append(I(title=title,
                           subtitle=path,
                           arg=path,
                           valid=True,
                           uid=path))

    if len(items):
        alp.feedback(items)
    else:
        alp.feedback(I(title="No Matches",
                       subtitle="No recent projects matched your query.",
                       valid=False))


if __name__ == "__main__":
    find_projects()
| 877 |
1,235 | <reponame>CodeLingoBot/learn
#include <iostream>
using namespace std;
// Classic FizzBuzz over 1..100: multiples of 3 print "Fizz", multiples of
// 5 print "Buzz", multiples of both print "FizzBuzz", everything else
// prints the number itself. One line (flushed via endl) per value.
int main()
{
    int value = 1;
    while ( value <= 100 )
    {
        const bool byThree = ( value % 3 == 0 );
        const bool byFive  = ( value % 5 == 0 );
        if ( byThree && byFive )
        {
            cout << "FizzBuzz" << endl;
        }
        else if ( byThree )
        {
            cout << "Fizz" << endl;
        }
        else if ( byFive )
        {
            cout << "Buzz" << endl;
        }
        else
        {
            cout << value << endl;
        }
        ++value;
    }
}
// ...
// Buzz
// 41
// Fizz
// 43
// 44
// FizzBuzz
// 46
// ...
| 216 |
1,083 | <reponame>agxmaster/polarphp<filename>include/polarphp/parser/LexerFlags.h
// This source file is part of the polarphp.org open source project
//
// Copyright (c) 2017 - 2019 polarphp software foundation
// Copyright (c) 2017 - 2019 zzu_softboy <<EMAIL>>
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://polarphp.org/LICENSE.txt for license information
// See https://polarphp.org/CONTRIBUTORS.txt for the list of polarphp project authors
//
// Created by polarboy on 2019/07/06.
#ifndef POLARPHP_PARSER_LEXER_FLAGS_H
#define POLARPHP_PARSER_LEXER_FLAGS_H
#include "polarphp/basic/FlagSet.h"
#include <cstdint>
namespace polar::parser {
using polar::FlagSet;
/// Bit flags tracking transient lexer state (binary-string mode, heredoc
/// scanning/indentation handling, pending line-number increments, and
/// whether a lex exception occurred), packed into a 16-bit FlagSet.
class LexerFlags final : public FlagSet<std::uint16_t>
{
protected:
   // NOTE: enumerator order defines the bit index of each flag inside the
   // uint16_t storage — do not reorder without considering serialized or
   // externally-observed flag values.
   enum {
      LexingBinaryString,
      HeredocScanAhead,
      IncrementLineNumber,
      HeredocIndentationUsesSpaces,
      ReserveHeredocSpaces,
      LexExceptionOccurred,
      CheckHeredocIndentation
   };
public:
   /// Construct from a raw bit pattern (bits map to the enum above).
   explicit LexerFlags(std::uint16_t bits)
      : FlagSet(bits)
   {}
   /// Default-construct with all flags cleared.
   constexpr LexerFlags()
   {}
   // Each macro expands to an is*/set* accessor pair for one flag bit.
   FLAGSET_DEFINE_FLAG_ACCESSORS(LexingBinaryString, isLexingBinaryString, setLexingBinaryString)
   FLAGSET_DEFINE_FLAG_ACCESSORS(HeredocScanAhead, isHeredocScanAhead, setHeredocScanAhead)
   FLAGSET_DEFINE_FLAG_ACCESSORS(IncrementLineNumber, isIncrementLineNumber, setIncrementLineNumber)
   FLAGSET_DEFINE_FLAG_ACCESSORS(HeredocIndentationUsesSpaces, isHeredocIndentationUsesSpaces, setHeredocIndentationUsesSpaces)
   FLAGSET_DEFINE_FLAG_ACCESSORS(LexExceptionOccurred, isLexExceptionOccurred, setLexExceptionOccurred)
   FLAGSET_DEFINE_FLAG_ACCESSORS(ReserveHeredocSpaces, isReserveHeredocSpaces, setReserveHeredocSpaces)
   FLAGSET_DEFINE_FLAG_ACCESSORS(CheckHeredocIndentation, isCheckHeredocIndentation, setCheckHeredocIndentation)
   // Equality compares the underlying bit patterns.
   FLAGSET_DEFINE_EQUALITY(LexerFlags)
};
} // polar::parser
#endif // POLARPHP_PARSER_LEXER_FLAGS_H
| 731 |
1,018 | <reponame>rajdavies/glowroot
/*
* Copyright 2016-2018 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.glowroot.agent.impl;
import java.util.List;
import java.util.Map;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import org.junit.jupiter.api.Test;
import org.glowroot.agent.model.QueryCollector;
import org.glowroot.agent.model.SharedQueryTextCollection;
import org.glowroot.wire.api.model.AggregateOuterClass.Aggregate;
import static org.assertj.core.api.Assertions.assertThat;
public class QueryCollectorTest {

    /** Merges 300 queries with ascending durations, then checks bucketing. */
    @Test
    public void testAddInAscendingOrder() throws Exception {
        QueryCollector collector = new QueryCollector(100, 4);
        int ordinal = 0;
        while (++ordinal <= 300) {
            collector.mergeQuery("SQL", Integer.toString(ordinal), ordinal, 1, true, 1, false);
        }
        verifyAggregation(collector);
    }

    /** Same as above, but queries arrive with descending durations. */
    @Test
    public void testAddInDescendingOrder() throws Exception {
        QueryCollector collector = new QueryCollector(100, 4);
        int ordinal = 301;
        while (--ordinal > 0) {
            collector.mergeQuery("SQL", Integer.toString(ordinal), ordinal, 1, true, 1, false);
        }
        verifyAggregation(collector);
    }

    /**
     * Asserts that, regardless of insertion order, the 100 slowest queries
     * (durations 201..300) are kept individually and the 200 fastest
     * (durations 1..200) are folded into a single "LIMIT EXCEEDED BUCKET"
     * entry at the head of the aggregate.
     */
    private void verifyAggregation(QueryCollector collector) throws Exception {
        // when
        SharedQueryTextCollectionImpl textCollection = new SharedQueryTextCollectionImpl();
        List<Aggregate.Query> queries = collector.toAggregateProto(textCollection, false);
        // then
        assertThat(queries).hasSize(101);
        Aggregate.Query topQuery = queries.get(0);
        assertThat(textCollection.sharedQueryTexts.get(topQuery.getSharedQueryTextIndex()))
                .isEqualTo("LIMIT EXCEEDED BUCKET");
        // Durations 1..200 overflow into the bucket: sum = 200 * 201 / 2.
        int limitExceededBucketTotalNanos = 200 * 201 / 2;
        assertThat(topQuery.getTotalDurationNanos()).isEqualTo(limitExceededBucketTotalNanos);
        assertThat(queries.get(1).getTotalDurationNanos()).isEqualTo(300);
        assertThat(queries.get(100).getTotalDurationNanos()).isEqualTo(201);
    }

    /** Maps each distinct query text to a stable index, in first-seen order. */
    private static class SharedQueryTextCollectionImpl implements SharedQueryTextCollection {

        private final Map<String, Integer> sharedQueryTextIndexes = Maps.newHashMap();
        private List<String> sharedQueryTexts = Lists.newArrayList();

        @Override
        public int getSharedQueryTextIndex(String queryText) {
            Integer existing = sharedQueryTextIndexes.get(queryText);
            if (existing != null) {
                return existing;
            }
            int assigned = sharedQueryTextIndexes.size();
            sharedQueryTextIndexes.put(queryText, assigned);
            sharedQueryTexts.add(queryText);
            return assigned;
        }
    }
}
| 1,311 |
1,707 | #pragma once
//------------------------------------------------------------------------------
/**
@class Oryol::_priv::ioRouter
@ingroup IO
@brief route IO requests to ioWorkers
*/
#include "Core/Containers/StaticArray.h"
#include "IO/private/ioPointers.h"
#include "IO/private/ioWorker.h"
namespace Oryol {
namespace _priv {
class ioRouter {
public:
    /// setup the router
    void setup(const ioPointers& ptrs);
    /// discard the router
    void discard();
    /// route a ioMsg to one or more workers
    void put(const Ptr<ioMsg>& msg);
    /// perform per-frame work
    void doWork();

    /// fixed number of ioWorker instances owned by the router
    static const int NumWorkers = 4;
    /// index of the next worker to receive a request — presumably
    /// advanced round-robin by put(); confirm in the implementation
    int curWorker = 0;
    /// the worker pool that actually services IO requests
    StaticArray<ioWorker, NumWorkers> workers;
};
} // namespace _priv
} // namespace Oryol
| 262 |
2,151 | <filename>gpu/command_buffer/service/gles2_cmd_validation_autogen.h<gh_stars>1000+
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file is auto-generated from
// gpu/command_buffer/build_gles2_cmd_buffer.py
// It's formatted by clang-format using chromium coding style:
// clang-format -i -style=chromium filename
// DO NOT EDIT!
#ifndef GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_VALIDATION_AUTOGEN_H_
#define GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_VALIDATION_AUTOGEN_H_
// NOTE(review): per the banner above this file is auto-generated by
// build_gles2_cmd_buffer.py — regenerate rather than hand-edit. It is
// textually #included inside a validator-holding class, so everything
// below is a class member declaration. Two patterns appear:
//   - ValueValidator<T> members: allowed-value sets populated at runtime.
//   - Nested *Validator classes: fixed allowed sets checked via IsValid();
//     variants carrying is_es3_ accept additional values once SetIsES3(true)
//     is called.
ValueValidator<GLenum> attachment;
ValueValidator<GLenum> attachment_query;
class BackbufferAttachmentValidator {
 public:
  bool IsValid(const GLenum value) const;
};
BackbufferAttachmentValidator backbuffer_attachment;
class BlitFilterValidator {
 public:
  bool IsValid(const GLenum value) const;
};
BlitFilterValidator blit_filter;
class BufferModeValidator {
 public:
  bool IsValid(const GLenum value) const;
};
BufferModeValidator buffer_mode;
class BufferParameterValidator {
 public:
  bool IsValid(const GLenum value) const;
  BufferParameterValidator();
  void SetIsES3(bool is_es3) { is_es3_ = is_es3; }
 private:
  bool is_es3_;
};
BufferParameterValidator buffer_parameter;
class BufferParameter64Validator {
 public:
  bool IsValid(const GLenum value) const;
};
BufferParameter64Validator buffer_parameter_64;
class BufferTargetValidator {
 public:
  bool IsValid(const GLenum value) const;
  BufferTargetValidator();
  void SetIsES3(bool is_es3) { is_es3_ = is_es3; }
 private:
  bool is_es3_;
};
BufferTargetValidator buffer_target;
class BufferUsageValidator {
 public:
  bool IsValid(const GLenum value) const;
  BufferUsageValidator();
  void SetIsES3(bool is_es3) { is_es3_ = is_es3; }
 private:
  bool is_es3_;
};
BufferUsageValidator buffer_usage;
ValueValidator<GLenum> bufferfi;
class BufferfvValidator {
 public:
  bool IsValid(const GLenum value) const;
};
BufferfvValidator bufferfv;
class BufferivValidator {
 public:
  bool IsValid(const GLenum value) const;
};
BufferivValidator bufferiv;
ValueValidator<GLenum> bufferuiv;
ValueValidator<GLenum> capability;
class CmpFunctionValidator {
 public:
  bool IsValid(const GLenum value) const;
};
CmpFunctionValidator cmp_function;
ValueValidator<GLenum> compressed_texture_format;
ValueValidator<GLenum> coverage_modulation_components;
class DrawModeValidator {
 public:
  bool IsValid(const GLenum value) const;
};
DrawModeValidator draw_mode;
ValueValidator<GLenum> dst_blend_factor;
ValueValidator<GLenum> equation;
class FaceModeValidator {
 public:
  bool IsValid(const GLenum value) const;
};
FaceModeValidator face_mode;
class FaceTypeValidator {
 public:
  bool IsValid(const GLenum value) const;
};
FaceTypeValidator face_type;
ValueValidator<GLenum> framebuffer_parameter;
ValueValidator<GLenum> framebuffer_target;
ValueValidator<GLenum> g_l_state;
class GetMaxIndexTypeValidator {
 public:
  bool IsValid(const GLenum value) const;
};
GetMaxIndexTypeValidator get_max_index_type;
ValueValidator<GLenum> get_tex_param_target;
class HintModeValidator {
 public:
  bool IsValid(const GLenum value) const;
};
HintModeValidator hint_mode;
ValueValidator<GLenum> hint_target;
ValueValidator<GLenum> image_internal_format;
ValueValidator<GLenum> index_type;
class IndexedBufferTargetValidator {
 public:
  bool IsValid(const GLenum value) const;
};
IndexedBufferTargetValidator indexed_buffer_target;
ValueValidator<GLenum> indexed_g_l_state;
class InternalFormatParameterValidator {
 public:
  bool IsValid(const GLenum value) const;
};
InternalFormatParameterValidator internal_format_parameter;
class MapBufferAccessValidator {
 public:
  bool IsValid(const GLenum value) const;
};
MapBufferAccessValidator map_buffer_access;
class MatrixModeValidator {
 public:
  bool IsValid(const GLenum value) const;
};
MatrixModeValidator matrix_mode;
class PathCoordTypeValidator {
 public:
  bool IsValid(const GLenum value) const;
};
PathCoordTypeValidator path_coord_type;
class PathCoverModeValidator {
 public:
  bool IsValid(const GLenum value) const;
};
PathCoverModeValidator path_cover_mode;
class PathFillModeValidator {
 public:
  bool IsValid(const GLenum value) const;
};
PathFillModeValidator path_fill_mode;
class PathFragmentInputGenModeValidator {
 public:
  bool IsValid(const GLenum value) const;
};
PathFragmentInputGenModeValidator path_fragment_input_gen_mode;
class PathInstancedCoverModeValidator {
 public:
  bool IsValid(const GLenum value) const;
};
PathInstancedCoverModeValidator path_instanced_cover_mode;
class PathNameTypeValidator {
 public:
  bool IsValid(const GLenum value) const;
};
PathNameTypeValidator path_name_type;
class PathParameterValidator {
 public:
  bool IsValid(const GLenum value) const;
};
PathParameterValidator path_parameter;
// The two validators below check GLint parameter values, not GLenum.
class PathParameterCapValuesValidator {
 public:
  bool IsValid(const GLint value) const;
};
PathParameterCapValuesValidator path_parameter_cap_values;
class PathParameterJoinValuesValidator {
 public:
  bool IsValid(const GLint value) const;
};
PathParameterJoinValuesValidator path_parameter_join_values;
class PathTransformTypeValidator {
 public:
  bool IsValid(const GLenum value) const;
};
PathTransformTypeValidator path_transform_type;
ValueValidator<GLenum> pixel_store;
class PixelStoreAlignmentValidator {
 public:
  bool IsValid(const GLint value) const;
};
PixelStoreAlignmentValidator pixel_store_alignment;
ValueValidator<GLenum> pixel_type;
class ProgramParameterValidator {
 public:
  bool IsValid(const GLenum value) const;
  ProgramParameterValidator();
  void SetIsES3(bool is_es3) { is_es3_ = is_es3; }
 private:
  bool is_es3_;
};
ProgramParameterValidator program_parameter;
class QueryObjectParameterValidator {
 public:
  bool IsValid(const GLenum value) const;
};
QueryObjectParameterValidator query_object_parameter;
class QueryTargetValidator {
 public:
  bool IsValid(const GLenum value) const;
};
QueryTargetValidator query_target;
ValueValidator<GLenum> read_buffer;
ValueValidator<GLenum> read_pixel_format;
ValueValidator<GLenum> read_pixel_type;
ValueValidator<GLenum> render_buffer_format;
ValueValidator<GLenum> render_buffer_parameter;
ValueValidator<GLenum> render_buffer_target;
class ResetStatusValidator {
 public:
  bool IsValid(const GLenum value) const;
};
ResetStatusValidator reset_status;
class SamplerParameterValidator {
 public:
  bool IsValid(const GLenum value) const;
};
SamplerParameterValidator sampler_parameter;
ValueValidator<GLenum> shader_binary_format;
class ShaderParameterValidator {
 public:
  bool IsValid(const GLenum value) const;
};
ShaderParameterValidator shader_parameter;
class ShaderPrecisionValidator {
 public:
  bool IsValid(const GLenum value) const;
};
ShaderPrecisionValidator shader_precision;
class ShaderTypeValidator {
 public:
  bool IsValid(const GLenum value) const;
};
ShaderTypeValidator shader_type;
ValueValidator<GLenum> src_blend_factor;
class StencilOpValidator {
 public:
  bool IsValid(const GLenum value) const;
};
StencilOpValidator stencil_op;
class StringTypeValidator {
 public:
  bool IsValid(const GLenum value) const;
};
StringTypeValidator string_type;
// Bitfield validators accept any combination of the allowed bits.
class SwapBuffersFlagsValidator {
 public:
  bool IsValid(const GLbitfield value) const;
};
SwapBuffersFlagsValidator swap_buffers_flags;
ValueValidator<GLbitfield> sync_flush_flags;
class SyncParameterValidator {
 public:
  bool IsValid(const GLenum value) const;
};
SyncParameterValidator sync_parameter;
class Texture3DTargetValidator {
 public:
  bool IsValid(const GLenum value) const;
};
Texture3DTargetValidator texture_3_d_target;
ValueValidator<GLenum> texture_bind_target;
class TextureCompareFuncValidator {
 public:
  bool IsValid(const GLenum value) const;
};
TextureCompareFuncValidator texture_compare_func;
ValueValidator<GLenum> texture_compare_mode;
ValueValidator<GLenum> texture_depth_renderable_internal_format;
ValueValidator<GLenum> texture_format;
ValueValidator<GLenum> texture_internal_format;
ValueValidator<GLenum> texture_internal_format_storage;
class TextureMagFilterModeValidator {
 public:
  bool IsValid(const GLenum value) const;
};
TextureMagFilterModeValidator texture_mag_filter_mode;
class TextureMinFilterModeValidator {
 public:
  bool IsValid(const GLenum value) const;
};
TextureMinFilterModeValidator texture_min_filter_mode;
ValueValidator<GLenum> texture_parameter;
ValueValidator<GLenum> texture_sized_color_renderable_internal_format;
ValueValidator<GLenum> texture_sized_texture_filterable_internal_format;
class TextureSrgbDecodeExtValidator {
 public:
  bool IsValid(const GLenum value) const;
};
TextureSrgbDecodeExtValidator texture_srgb_decode_ext;
ValueValidator<GLenum> texture_stencil_renderable_internal_format;
class TextureSwizzleValidator {
 public:
  bool IsValid(const GLenum value) const;
};
TextureSwizzleValidator texture_swizzle;
ValueValidator<GLenum> texture_target;
ValueValidator<GLenum> texture_unsized_internal_format;
class TextureUsageValidator {
 public:
  bool IsValid(const GLenum value) const;
};
TextureUsageValidator texture_usage;
class TextureWrapModeValidator {
 public:
  bool IsValid(const GLenum value) const;
};
TextureWrapModeValidator texture_wrap_mode;
ValueValidator<GLenum> transform_feedback_bind_target;
class TransformFeedbackPrimitiveModeValidator {
 public:
  bool IsValid(const GLenum value) const;
};
TransformFeedbackPrimitiveModeValidator transform_feedback_primitive_mode;
class UniformBlockParameterValidator {
 public:
  bool IsValid(const GLenum value) const;
};
UniformBlockParameterValidator uniform_block_parameter;
class UniformParameterValidator {
 public:
  bool IsValid(const GLenum value) const;
};
UniformParameterValidator uniform_parameter;
class VertexAttribITypeValidator {
 public:
  bool IsValid(const GLenum value) const;
};
VertexAttribITypeValidator vertex_attrib_i_type;
ValueValidator<GLenum> vertex_attrib_type;
ValueValidator<GLenum> vertex_attribute;
ValueValidator<GLenum> vertex_pointer;
class WindowRectanglesModeValidator {
 public:
  bool IsValid(const GLenum value) const;
};
WindowRectanglesModeValidator window_rectangles_mode;
#endif  // GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_VALIDATION_AUTOGEN_H_
| 3,228 |
348 | {"nom":"Tence","circ":"1ère circonscription","dpt":"Haute-Loire","inscrits":2501,"abs":1256,"votants":1245,"blancs":33,"nuls":27,"exp":1185,"res":[{"nuance":"LR","nom":"<NAME>","voix":739},{"nuance":"REM","nom":"<NAME>","voix":446}]} | 94 |
1,891 | <reponame>appdevzhang/AndroidDevByArt-Res
#include "com_ryg_JniTest.h"
#include <stdio.h>
/*
 * Native implementation of JniTest.get(): logs the call to stdout and
 * returns a freshly allocated Java string.
 * NOTE(review): NewStringUTF can return NULL on allocation failure; the
 * Java caller would then receive null — acceptable for this demo.
 */
JNIEXPORT jstring JNICALL Java_com_ryg_JniTest_get(JNIEnv *env, jobject thiz) {
    printf("invoke get from C\n");
    return (*env)->NewStringUTF(env, "Hello from JNI !");
}
/*
 * Native implementation of JniTest.set(jstring): logs the call, then prints
 * the UTF-8 contents of the Java string and releases the JNI buffer.
 */
JNIEXPORT void JNICALL Java_com_ryg_JniTest_set(JNIEnv *env, jobject thiz, jstring string) {
    printf("invoke set from C\n");
    /* GetStringUTFChars may fail (returns NULL and pends OutOfMemoryError);
     * the original dereferenced the result unconditionally. Also keep the
     * pointer const — the JNI buffer must not be modified. */
    const char* str = (*env)->GetStringUTFChars(env, string, NULL);
    if (str == NULL) {
        return;
    }
    printf("%s\n", str);
    (*env)->ReleaseStringUTFChars(env, string, str);
}
921 | // Copyright (c) 2015-2020 <NAME> <<EMAIL>> Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.vladsch.md.nav.actions.api;
import com.intellij.openapi.editor.Editor;
import com.intellij.openapi.extensions.ExtensionPointName;
import com.vladsch.flexmark.util.format.CharWidthProvider;
import com.vladsch.md.nav.actions.handlers.util.CaretContextInfo;
import com.vladsch.md.nav.actions.handlers.util.ListItemContext;
import com.vladsch.md.nav.actions.handlers.util.ParagraphContext;
import com.vladsch.md.nav.actions.handlers.util.PsiEditContext;
import com.vladsch.md.nav.actions.handlers.util.WrappingContext;
import com.vladsch.md.nav.psi.element.MdListImpl;
import com.vladsch.md.nav.psi.element.MdListItemImpl;
import com.vladsch.md.nav.util.MdExtensions;
import com.vladsch.plugin.util.LazyComputable;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
/**
 * Extension point supplying the context objects used while editing Markdown
 * elements (list items and paragraphs) plus a per-editor
 * {@link CharWidthProvider}. The first registered extension wins; if none is
 * registered, a built-in default implementation is used.
 */
public interface MdElementContextInfoProvider {
    ExtensionPointName<MdElementContextInfoProvider> EP_NAME = ExtensionPointName.create("com.vladsch.idea.multimarkdown.elementContextInfoProvider");
    MdExtensions<MdElementContextInfoProvider> EXTENSIONS = new MdExtensions<>(EP_NAME, new MdElementContextInfoProvider[0]);
    // Lazily resolved provider: first registered extension, else the
    // fallback anonymous implementation below.
    LazyComputable<MdElementContextInfoProvider> PROVIDER = new LazyComputable<>(() -> {
        for (MdElementContextInfoProvider provider : EXTENSIONS.getValue()) {
            return provider;
        }
        // use default implementation
        return new MdElementContextInfoProvider() {
            @NotNull
            @Override
            public ListItemContext getListItemContext(@NotNull CaretContextInfo context, @NotNull MdListImpl listElement, @NotNull MdListItemImpl listItemElement, int lineOffset, boolean isEmptyItem, boolean isTaskItem, boolean isItemDone, @NotNull WrappingContext wrappingContext) {
                return new ListItemContext(context, listElement, listItemElement, lineOffset, isEmptyItem, isTaskItem, isItemDone, wrappingContext);
            }
            @NotNull
            @Override
            public CharWidthProvider getCharWidthProvider(@NotNull PsiEditContext editContext, @NotNull Editor editor, int startOffset, int endOffset) {
                // Default: no real width measurement is performed.
                return CharWidthProvider.NULL;
            }
            @Nullable
            @Override
            public ParagraphContext getParagraphContext(@NotNull CaretContextInfo context) {
                return ParagraphContext.Companion.createContext(context);
            }
        };
    });
    /** Builds the list-item editing context for the given caret/list element. */
    @NotNull
    ListItemContext getListItemContext(
            @NotNull CaretContextInfo context,
            @NotNull MdListImpl listElement,
            @NotNull MdListItemImpl listItemElement,
            int lineOffset,
            boolean isEmptyItem,
            boolean isTaskItem,
            boolean isItemDone,
            @NotNull WrappingContext wrappingContext
    );
    /** Builds the paragraph context at the caret, or null when not applicable. */
    @Nullable
    ParagraphContext getParagraphContext(@NotNull CaretContextInfo context);
    /** Returns a character-width provider for the given editor text range. */
    @NotNull
    CharWidthProvider getCharWidthProvider(@NotNull PsiEditContext editContext, @NotNull Editor editor, int startOffset, int endOffset);
}
| 1,183 |
1,443 | <gh_stars>1000+
{
"copyright": "<NAME>",
"url": "http://danreev.es",
"email": "<EMAIL>",
"format": "html"
}
| 54 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.