| max_stars_count (int64, 301 to 224k) | text (string, lengths 6 to 1.05M) | token_count (int64, 3 to 727k) |
---|---|---|
2,542 | <filename>src/prod/src/Hosting2/OverlayNetworkResourceProvider.h
// ------------------------------------------------------------
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License (MIT). See License.txt in the repo root for license information.
// ------------------------------------------------------------
#pragma once
namespace Hosting2
{
// This class provides the following features:
// 1) Encapsulates the overlay IPAM client.
// 2) Exposes APIs to acquire and release overlay network resources (IP and MAC address pairs).
class OverlayNetworkResourceProvider :
public Common::RootedObject,
public Common::FabricComponent,
private Common::TextTraceComponent<Common::TraceTaskCodes::Hosting>
{
DENY_COPY(OverlayNetworkResourceProvider);
public:
OverlayNetworkResourceProvider(
Common::ComponentRootSPtr const & root,
__in OverlayNetworkDefinitionSPtr networkDefinition,
InternalReplenishNetworkResourcesCallback const & internalReplenishNetworkResourcesCallback,
GhostChangeCallback const & ghostChangeCallback);
// Constructor used for testing.
// First parameter is the mock implementation of IIPAM.
// Second parameter is the parent object for this instance.
OverlayNetworkResourceProvider(
Common::ComponentRootSPtr const & root,
__in IOverlayIPAMSPtr ipamClient,
__in OverlayNetworkDefinitionSPtr networkDefinition,
InternalReplenishNetworkResourcesCallback const & internalReplenishNetworkResourcesCallback,
GhostChangeCallback const & ghostChangeCallback);
virtual ~OverlayNetworkResourceProvider();
// Get a flag indicating if the ipam client has been successfully initialized.
__declspec(property(get = get_Initialized)) bool Initialized;
bool get_Initialized() const { return this->ipamInitialized_; };
// AcquireNetworkResource: Acquire network resources for all code packages passed in. If there is a failure
// we release all network resources acquired so far.
//
// The arguments are:
// nodeId: Id of the node.
// servicePackageId: Id of the service package being deployed.
// codePackageNames: Code packages being deployed that are part of the service package.
//
// Returns:
// An error code indicating if the acquisition was successful.
//
Common::AsyncOperationSPtr BeginAcquireNetworkResources(
std::wstring const & nodeId,
std::wstring const & servicePackageId,
std::vector<std::wstring> const & codePackageNames,
Common::TimeSpan const timeout,
Common::AsyncCallback const & callback,
Common::AsyncOperationSPtr const & parent);
Common::ErrorCode EndAcquireNetworkResources(
Common::AsyncOperationSPtr const & operation,
__out std::vector<std::wstring> & assignedNetworkResources);
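//
// Illustrative call pattern (a sketch only; `provider`, `timeout` and the other local
// names here are hypothetical, and End would normally be invoked from the completion
// callback as shown):
//
// auto operation = provider->BeginAcquireNetworkResources(
//     nodeId, servicePackageId, codePackageNames, timeout,
//     [provider](Common::AsyncOperationSPtr const & op)
//     {
//         std::vector<std::wstring> assignedNetworkResources;
//         auto error = provider->EndAcquireNetworkResources(op, assignedNetworkResources);
//     },
//     parent);
//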
// ReleaseNetworkResources: Releases network resources for all code packages in the service package for the given node.
//
// The arguments are:
// nodeId: Id of the node.
// servicePackageId: Id of the service package being deployed.
//
// Returns:
// An error code indicating if the release was successful.
//
Common::AsyncOperationSPtr BeginReleaseNetworkResources(
std::wstring const & nodeId,
std::wstring const & servicePackageId,
Common::TimeSpan const timeout,
Common::AsyncCallback const & callback,
Common::AsyncOperationSPtr const & parent);
Common::ErrorCode EndReleaseNetworkResources(
Common::AsyncOperationSPtr const & operation);
// ReleaseAllNetworkResourcesForNode: Release all network resources assigned to the node when node crashes/fails.
//
// The arguments are:
// nodeId: Id of the node.
//
// Returns:
// An error code indicating if the release was successful.
//
Common::AsyncOperationSPtr BeginReleaseAllNetworkResourcesForNode(
std::wstring const & nodeId,
Common::TimeSpan const timeout,
Common::AsyncCallback const & callback,
Common::AsyncOperationSPtr const & parent);
Common::ErrorCode EndReleaseAllNetworkResourcesForNode(
Common::AsyncOperationSPtr const & operation);
// GetReservedCodePackages: Get a copy of a map of reserved ids and code packages.
//
// The arguments are:
// reservedCodePackages: Updated with a reference to a copy of the internal reservedCodePackages map.
// reservationIdCodePackageMap: Updated with a reference to a copy of the internal reservationIdCodePackageMap map.
//
void GetReservedCodePackages(std::map<std::wstring, std::map<std::wstring, vector<std::wstring>>> & reservedCodePackages,
std::map<std::wstring, std::wstring> & reservationIdCodePackageMap);
// OnNewIpamData: This method is called when network resource data has been
// updated by the Network Inventory Manager. It is responsible for maintaining
// a running list of network resources that can be applied to the reservation pool.
//
// The arguments are:
// ipMacAddressMapToBeAdded: The collection of ip/mac pairs to be added.
// ipMacAddressMapToBeRemoved: The collection of ip/mac pairs to be deleted.
//
void OnNewIpamData(std::map<std::wstring, std::wstring> const & ipMacAddressMapToBeAdded,
std::map<std::wstring, std::wstring> const & ipMacAddressMapToBeRemoved);
protected:
Common::ErrorCode OnOpen();
Common::ErrorCode OnClose();
void OnAbort();
private:
class AcquireNetworkResourcesAsyncOperation;
class ReleaseNetworkResourcesAsyncOperation;
class ReleaseAllNetworkResourcesForNodeAsyncOperation;
// Populate ip/mac pair into network resources set
std::unordered_set<OverlayNetworkResourceSPtr> PopulateNetworkResources(std::map<std::wstring, std::wstring> const & ipMacAddressMap);
// Lock used for access to network resources
Common::ExclusiveLock networkResourceProviderLock_;
// Overlay network definition
OverlayNetworkDefinitionSPtr networkDefinition_;
// Indicator for ipam client initialization
bool ipamInitialized_;
// Overlay IPAM client interface
IOverlayIPAMSPtr ipam_;
// Mapping of node id, service package id and code packages.
std::map<std::wstring, std::map<std::wstring, vector<std::wstring>>> reservedCodePackages_;
// Mapping of reservation id and code package
std::map<std::wstring, std::wstring> reservationIdCodePackageMap_;
// Overlay network manager callback to replenish network resources
InternalReplenishNetworkResourcesCallback internalReplenishNetworkResourcesCallback_;
// This contains a reference to the method to call whenever the set of
// ghost reservations changes.
//
GhostChangeCallback ghostChangeCallback_;
};
} | 3,053 |
1,056 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.netbeans.modules.bugzilla.kenai;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.eclipse.mylyn.internal.bugzilla.core.RepositoryConfiguration;
import org.netbeans.modules.bugzilla.repository.BugzillaConfiguration;
import org.netbeans.modules.bugzilla.repository.BugzillaRepository;
import org.netbeans.modules.bugzilla.util.BugzillaUtil;
/**
*
* @author <NAME>
*/
public class KenaiConfiguration extends BugzillaConfiguration {
private List<String> products;
private KenaiRepository repository;
/** one instance for all kenai repositories on each kenai site */
private static Map<String, RepositoryConfiguration> rcs;
public KenaiConfiguration(KenaiRepository repository, String product) {
this.repository = repository;
ArrayList<String> l = new ArrayList<String>();
l.add(product);
this.products = Collections.unmodifiableList(l);
}
@Override
public List<String> getProducts() {
if(!BugzillaUtil.isNbRepository(repository)) {
return products;
} else {
return super.getProducts();
}
}
@Override
public List<String> getComponents(String product) {
return super.getComponents(product);
}
@Override
public List<String> getVersions(String product) {
return super.getVersions(product);
}
void reset() {
if(rcs != null) {
rcs.remove(repository.getUrl());
}
}
@Override
protected RepositoryConfiguration getRepositoryConfiguration(BugzillaRepository repository, boolean forceRefresh) {
if(rcs == null) {
rcs = new HashMap<String, RepositoryConfiguration>(1);
}
RepositoryConfiguration rc = rcs.get(repository.getUrl());
if(rc == null || forceRefresh) {
rc = super.getRepositoryConfiguration(repository, forceRefresh);
rcs.put(repository.getUrl(), rc);
}
if(rc != null && (!forceRefresh && !hasProduct(rc))) {
// mylyn is caching the configuration for us so in case
// forceRefresh=false and it doesn't contain the given project
// we have to force refresh it one more time to get the needed
// project data from the server
forceRefresh = true;
rc = super.getRepositoryConfiguration(repository, forceRefresh);
}
return rc;
}
private boolean hasProduct(RepositoryConfiguration rc) {
List<String> knownProducts = rc.getProducts();
for (String product : products) {
if(!knownProducts.contains(product)) {
return false;
}
}
return true;
}
}
| 1,292 |
763 | package org.batfish.datamodel.flow;
import static org.hamcrest.Matchers.equalTo;
import static org.junit.Assert.assertThat;
import com.google.common.testing.EqualsTester;
import org.batfish.common.util.BatfishObjectMapper;
import org.batfish.datamodel.Ip;
import org.batfish.datamodel.collections.NodeInterfacePair;
import org.batfish.datamodel.flow.DeliveredStep.DeliveredStepDetail;
import org.junit.Test;
public class DeliveredStepTest {
@Test
public void testJsonSerialization() {
DeliveredStep step =
DeliveredStep.builder()
.setAction(StepAction.DELIVERED_TO_SUBNET)
.setDetail(
DeliveredStepDetail.builder()
.setOutputInterface(NodeInterfacePair.of("node", "iface"))
.setResolvedNexthopIp(Ip.parse("1.1.1.1"))
.build())
.build();
DeliveredStep clonedStep = BatfishObjectMapper.clone(step, DeliveredStep.class);
assertThat(clonedStep.getAction(), equalTo(StepAction.DELIVERED_TO_SUBNET));
assertThat(
clonedStep.getDetail().getOutputInterface(),
equalTo(NodeInterfacePair.of("node", "iface")));
assertThat(clonedStep.getDetail().getResolvedNexthopIp(), equalTo(Ip.parse("1.1.1.1")));
}
@Test
public void testEquals() {
new EqualsTester()
.addEqualityGroup(new Object())
.addEqualityGroup(
DeliveredStep.builder()
.setAction(StepAction.DELIVERED_TO_SUBNET)
.setDetail(
DeliveredStepDetail.builder()
.setOutputInterface(NodeInterfacePair.of("h", "i"))
.build())
.build(),
DeliveredStep.builder()
.setAction(StepAction.DELIVERED_TO_SUBNET)
.setDetail(
DeliveredStepDetail.builder()
.setOutputInterface(NodeInterfacePair.of("h", "i"))
.build())
.build())
.addEqualityGroup(
DeliveredStep.builder()
.setAction(StepAction.EXITS_NETWORK)
.setDetail(
DeliveredStepDetail.builder()
.setOutputInterface(NodeInterfacePair.of("h", "i"))
.build())
.build())
.addEqualityGroup(
DeliveredStep.builder()
.setAction(StepAction.EXITS_NETWORK)
.setDetail(
DeliveredStepDetail.builder()
.setOutputInterface(NodeInterfacePair.of("h", "i1"))
.build())
.build())
.addEqualityGroup(
DeliveredStep.builder()
.setAction(StepAction.EXITS_NETWORK)
.setDetail(
DeliveredStepDetail.builder()
.setOutputInterface(NodeInterfacePair.of("h", "i1"))
.setResolvedNexthopIp(Ip.ZERO)
.build())
.build())
.testEquals();
}
}
| 1,620 |
2,338 | // RUN: %clang_cc1 -fno-builtin-memccpy -emit-llvm < %s| FileCheck %s
typedef __SIZE_TYPE__ size_t;
void *memccpy(void *, void const *, int, size_t);
void test13(char *d, char *s, int c, size_t n) {
// CHECK: call i8* @memccpy{{.*}} #2
memccpy(d, s, c, n);
}
// CHECK: attributes #2 = { nobuiltin "no-builtin-memccpy" }
| 152 |
317 | <gh_stars>100-1000
//
// Serialize.cc
// drafter
//
// Created by <NAME> on 5/3/13.
// Copyright (c) 2013 Apiary Inc. All rights reserved.
//
#include "Serialize.h"
#include "StringUtility.h"
#include <cstdlib>
using namespace drafter;
using namespace refract;
template <>
std::pair<bool, dsd::Boolean> drafter::LiteralTo<dsd::Boolean>(const mson::Literal& literal)
{
if (literal == "true" || literal == "false") {
return std::make_pair(true, dsd::Boolean{ literal == SerializeKey::True });
}
return std::make_pair(false, dsd::Boolean{});
}
namespace
{
///
/// Regular expression matching a ECMA-404 number.
///
/// > A number is a sequence of decimal digits with no superfluous
/// > leading zero. It may have a preceding minus sign (U+002D).
/// > It may have a fractional part prefixed by a decimal point (U+002E).
/// > It may have an exponent, prefixed by e (U+0065) or E (U+0045) and
/// > optionally + (U+002B) or – (U+002D) . The digits are the code points
/// > U+0030 through U+0039.
///
// clang-format off
// const std::regex json_number_expression(R"REGEX(-?(?:0|[1-9]\d*)(?:\.\d+)?(?:[eE][+-]?\d+)?)REGEX");
// clang-format on
template <typename It, typename Predicate>
It many(It begin, It end, Predicate predicate)
{
for (; begin != end; ++begin)
if (!predicate(*begin))
break;
return begin;
}
template <typename It, typename Predicate>
It optional(It begin, It end, Predicate predicate)
{
if (begin != end && predicate(*begin))
++begin;
return begin;
}
template <typename It>
bool isValidNumber(It b, It e)
{
// >
// > It may have a preceding minus sign (U+002D).
static_assert(0x2D == '-', "");
b = optional(b, e, [](char c) { return c == '-'; });
// >
// > The digits are the code points U+0030 through U+0039.
static_assert(0x30 == '0', "");
static_assert(0x39 == '9', "");
const auto is_digit = [](char c) -> bool { return c >= '0' && c <= '9'; };
// >
// > A number is a sequence of decimal digits with no superfluous
// > leading zero.
if (b == e)
return false;
if (*b == '0') {
++b;
if (b == e)
return true;
} else {
if (!is_digit(*b))
return false;
b = many(b, e, is_digit);
if (b == e)
return true;
}
// >
// > It may have a fractional part prefixed by a decimal point (U+002E).
static_assert(0x2E == '.', "");
if (*b == '.') {
++b;
if (b == e)
return false;
if (!is_digit(*b))
return false;
b = many(b, e, is_digit);
if (b == e)
return true;
}
// >
// > It may have an exponent, prefixed by e (U+0065) or E (U+0045) and
// > optionally + (U+002B) or – (U+002D)
static_assert(0x65 == 'e', "");
static_assert(0x45 == 'E', "");
if (*b == 'e' || *b == 'E') {
++b;
static_assert(0x2B == '+', "");
static_assert(0x2D == '-', "");
b = optional(b, e, [](char c) { return c == '+' || c == '-'; });
if (b == e)
return false;
if (!is_digit(*b))
return false;
b = many(b, e, is_digit);
if (b == e)
return true;
}
return false;
}
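// Illustrative expectations for isValidNumber, derived from the ECMA-404 rules
// quoted above (a sketch of behaviour, not an exhaustive test):
//   "0", "-0.5", "1e10", "1.25E-3"  -> valid
//   "01", ".5", "1.", "1e", "+1"    -> invalid (superfluous leading zero, missing
//                                      digits after '.' or the exponent, or a leading '+')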
} // namespace
template <>
std::pair<bool, dsd::Number> drafter::LiteralTo<dsd::Number>(const mson::Literal& literal)
{
using std::begin;
using std::end;
// ignore spaces to the right
auto match_end = end(literal);
for (; match_end != begin(literal); --match_end) {
if (!std::isspace(*(match_end - 1)))
break;
}
if (isValidNumber(begin(literal), match_end)) {
return { true, dsd::Number{ std::string(begin(literal), match_end) } };
}
return { false, dsd::Number{} };
}
template <>
std::pair<bool, dsd::String> drafter::LiteralTo<dsd::String>(const mson::Literal& literal)
{
return std::make_pair(!literal.empty(), dsd::String{ literal });
}
| 2,098 |
2,816 | /*
* Legal Notice
*
* This document and associated source code (the "Work") is a part of a
* benchmark specification maintained by the TPC.
*
* The TPC reserves all right, title, and interest to the Work as provided
* under U.S. and international laws, including without limitation all patent
* and trademark rights therein.
*
* No Warranty
*
* 1.1 TO THE MAXIMUM EXTENT PERMITTED BY APPLICABLE LAW, THE INFORMATION
* CONTAINED HEREIN IS PROVIDED "AS IS" AND WITH ALL FAULTS, AND THE
* AUTHORS AND DEVELOPERS OF THE WORK HEREBY DISCLAIM ALL OTHER
* WARRANTIES AND CONDITIONS, EITHER EXPRESS, IMPLIED OR STATUTORY,
* INCLUDING, BUT NOT LIMITED TO, ANY (IF ANY) IMPLIED WARRANTIES,
* DUTIES OR CONDITIONS OF MERCHANTABILITY, OF FITNESS FOR A PARTICULAR
* PURPOSE, OF ACCURACY OR COMPLETENESS OF RESPONSES, OF RESULTS, OF
* WORKMANLIKE EFFORT, OF LACK OF VIRUSES, AND OF LACK OF NEGLIGENCE.
* ALSO, THERE IS NO WARRANTY OR CONDITION OF TITLE, QUIET ENJOYMENT,
* QUIET POSSESSION, CORRESPONDENCE TO DESCRIPTION OR NON-INFRINGEMENT
* WITH REGARD TO THE WORK.
* 1.2 IN NO EVENT WILL ANY AUTHOR OR DEVELOPER OF THE WORK BE LIABLE TO
* ANY OTHER PARTY FOR ANY DAMAGES, INCLUDING BUT NOT LIMITED TO THE
* COST OF PROCURING SUBSTITUTE GOODS OR SERVICES, LOST PROFITS, LOSS
* OF USE, LOSS OF DATA, OR ANY INCIDENTAL, CONSEQUENTIAL, DIRECT,
* INDIRECT, OR SPECIAL DAMAGES WHETHER UNDER CONTRACT, TORT, WARRANTY,
* OR OTHERWISE, ARISING IN ANY WAY OUT OF THIS OR ANY OTHER AGREEMENT
* RELATING TO THE WORK, WHETHER OR NOT SUCH AUTHOR OR DEVELOPER HAD
* ADVANCE NOTICE OF THE POSSIBILITY OF SUCH DAMAGES.
*
* Contributors
* - <NAME>, <NAME>, <NAME>, <NAME>
*/
/******************************************************************************
* Description: This file is used to "expose" the configurable driver
* parameters. These values may be set by test sponsors
* for testing and prototyping. The default values
* represent those that must be used for a compliant run.
******************************************************************************/
#ifndef DRIVER_PARAM_SETTINGS_H
#define DRIVER_PARAM_SETTINGS_H
#include <iostream>
#include <iomanip> // for log message formatting
#include <sstream> // for log message construction
#include "utilities/EGenUtilities_stdafx.h"
namespace TPCE {
// CHECK tests for CheckValid and CheckCompliant, use macros so we can get a
// textual representation of the tested values, these tests could be
// significantly condensed but we're trying for readability here...
#define DRIVERPARAM_CHECK_EQUAL(name, lhs, rhs) \
if ((lhs) != (rhs)) { \
std::ostringstream strm; \
strm << #lhs << "(" << (lhs) << ") != " << #rhs << "(" << (rhs) << ")"; \
throw CCheckErr(name, strm.str()); \
}
#define DRIVERPARAM_CHECK_GE(name, lhs, rhs) \
if ((lhs) < (rhs)) { \
std::ostringstream strm; \
strm << #lhs << "(" << (lhs) << ") < " << #rhs << "(" << (rhs) << ")"; \
throw CCheckErr(name, strm.str()); \
}
#define DRIVERPARAM_CHECK_LE(name, lhs, rhs) \
if ((lhs) > (rhs)) { \
std::ostringstream strm; \
strm << #lhs << "(" << (lhs) << ") > " << #rhs << "(" << (rhs) << ")"; \
throw CCheckErr(name, strm.str()); \
}
#define DRIVERPARAM_CHECK_BETWEEN(name, lhs, minval, maxval) \
DRIVERPARAM_CHECK_GE(name, lhs, minval) \
DRIVERPARAM_CHECK_LE(name, lhs, maxval)
#define DRIVERPARAM_CHECK_DEFAULT(name) \
if (cur.name != dft.name) { \
std::ostringstream strm; \
strm << #name << "(" << cur.name << ") != " << dft.name; \
throw CCheckErr(#name, strm.str()); \
}
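// Illustrative expansion (a sketch): the check
//   DRIVERPARAM_CHECK_BETWEEN("lifo", cur.lifo, 0, 100)
// used in CTradeOrderSettings::CheckValid below expands to a GE check followed by an
// LE check; if cur.lifo were 150, the LE check would throw
// CCheckErr("lifo", "cur.lifo(150) > 100(100)").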
/******************************************************************************
* Parameter Base Class Template
******************************************************************************/
template <typename T> class CParametersWithoutDefaults {
public:
T cur;
CParametersWithoutDefaults() {
}
/*
* Virtual destructor. Provided so that a sponsor-specific
* destructor can be called on destruction from the base-class pointer.
*
* PARAMETERS:
* none.
*
* RETURNS:
* not applicable.
*/
virtual ~CParametersWithoutDefaults(){};
virtual void CheckValid(void) = 0;
virtual void CheckCompliant(void) = 0;
bool IsValid(void) {
try {
CheckValid();
return true;
} catch (CCheckErr) {
throw;
return false;
}
}
bool IsCompliant(void) {
// This will always return true for the classes defined in this file
try {
CheckCompliant();
return true;
} catch (CCheckErr) {
throw;
return false;
}
}
};
template <typename T, typename T2> class CParametersWithDefaults {
// protected:
public:
T dft;
T2 state;
public:
T cur;
CParametersWithDefaults() {
}
/*
* Virtual destructor. Provided so that a sponsor-specific
* destructor can be called on destruction from the base-class pointer.
*
* PARAMETERS:
* none.
*
* RETURNS:
* not applicable.
*/
virtual ~CParametersWithDefaults(){};
void Initialize(void) {
InitializeDefaults();
SetToDefaults();
}
void SetToDefaults(void) {
cur = dft;
CheckDefaults();
}
virtual void InitializeDefaults(void) {
}
virtual void CheckDefaults(void) {
}
virtual void CheckValid(void) = 0;
virtual void CheckCompliant(void) = 0;
bool IsValid(void) {
try {
CheckValid();
return true;
} catch (CCheckErr) {
throw;
return false;
}
}
bool IsCompliant(void) {
try {
CheckCompliant();
return true;
} catch (CCheckErr) {
throw;
return false;
}
}
};
/******************************************************************************
* Parameter Structures (Data)
******************************************************************************/
typedef struct TBrokerVolumeSettings {
} * PBrokerVolumeSettings;
typedef struct TCustomerPositionSettings {
INT32 by_cust_id; // percentage
INT32 by_tax_id; // percentage
INT32 get_history; // percentage
} * PCustomerPositionSettings;
typedef struct TMarketWatchSettings {
INT32 by_acct_id; // percentage
INT32 by_industry; // percentage
INT32 by_watch_list; // percentage
} * PMarketWatchSettings;
typedef struct TSecurityDetailSettings {
INT32 LOBAccessPercentage;
} * PSecurityDetailSettings;
typedef struct TTradeLookupSettings {
INT32 do_frame1; // percentage
INT32 do_frame2; // percentage
INT32 do_frame3; // percentage
INT32 do_frame4; // percentage
INT32 MaxRowsFrame1; // Max number of trades for frame
INT32 BackOffFromEndTimeFrame2; // Used to cap time interval generated.
INT32 MaxRowsFrame2; // Max number of trades for frame
INT32 BackOffFromEndTimeFrame3; // Used to cap time interval generated.
INT32 MaxRowsFrame3; // Max number of trades for frame
INT32 BackOffFromEndTimeFrame4; // Used to cap time interval generated.
INT32 MaxRowsFrame4; // Max number of rows for frame
} * PTradeLookupSettings;
typedef struct TTradeOrderSettings {
INT32 market;
INT32 limit;
INT32 stop_loss;
INT32 security_by_name;
INT32 security_by_symbol;
INT32 buy_orders;
INT32 sell_orders;
INT32 lifo;
INT32 exec_is_owner;
INT32 rollback;
INT32 type_is_margin;
} * PTradeOrderSettings;
typedef struct TTradeUpdateSettings {
INT32 do_frame1; // percentage
INT32 do_frame2; // percentage
INT32 do_frame3; // percentage
INT32 MaxRowsFrame1; // Max number of trades for frame
INT32 MaxRowsToUpdateFrame1; // Max number of rows to update
INT32 BackOffFromEndTimeFrame2; // Used to cap time interval generated.
INT32 MaxRowsFrame2; // Max number of trades for frame
INT32 MaxRowsToUpdateFrame2; // Max number of rows to update
INT32 BackOffFromEndTimeFrame3; // Used to cap time interval generated.
INT32 MaxRowsFrame3; // Max number of trades for frame
INT32 MaxRowsToUpdateFrame3; // Max number of rows to update
} * PTradeUpdateSettings;
typedef struct TTxnMixGeneratorSettings {
// Market-Feed and Trade-Result settings don't really alter the mix.
// They are done as a by-product of Trade-Orders. However, the values
// still need to be set correctly because they get used when generating
// the random number for selecting the other transaction types.
INT32 BrokerVolumeMixLevel;
INT32 CustomerPositionMixLevel;
INT32 MarketFeedMixLevel;
INT32 MarketWatchMixLevel;
INT32 SecurityDetailMixLevel;
INT32 TradeLookupMixLevel;
INT32 TradeOrderMixLevel;
INT32 TradeResultMixLevel;
INT32 TradeStatusMixLevel;
INT32 TradeUpdateMixLevel;
// Transaction mix levels are expressed out of a total of 1000.
INT32 TransactionMixTotal;
} * PTxnMixGeneratorSettings;
typedef struct TLoaderSettings {
TIdent iConfiguredCustomerCount;
TIdent iActiveCustomerCount;
INT32 iScaleFactor;
INT32 iDaysOfInitialTrades;
TIdent iStartingCustomer;
TIdent iCustomerCount;
} * pLoaderSettings;
typedef struct TDriverGlobalSettings {
TIdent iConfiguredCustomerCount;
TIdent iActiveCustomerCount;
INT32 iScaleFactor;
INT32 iDaysOfInitialTrades;
} * PDriverGlobalSettings;
typedef struct TDriverCESettings {
UINT32 UniqueId;
RNGSEED TxnMixRNGSeed;
RNGSEED TxnInputRNGSeed;
} * PDriverCESettings;
typedef struct TDriverCEPartitionSettings {
TIdent iMyStartingCustomerId;
TIdent iMyCustomerCount;
INT32 iPartitionPercent;
} * PDriverCEPartitionSettings;
typedef struct TDriverMEESettings {
UINT32 UniqueId;
RNGSEED RNGSeed;
RNGSEED TickerTapeRNGSeed;
RNGSEED TradingFloorRNGSeed;
} * PDriverMEESettings;
typedef struct TDriverDMSettings {
UINT32 UniqueId;
RNGSEED RNGSeed;
} * PDriverDMSettings;
/******************************************************************************
* Parameter Structures (Boolean "Is Default" State)
******************************************************************************/
struct TBrokerVolumeSettingsState {};
struct TCustomerPositionSettingsState {
bool by_cust_id; // percentage
bool by_tax_id; // percentage
bool get_history; // percentage
};
struct TMarketWatchSettingsState {
bool by_acct_id; // percentage
bool by_industry; // percentage
bool by_watch_list; // percentage
};
struct TSecurityDetailSettingsState {
bool LOBAccessPercentage;
};
struct TTradeLookupSettingsState {
bool do_frame1; // percentage
bool do_frame2; // percentage
bool do_frame3; // percentage
bool do_frame4; // percentage
bool MaxRowsFrame1; // Max number of trades for frame
bool BackOffFromEndTimeFrame2; // Used to cap time interval generated.
bool MaxRowsFrame2; // Max number of trades for frame
bool BackOffFromEndTimeFrame3; // Used to cap time interval generated.
bool MaxRowsFrame3; // Max number of trades for frame
bool BackOffFromEndTimeFrame4; // Used to cap time interval generated.
bool MaxRowsFrame4; // Max number of rows for frame
};
struct TTradeOrderSettingsState {
bool market;
bool limit;
bool stop_loss;
bool security_by_name;
bool security_by_symbol;
bool buy_orders;
bool sell_orders;
bool lifo;
bool exec_is_owner;
bool rollback;
bool type_is_margin;
};
struct TTradeUpdateSettingsState {
bool do_frame1; // percentage
bool do_frame2; // percentage
bool do_frame3; // percentage
bool MaxRowsFrame1; // Max number of trades for frame
bool MaxRowsToUpdateFrame1; // Max number of rows to update
bool BackOffFromEndTimeFrame2; // Used to cap time interval generated.
bool MaxRowsFrame2; // Max number of trades for frame
bool MaxRowsToUpdateFrame2; // Max number of rows to update
bool BackOffFromEndTimeFrame3; // Used to cap time interval generated.
bool MaxRowsFrame3; // Max number of trades for frame
bool MaxRowsToUpdateFrame3; // Max number of rows to update
};
struct TTxnMixGeneratorSettingsState {
bool BrokerVolumeMixLevel;
bool CustomerPositionMixLevel;
bool MarketWatchMixLevel;
bool SecurityDetailMixLevel;
bool TradeLookupMixLevel;
bool TradeOrderMixLevel;
bool TradeStatusMixLevel;
bool TradeUpdateMixLevel;
bool TransactionMixTotal;
};
struct TLoaderSettingsState {
bool iConfiguredCustomerCount;
bool iActiveCustomerCount;
bool iScaleFactor;
bool iDaysOfInitialTrades;
bool iStartingCustomer;
bool iCustomerCount;
};
struct TDriverCEPartitionSettingsState {
bool iPartitionPercent;
};
struct TDriverGlobalSettingsState {
bool iConfiguredCustomerCount;
bool iActiveCustomerCount;
bool iScaleFactor;
bool iDaysOfInitialTrades;
};
/******************************************************************************
* Parameter Derived Class / Template Instantiation
******************************************************************************/
class CBrokerVolumeSettings
: public CParametersWithDefaults<struct TBrokerVolumeSettings, struct TBrokerVolumeSettingsState> {
public:
CBrokerVolumeSettings() {
Initialize();
}
void InitializeDefaults(void) {
}
void CheckDefaults(void) {
}
void CheckValid(void) {
}
void CheckCompliant(void) {
}
};
class CCustomerPositionSettings
: public CParametersWithDefaults<struct TCustomerPositionSettings, struct TCustomerPositionSettingsState> {
public:
CCustomerPositionSettings() {
Initialize();
}
void InitializeDefaults(void) {
dft.by_cust_id = 50;
dft.by_tax_id = 50;
dft.get_history = 50;
}
void CheckDefaults(void) {
state.by_cust_id = (cur.by_cust_id == dft.by_cust_id);
state.by_tax_id = (cur.by_tax_id == dft.by_tax_id);
state.get_history = (cur.get_history == dft.get_history);
}
void CheckValid(void) {
DRIVERPARAM_CHECK_BETWEEN("by_cust_id", cur.by_cust_id, 0, 100);
DRIVERPARAM_CHECK_BETWEEN("by_tax_id", cur.by_tax_id, 0, 100);
DRIVERPARAM_CHECK_EQUAL("by_*_id total", cur.by_cust_id + cur.by_tax_id, 100);
DRIVERPARAM_CHECK_BETWEEN("get_history", cur.get_history, 0, 100);
}
void CheckCompliant(void) {
CheckValid();
DRIVERPARAM_CHECK_DEFAULT(by_cust_id);
DRIVERPARAM_CHECK_DEFAULT(by_tax_id);
DRIVERPARAM_CHECK_DEFAULT(get_history);
}
};
class CMarketWatchSettings
: public CParametersWithDefaults<struct TMarketWatchSettings, struct TMarketWatchSettingsState> {
public:
CMarketWatchSettings() {
Initialize();
}
void InitializeDefaults(void) {
dft.by_acct_id = 35;
dft.by_industry = 5;
dft.by_watch_list = 60;
}
void CheckDefaults(void) {
state.by_acct_id = (cur.by_acct_id == dft.by_acct_id);
state.by_industry = (cur.by_industry == dft.by_industry);
state.by_watch_list = (cur.by_watch_list == dft.by_watch_list);
}
void CheckValid(void) {
DRIVERPARAM_CHECK_BETWEEN("by_acct_id", cur.by_acct_id, 0, 100);
DRIVERPARAM_CHECK_BETWEEN("by_industry", cur.by_industry, 0, 100);
DRIVERPARAM_CHECK_BETWEEN("by_watch_list", cur.by_watch_list, 0, 100);
DRIVERPARAM_CHECK_EQUAL("by_* total", cur.by_acct_id + cur.by_industry + cur.by_watch_list, 100);
}
void CheckCompliant(void) {
CheckValid();
DRIVERPARAM_CHECK_DEFAULT(by_acct_id);
DRIVERPARAM_CHECK_DEFAULT(by_industry);
DRIVERPARAM_CHECK_DEFAULT(by_watch_list);
}
};
class CSecurityDetailSettings
: public CParametersWithDefaults<struct TSecurityDetailSettings, struct TSecurityDetailSettingsState> {
public:
CSecurityDetailSettings() {
Initialize();
}
void InitializeDefaults(void) {
dft.LOBAccessPercentage = 1;
}
void CheckDefaults(void) {
state.LOBAccessPercentage = (cur.LOBAccessPercentage == dft.LOBAccessPercentage);
}
void CheckValid(void) {
DRIVERPARAM_CHECK_BETWEEN("LOBAccessPercentage", cur.LOBAccessPercentage, 0, 100);
}
void CheckCompliant(void) {
CheckValid();
DRIVERPARAM_CHECK_DEFAULT(LOBAccessPercentage);
}
};
class CTradeLookupSettings
: public CParametersWithDefaults<struct TTradeLookupSettings, struct TTradeLookupSettingsState> {
public:
CTradeLookupSettings() {
Initialize();
}
void InitializeDefaults(void) {
dft.do_frame1 = 30;
dft.do_frame2 = 30;
dft.do_frame3 = 30;
dft.do_frame4 = 10;
dft.MaxRowsFrame1 = 20;
dft.BackOffFromEndTimeFrame2 = 4 * 8 * 3600; // four 8-hour days or 32 hours
dft.MaxRowsFrame2 = 20;
dft.BackOffFromEndTimeFrame3 = 200 * 60; // 200 minutes
dft.MaxRowsFrame3 = 20;
dft.BackOffFromEndTimeFrame4 = 500 * 60; // 30,000 seconds
dft.MaxRowsFrame4 = 20;
}
void CheckDefaults(void) {
state.do_frame1 = (cur.do_frame1 == dft.do_frame1);
state.do_frame2 = (cur.do_frame2 == dft.do_frame2);
state.do_frame3 = (cur.do_frame3 == dft.do_frame3);
state.do_frame4 = (cur.do_frame4 == dft.do_frame4);
state.MaxRowsFrame1 = (cur.MaxRowsFrame1 == dft.MaxRowsFrame1);
state.BackOffFromEndTimeFrame2 = (cur.BackOffFromEndTimeFrame2 == dft.BackOffFromEndTimeFrame2);
state.MaxRowsFrame2 = (cur.MaxRowsFrame2 == dft.MaxRowsFrame2);
state.BackOffFromEndTimeFrame3 = (cur.BackOffFromEndTimeFrame3 == dft.BackOffFromEndTimeFrame3);
state.MaxRowsFrame3 = (cur.MaxRowsFrame3 == dft.MaxRowsFrame3);
state.BackOffFromEndTimeFrame4 = (cur.BackOffFromEndTimeFrame4 == dft.BackOffFromEndTimeFrame4);
state.MaxRowsFrame4 = (cur.MaxRowsFrame4 == dft.MaxRowsFrame4);
}
void CheckValid(void) {
DRIVERPARAM_CHECK_BETWEEN("do_frame1", cur.do_frame1, 0, 100);
DRIVERPARAM_CHECK_BETWEEN("do_frame2", cur.do_frame2, 0, 100);
DRIVERPARAM_CHECK_BETWEEN("do_frame3", cur.do_frame3, 0, 100);
DRIVERPARAM_CHECK_BETWEEN("do_frame4", cur.do_frame4, 0, 100);
DRIVERPARAM_CHECK_EQUAL("do_frame* total", cur.do_frame1 + cur.do_frame2 + cur.do_frame3 + cur.do_frame4, 100);
DRIVERPARAM_CHECK_LE("MaxRowsFrame1", cur.MaxRowsFrame1, TradeLookupFrame1MaxRows);
DRIVERPARAM_CHECK_LE("MaxRowsFrame2", cur.MaxRowsFrame2, TradeLookupFrame2MaxRows);
DRIVERPARAM_CHECK_LE("MaxRowsFrame3", cur.MaxRowsFrame3, TradeLookupFrame3MaxRows);
DRIVERPARAM_CHECK_LE("MaxRowsFrame4", cur.MaxRowsFrame4, TradeLookupFrame4MaxRows);
}
void CheckCompliant(void) {
CheckValid();
DRIVERPARAM_CHECK_DEFAULT(do_frame1);
DRIVERPARAM_CHECK_DEFAULT(do_frame2);
DRIVERPARAM_CHECK_DEFAULT(do_frame3);
DRIVERPARAM_CHECK_DEFAULT(do_frame4);
DRIVERPARAM_CHECK_DEFAULT(MaxRowsFrame1);
DRIVERPARAM_CHECK_DEFAULT(BackOffFromEndTimeFrame2);
DRIVERPARAM_CHECK_DEFAULT(MaxRowsFrame2);
DRIVERPARAM_CHECK_DEFAULT(BackOffFromEndTimeFrame3);
DRIVERPARAM_CHECK_DEFAULT(MaxRowsFrame3);
DRIVERPARAM_CHECK_DEFAULT(BackOffFromEndTimeFrame4);
DRIVERPARAM_CHECK_DEFAULT(MaxRowsFrame4);
}
};
class CTradeOrderSettings
: public CParametersWithDefaults<struct TTradeOrderSettings, struct TTradeOrderSettingsState> {
public:
CTradeOrderSettings() {
Initialize();
}
void InitializeDefaults(void) {
dft.market = 60;
dft.limit = 40;
dft.stop_loss = 50;
dft.security_by_name = 40;
dft.security_by_symbol = 60;
dft.buy_orders = 50;
dft.sell_orders = 50;
dft.lifo = 35;
dft.exec_is_owner = 90;
dft.rollback = 1;
dft.type_is_margin = 8;
}
void CheckDefaults(void) {
state.market = (cur.market == dft.market);
state.limit = (cur.limit == dft.limit);
state.stop_loss = (cur.stop_loss == dft.stop_loss);
state.security_by_name = (cur.security_by_name == dft.security_by_name);
state.security_by_symbol = (cur.security_by_symbol == dft.security_by_symbol);
state.buy_orders = (cur.buy_orders == dft.buy_orders);
state.sell_orders = (cur.sell_orders == dft.sell_orders);
state.lifo = (cur.lifo == dft.lifo);
state.exec_is_owner = (cur.exec_is_owner == dft.exec_is_owner);
state.rollback = (cur.rollback == dft.rollback);
state.type_is_margin = (cur.type_is_margin == dft.type_is_margin);
}
void CheckValid(void) {
DRIVERPARAM_CHECK_BETWEEN("market", cur.market, 0, 100);
DRIVERPARAM_CHECK_BETWEEN("limit", cur.limit, 0, 100);
DRIVERPARAM_CHECK_EQUAL("market or limit total", cur.market + cur.limit, 100);
DRIVERPARAM_CHECK_BETWEEN("stop_loss", cur.stop_loss, 0, 100);
DRIVERPARAM_CHECK_BETWEEN("security_by_name", cur.security_by_name, 0, 100);
DRIVERPARAM_CHECK_BETWEEN("security_by_symbol", cur.security_by_symbol, 0, 100);
DRIVERPARAM_CHECK_EQUAL("security_by_* total", cur.security_by_name + cur.security_by_symbol, 100);
DRIVERPARAM_CHECK_BETWEEN("buy_orders", cur.buy_orders, 0, 100);
DRIVERPARAM_CHECK_BETWEEN("sell_orders", cur.sell_orders, 0, 100);
DRIVERPARAM_CHECK_EQUAL("*_orders total", cur.buy_orders + cur.sell_orders, 100);
DRIVERPARAM_CHECK_BETWEEN("lifo", cur.lifo, 0, 100);
DRIVERPARAM_CHECK_BETWEEN("exec_is_owner", cur.exec_is_owner, 0, 100);
DRIVERPARAM_CHECK_BETWEEN("rollback", cur.rollback, 0, 100);
DRIVERPARAM_CHECK_BETWEEN("type_is_margin", cur.type_is_margin, 0, 100);
}
void CheckCompliant(void) {
CheckValid();
DRIVERPARAM_CHECK_BETWEEN("exec_is_owner", cur.exec_is_owner, 60, 100);
DRIVERPARAM_CHECK_DEFAULT(market);
DRIVERPARAM_CHECK_DEFAULT(limit);
DRIVERPARAM_CHECK_DEFAULT(stop_loss);
DRIVERPARAM_CHECK_DEFAULT(security_by_name);
DRIVERPARAM_CHECK_DEFAULT(security_by_symbol);
DRIVERPARAM_CHECK_DEFAULT(buy_orders);
DRIVERPARAM_CHECK_DEFAULT(sell_orders);
DRIVERPARAM_CHECK_DEFAULT(lifo);
DRIVERPARAM_CHECK_DEFAULT(exec_is_owner);
DRIVERPARAM_CHECK_DEFAULT(rollback);
DRIVERPARAM_CHECK_DEFAULT(type_is_margin);
}
};
class CTradeUpdateSettings
: public CParametersWithDefaults<struct TTradeUpdateSettings, struct TTradeUpdateSettingsState> {
public:
CTradeUpdateSettings() {
Initialize();
}
void InitializeDefaults(void) {
dft.do_frame1 = 33;
dft.do_frame2 = 33;
dft.do_frame3 = 34;
dft.MaxRowsFrame1 = 20;
dft.MaxRowsToUpdateFrame1 = 20;
dft.MaxRowsFrame2 = 20;
dft.MaxRowsToUpdateFrame2 = 20;
dft.BackOffFromEndTimeFrame2 = 4 * 8 * 3600; // four 8-hour days or 32 hours
dft.MaxRowsFrame3 = 20;
dft.MaxRowsToUpdateFrame3 = 20;
dft.BackOffFromEndTimeFrame3 = 200 * 60; // 200 minutes
}
void CheckDefaults(void) {
state.do_frame1 = (cur.do_frame1 == dft.do_frame1);
state.do_frame2 = (cur.do_frame2 == dft.do_frame2);
state.do_frame3 = (cur.do_frame3 == dft.do_frame3);
state.MaxRowsFrame1 = (cur.MaxRowsFrame1 == dft.MaxRowsFrame1);
state.MaxRowsToUpdateFrame1 = (cur.MaxRowsToUpdateFrame1 == dft.MaxRowsToUpdateFrame1);
state.MaxRowsFrame2 = (cur.MaxRowsFrame2 == dft.MaxRowsFrame2);
state.MaxRowsToUpdateFrame2 = (cur.MaxRowsToUpdateFrame2 == dft.MaxRowsToUpdateFrame2);
state.BackOffFromEndTimeFrame2 = (cur.BackOffFromEndTimeFrame2 == dft.BackOffFromEndTimeFrame2);
state.MaxRowsFrame3 = (cur.MaxRowsFrame3 == dft.MaxRowsFrame3);
state.MaxRowsToUpdateFrame3 = (cur.MaxRowsToUpdateFrame3 == dft.MaxRowsToUpdateFrame3);
state.BackOffFromEndTimeFrame3 = (cur.BackOffFromEndTimeFrame3 == dft.BackOffFromEndTimeFrame3);
}
void CheckValid(void) {
DRIVERPARAM_CHECK_BETWEEN("do_frame1", cur.do_frame1, 0, 100);
DRIVERPARAM_CHECK_BETWEEN("do_frame2", cur.do_frame2, 0, 100);
DRIVERPARAM_CHECK_BETWEEN("do_frame3", cur.do_frame3, 0, 100);
DRIVERPARAM_CHECK_EQUAL("do_frame* total", cur.do_frame1 + cur.do_frame2 + cur.do_frame3, 100);
DRIVERPARAM_CHECK_LE("MaxRowsFrame1", cur.MaxRowsFrame1, TradeUpdateFrame1MaxRows);
DRIVERPARAM_CHECK_LE("MaxRowsFrame2", cur.MaxRowsFrame2, TradeUpdateFrame2MaxRows);
DRIVERPARAM_CHECK_LE("MaxRowsFrame3", cur.MaxRowsFrame3, TradeUpdateFrame3MaxRows);
DRIVERPARAM_CHECK_LE("MaxRowsToUpdateFrame1", cur.MaxRowsToUpdateFrame1, TradeUpdateFrame1MaxRows);
DRIVERPARAM_CHECK_LE("MaxRowsToUpdateFrame2", cur.MaxRowsToUpdateFrame2, TradeUpdateFrame2MaxRows);
DRIVERPARAM_CHECK_LE("MaxRowsToUpdateFrame3", cur.MaxRowsToUpdateFrame3, TradeUpdateFrame3MaxRows);
}
void CheckCompliant(void) {
CheckValid();
DRIVERPARAM_CHECK_DEFAULT(do_frame1);
DRIVERPARAM_CHECK_DEFAULT(do_frame2);
DRIVERPARAM_CHECK_DEFAULT(do_frame3);
DRIVERPARAM_CHECK_DEFAULT(MaxRowsFrame1);
DRIVERPARAM_CHECK_DEFAULT(MaxRowsToUpdateFrame1);
DRIVERPARAM_CHECK_DEFAULT(MaxRowsFrame2);
DRIVERPARAM_CHECK_DEFAULT(MaxRowsToUpdateFrame2);
DRIVERPARAM_CHECK_DEFAULT(BackOffFromEndTimeFrame2);
DRIVERPARAM_CHECK_DEFAULT(MaxRowsFrame3);
DRIVERPARAM_CHECK_DEFAULT(MaxRowsToUpdateFrame3);
DRIVERPARAM_CHECK_DEFAULT(BackOffFromEndTimeFrame3);
}
};
class CTxnMixGeneratorSettings
: public CParametersWithDefaults<struct TTxnMixGeneratorSettings, struct TTxnMixGeneratorSettingsState> {
public:
CTxnMixGeneratorSettings() {
Initialize();
}
void InitializeDefaults(void) {
dft.BrokerVolumeMixLevel = 49;
dft.CustomerPositionMixLevel = 130;
dft.MarketWatchMixLevel = 180;
dft.SecurityDetailMixLevel = 140;
dft.TradeLookupMixLevel = 80;
dft.TradeOrderMixLevel = 101;
dft.TradeStatusMixLevel = 190;
dft.TradeUpdateMixLevel = 20;
}
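// Worked total for the defaults above: 49 + 130 + 180 + 140 + 80 + 101 + 190 + 20 = 890
// out of the 1000-point TransactionMixTotal; the remaining 110 presumably belongs to
// the Market-Feed and Trade-Result levels, which (per the struct comment) are driven
// by Trade-Order and are not defaulted here.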
void CheckDefaults(void) {
state.BrokerVolumeMixLevel = (cur.BrokerVolumeMixLevel == dft.BrokerVolumeMixLevel);
state.CustomerPositionMixLevel = (cur.CustomerPositionMixLevel == dft.CustomerPositionMixLevel);
state.MarketWatchMixLevel = (cur.MarketWatchMixLevel == dft.MarketWatchMixLevel);
state.SecurityDetailMixLevel = (cur.SecurityDetailMixLevel == dft.SecurityDetailMixLevel);
state.TradeLookupMixLevel = (cur.TradeLookupMixLevel == dft.TradeLookupMixLevel);
state.TradeOrderMixLevel = (cur.TradeOrderMixLevel == dft.TradeOrderMixLevel);
state.TradeStatusMixLevel = (cur.TradeStatusMixLevel == dft.TradeStatusMixLevel);
state.TradeUpdateMixLevel = (cur.TradeUpdateMixLevel == dft.TradeUpdateMixLevel);
}
void CheckValid(void) {
DRIVERPARAM_CHECK_GE("BrokerVolumeMixLevel", cur.BrokerVolumeMixLevel, 0);
DRIVERPARAM_CHECK_GE("CustomerPositionMixLevel", cur.CustomerPositionMixLevel, 0);
DRIVERPARAM_CHECK_GE("MarketWatchMixLevel", cur.MarketWatchMixLevel, 0);
DRIVERPARAM_CHECK_GE("SecurityDetailMixLevel", cur.SecurityDetailMixLevel, 0);
DRIVERPARAM_CHECK_GE("TradeLookupMixLevel", cur.TradeLookupMixLevel, 0);
DRIVERPARAM_CHECK_GE("TradeOrderMixLevel", cur.TradeOrderMixLevel, 0);
DRIVERPARAM_CHECK_GE("TradeStatusMixLevel", cur.TradeStatusMixLevel, 0);
DRIVERPARAM_CHECK_GE("TradeUpdateMixLevel", cur.TradeUpdateMixLevel, 0);
}
void CheckCompliant(void) {
CheckValid();
DRIVERPARAM_CHECK_DEFAULT(BrokerVolumeMixLevel);
DRIVERPARAM_CHECK_DEFAULT(CustomerPositionMixLevel);
DRIVERPARAM_CHECK_DEFAULT(MarketWatchMixLevel);
DRIVERPARAM_CHECK_DEFAULT(SecurityDetailMixLevel);
DRIVERPARAM_CHECK_DEFAULT(TradeLookupMixLevel);
DRIVERPARAM_CHECK_DEFAULT(TradeOrderMixLevel);
DRIVERPARAM_CHECK_DEFAULT(TradeStatusMixLevel);
DRIVERPARAM_CHECK_DEFAULT(TradeUpdateMixLevel);
}
};
class CLoaderSettings : public CParametersWithDefaults<struct TLoaderSettings, struct TLoaderSettingsState> {
public:
CLoaderSettings(TIdent iConfiguredCustomerCount, TIdent iActiveCustomerCount, TIdent iStartingCustomer,
TIdent iCustomerCount, INT32 iScaleFactor, INT32 iDaysOfInitialTrades) {
Initialize();
cur.iConfiguredCustomerCount = iConfiguredCustomerCount;
cur.iActiveCustomerCount = iActiveCustomerCount;
cur.iStartingCustomer = iStartingCustomer;
cur.iCustomerCount = iCustomerCount;
cur.iScaleFactor = iScaleFactor;
cur.iDaysOfInitialTrades = iDaysOfInitialTrades;
CheckDefaults();
}
CLoaderSettings() {
Initialize();
}
void InitializeDefaults(void) {
// NOTE: All of these parameters should match the default values hard-
// coded in src/EGenLoader.cpp via the variable names listed below.
dft.iConfiguredCustomerCount = 5000; // iDefaultCustomerCount
dft.iActiveCustomerCount = 5000; // iDefaultCustomerCount
dft.iStartingCustomer = 1; // iDefaultStartFromCustomer
dft.iCustomerCount = 5000; // iDefaultCustomerCount
dft.iScaleFactor = 500; // iScaleFactor
dft.iDaysOfInitialTrades = 300; // iDaysOfInitialTrades
}
void CheckDefaults(void) {
state.iConfiguredCustomerCount = true;
state.iActiveCustomerCount = true;
state.iStartingCustomer = true;
state.iCustomerCount = true;
state.iScaleFactor = (cur.iScaleFactor == dft.iScaleFactor);
state.iDaysOfInitialTrades = (cur.iDaysOfInitialTrades == dft.iDaysOfInitialTrades);
}
void CheckValid(void) {
DRIVERPARAM_CHECK_GE("iConfiguredCustomerCount", cur.iConfiguredCustomerCount, 1000);
DRIVERPARAM_CHECK_GE("iActiveCustomerCount", cur.iActiveCustomerCount, 1000);
DRIVERPARAM_CHECK_LE("iActiveCustomerCount", cur.iActiveCustomerCount, cur.iConfiguredCustomerCount);
DRIVERPARAM_CHECK_EQUAL("iConfiguredCustomerCount", cur.iConfiguredCustomerCount % 1000, 0);
DRIVERPARAM_CHECK_GE("iStartingCustomer", cur.iStartingCustomer, 1)
DRIVERPARAM_CHECK_EQUAL("iStartingCustomer", cur.iStartingCustomer % 1000, 1);
DRIVERPARAM_CHECK_EQUAL("iCustomerCount", cur.iCustomerCount % 1000, 0);
DRIVERPARAM_CHECK_LE("iCustomerCount", cur.iCustomerCount + cur.iStartingCustomer - 1,
cur.iConfiguredCustomerCount);
}
void CheckCompliant(void) {
CheckValid();
DRIVERPARAM_CHECK_GE("iConfiguredCustomerCount", cur.iConfiguredCustomerCount, 5000);
DRIVERPARAM_CHECK_GE("iActiveCustomerCount", cur.iActiveCustomerCount, 5000);
DRIVERPARAM_CHECK_EQUAL("iActiveCustomerCount", cur.iActiveCustomerCount, cur.iConfiguredCustomerCount);
DRIVERPARAM_CHECK_DEFAULT(iScaleFactor);
DRIVERPARAM_CHECK_DEFAULT(iDaysOfInitialTrades);
}
};
class CDriverGlobalSettings
: public CParametersWithDefaults<struct TDriverGlobalSettings, struct TDriverGlobalSettingsState> {
public:
CDriverGlobalSettings(TIdent iConfiguredCustomerCount, TIdent iActiveCustomerCount, INT32 iScaleFactor,
INT32 iDaysOfInitialTrades) {
Initialize();
cur.iConfiguredCustomerCount = iConfiguredCustomerCount;
cur.iActiveCustomerCount = iActiveCustomerCount;
cur.iScaleFactor = iScaleFactor;
cur.iDaysOfInitialTrades = iDaysOfInitialTrades;
CheckDefaults();
}
CDriverGlobalSettings() {
Initialize();
}
void InitializeDefaults(void) {
// NOTE: All of these parameters should match the default values hard-
// coded in src/EGenLoader.cpp via the variable names listed below,
// as these are the minimum build (and therefore run) values.
dft.iConfiguredCustomerCount = 5000; // iDefaultLoadUnitSize
dft.iActiveCustomerCount = 5000; // iDefaultLoadUnitSize
dft.iScaleFactor = 500; // iScaleFactor
dft.iDaysOfInitialTrades = 300; // iDaysOfInitialTrades
}
void CheckDefaults(void) {
state.iConfiguredCustomerCount = true;
state.iActiveCustomerCount = true;
state.iScaleFactor = (cur.iScaleFactor == dft.iScaleFactor);
state.iDaysOfInitialTrades = (cur.iDaysOfInitialTrades == dft.iDaysOfInitialTrades);
}
void CheckValid(void) {
DRIVERPARAM_CHECK_GE("iConfiguredCustomerCount", cur.iConfiguredCustomerCount, 1000);
DRIVERPARAM_CHECK_GE("iActiveCustomerCount", cur.iActiveCustomerCount, 1000);
DRIVERPARAM_CHECK_LE("iActiveCustomerCount", cur.iActiveCustomerCount, cur.iConfiguredCustomerCount);
DRIVERPARAM_CHECK_EQUAL("iConfiguredCustomerCount", cur.iConfiguredCustomerCount % 1000, 0);
}
void CheckCompliant(void) {
CheckValid();
DRIVERPARAM_CHECK_GE("iConfiguredCustomerCount", cur.iConfiguredCustomerCount, 5000);
DRIVERPARAM_CHECK_GE("iActiveCustomerCount", cur.iActiveCustomerCount, 5000);
DRIVERPARAM_CHECK_EQUAL("iActiveCustomerCount", cur.iActiveCustomerCount, cur.iConfiguredCustomerCount);
DRIVERPARAM_CHECK_DEFAULT(iScaleFactor);
DRIVERPARAM_CHECK_DEFAULT(iDaysOfInitialTrades);
}
};
class CDriverCESettings : public CParametersWithoutDefaults<struct TDriverCESettings> {
public:
CDriverCESettings(UINT32 UniqueId, RNGSEED TxnMixRNGSeed, RNGSEED TxnInputRNGSeed) {
cur.UniqueId = UniqueId;
cur.TxnMixRNGSeed = TxnMixRNGSeed;
cur.TxnInputRNGSeed = TxnInputRNGSeed;
}
CDriverCESettings(){};
void CheckValid(void) {
}
void CheckCompliant(void) {
}
};
class CDriverCEPartitionSettings
: public CParametersWithDefaults<struct TDriverCEPartitionSettings, struct TDriverCEPartitionSettingsState> {
public:
CDriverCEPartitionSettings(TIdent iMyStartingCustomerId, TIdent iMyCustomerCount, INT32 iPartitionPercent) {
Initialize();
cur.iMyStartingCustomerId = iMyStartingCustomerId;
cur.iMyCustomerCount = iMyCustomerCount;
cur.iPartitionPercent = iPartitionPercent;
CheckDefaults();
}
// Default constructor necessary for CE instantiation in the
// non-partitioned case. In this case we set the current values to 0 to
// indicate that they are unused.
CDriverCEPartitionSettings() {
Initialize();
cur.iMyStartingCustomerId = 0;
cur.iMyCustomerCount = 0;
cur.iPartitionPercent = 0;
CheckDefaults();
}
void InitializeDefaults(void) {
dft.iMyStartingCustomerId = 1; // Spec 6.4.3.1: Minimum possible starting C_ID
dft.iMyCustomerCount = 5000; // Spec 6.4.3.1: Minimum partition size
dft.iPartitionPercent = 50; // Spec 6.4.3.1: Required partition percentage
}
void CheckDefaults(void) {
state.iPartitionPercent = (cur.iPartitionPercent == dft.iPartitionPercent);
}
void CheckValid(void) {
DRIVERPARAM_CHECK_BETWEEN("iPartitionPercent", cur.iPartitionPercent, 0, 100);
if (cur.iMyStartingCustomerId == 0 && cur.iMyCustomerCount == 0 && cur.iPartitionPercent == 0) {
// Partitioning Disabled:
// - in this case, the default constructor would have been used and
//   all values are set to 0. This must be considered valid.
} else {
// Partitioning Enabled:
// Spec clause 6.4.3.1 has many requirements, these are the ones
// that we validate here:
// - minimum C_ID in a subrange is the starting C_ID for a LU
// - minimum C_ID size of a subrange is 5000
// - size of a subrange must be an integral multiple of LU
DRIVERPARAM_CHECK_EQUAL("iMyStartingCustomerId", cur.iMyStartingCustomerId % 1000, 1);
DRIVERPARAM_CHECK_GE("iMyCustomerCount", cur.iMyCustomerCount, 1000);
DRIVERPARAM_CHECK_EQUAL("iMyCustomerCount", cur.iMyCustomerCount % 1000, 0);
}
}
void CheckCompliant(void) {
CheckValid();
if (cur.iMyStartingCustomerId == 0 && cur.iMyCustomerCount == 0 && cur.iPartitionPercent == 0) {
// Partitioning Disabled
} else {
// - CE partition is used 50% of the time
DRIVERPARAM_CHECK_DEFAULT(iPartitionPercent);
}
}
};
class CDriverMEESettings : public CParametersWithoutDefaults<struct TDriverMEESettings> {
public:
CDriverMEESettings(UINT32 UniqueId, RNGSEED RNGSeed, RNGSEED TickerTapeRNGSeed, RNGSEED TradingFloorRNGSeed) {
cur.UniqueId = UniqueId;
cur.RNGSeed = RNGSeed;
cur.TickerTapeRNGSeed = TickerTapeRNGSeed;
cur.TradingFloorRNGSeed = TradingFloorRNGSeed;
}
CDriverMEESettings(){};
void CheckValid(void) {
}
void CheckCompliant(void) {
}
};
class CDriverDMSettings : public CParametersWithoutDefaults<struct TDriverDMSettings> {
public:
CDriverDMSettings(UINT32 UniqueId, RNGSEED RNGSeed) {
cur.UniqueId = UniqueId;
cur.RNGSeed = RNGSeed;
}
CDriverDMSettings(){};
void CheckValid(void) {
}
void CheckCompliant(void) {
}
};
typedef struct TDriverCETxnSettings {
CBrokerVolumeSettings BV_settings;
CCustomerPositionSettings CP_settings;
CMarketWatchSettings MW_settings;
CSecurityDetailSettings SD_settings;
CTradeLookupSettings TL_settings;
CTradeOrderSettings TO_settings;
CTradeUpdateSettings TU_settings;
CTxnMixGeneratorSettings TxnMixGenerator_settings;
bool IsValid(void) {
try {
CheckValid();
return true;
} catch (CCheckErr) {
throw;
return false;
}
}
bool IsCompliant(void) {
try {
CheckCompliant();
return true;
} catch (CCheckErr) {
throw;
return false;
}
}
void CheckValid(void) {
BV_settings.CheckValid();
CP_settings.CheckValid();
MW_settings.CheckValid();
SD_settings.CheckValid();
TL_settings.CheckValid();
TO_settings.CheckValid();
TU_settings.CheckValid();
TxnMixGenerator_settings.CheckValid();
}
void CheckCompliant(void) {
BV_settings.CheckCompliant();
CP_settings.CheckCompliant();
MW_settings.CheckCompliant();
SD_settings.CheckCompliant();
TL_settings.CheckCompliant();
TO_settings.CheckCompliant();
TU_settings.CheckCompliant();
TxnMixGenerator_settings.CheckCompliant();
}
} * PDriverCETxnSettings;
} // namespace TPCE
#endif //#ifndef DRIVER_PARAM_SETTINGS_H
| 14,900 |
1,102 | <filename>src/vizdoom/src/g_shared/a_mapmarker.cpp
/*
** a_mapmarker.cpp
** An actor that appears on the automap instead of in the 3D view.
**
**---------------------------------------------------------------------------
** Copyright 2006 <NAME>
** All rights reserved.
**
** Redistribution and use in source and binary forms, with or without
** modification, are permitted provided that the following conditions
** are met:
**
** 1. Redistributions of source code must retain the above copyright
** notice, this list of conditions and the following disclaimer.
** 2. Redistributions in binary form must reproduce the above copyright
** notice, this list of conditions and the following disclaimer in the
** documentation and/or other materials provided with the distribution.
** 3. The name of the author may not be used to endorse or promote products
** derived from this software without specific prior written permission.
**
** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
** DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
**---------------------------------------------------------------------------
**
*/
#include "a_sharedglobal.h"
#include "statnums.h"
// Map Marker --------------------------------------------------------------
//
// This class uses the following argument:
// args[0] == 0, shows the sprite at this actor
// != 0, shows the sprite for all actors whose TIDs match instead
//
// args[1] == 0, show the sprite always
// == 1, show the sprite only after its sector has been drawn
//
// To enable display of the sprite, activate it. To turn off the sprite,
// deactivate it.
//
// All the code to display it is in am_map.cpp.
//
//--------------------------------------------------------------------------
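// Illustrative ACS usage (a sketch, not taken from this file; it assumes the marker was
// given TID 100 in the map editor and uses the standard Thing_Activate/Thing_Deactivate
// ACS functions):
//
// script 1 (void) { Thing_Activate(100); }   // per the comment above, shows the sprite
// script 2 (void) { Thing_Deactivate(100); } // hides it again
//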
IMPLEMENT_CLASS(AMapMarker)
void AMapMarker::BeginPlay ()
{
ChangeStatNum (STAT_MAPMARKER);
}
void AMapMarker::Activate (AActor *activator)
{
flags2 |= MF2_DORMANT;
}
void AMapMarker::Deactivate (AActor *activator)
{
flags2 &= ~MF2_DORMANT;
}
| 820 |
4,320 | <filename>http-service/src/main/java/net/runelite/http/service/xp/XpMapper.java
/*
* Copyright (c) 2018, Adam <<EMAIL>>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package net.runelite.http.service.xp;
import net.runelite.http.api.hiscore.HiscoreResult;
import net.runelite.http.api.xp.XpData;
import net.runelite.http.service.xp.beans.XpEntity;
import org.mapstruct.Mapper;
import org.mapstruct.Mapping;
import org.mapstruct.factory.Mappers;
@Mapper
public interface XpMapper
{
XpMapper INSTANCE = Mappers.getMapper(XpMapper.class);
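// Illustrative call site (a sketch; `result` stands for a HiscoreResult fetched elsewhere):
//   XpData snapshot = XpMapper.INSTANCE.hiscoreResultToXpData(result);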
XpData xpEntityToXpData(XpEntity xpEntity);
@Mapping(target = "time", ignore = true)
@Mapping(source = "attack.experience", target = "attack_xp")
@Mapping(source = "defence.experience", target = "defence_xp")
@Mapping(source = "strength.experience", target = "strength_xp")
@Mapping(source = "hitpoints.experience", target = "hitpoints_xp")
@Mapping(source = "ranged.experience", target = "ranged_xp")
@Mapping(source = "prayer.experience", target = "prayer_xp")
@Mapping(source = "magic.experience", target = "magic_xp")
@Mapping(source = "cooking.experience", target = "cooking_xp")
@Mapping(source = "woodcutting.experience", target = "woodcutting_xp")
@Mapping(source = "fletching.experience", target = "fletching_xp")
@Mapping(source = "fishing.experience", target = "fishing_xp")
@Mapping(source = "firemaking.experience", target = "firemaking_xp")
@Mapping(source = "crafting.experience", target = "crafting_xp")
@Mapping(source = "smithing.experience", target = "smithing_xp")
@Mapping(source = "mining.experience", target = "mining_xp")
@Mapping(source = "herblore.experience", target = "herblore_xp")
@Mapping(source = "agility.experience", target = "agility_xp")
@Mapping(source = "thieving.experience", target = "thieving_xp")
@Mapping(source = "slayer.experience", target = "slayer_xp")
@Mapping(source = "farming.experience", target = "farming_xp")
@Mapping(source = "runecraft.experience", target = "runecraft_xp")
@Mapping(source = "hunter.experience", target = "hunter_xp")
@Mapping(source = "construction.experience", target = "construction_xp")
@Mapping(source = "overall.rank", target = "overall_rank")
@Mapping(source = "attack.rank", target = "attack_rank")
@Mapping(source = "defence.rank", target = "defence_rank")
@Mapping(source = "strength.rank", target = "strength_rank")
@Mapping(source = "hitpoints.rank", target = "hitpoints_rank")
@Mapping(source = "ranged.rank", target = "ranged_rank")
@Mapping(source = "prayer.rank", target = "prayer_rank")
@Mapping(source = "magic.rank", target = "magic_rank")
@Mapping(source = "cooking.rank", target = "cooking_rank")
@Mapping(source = "woodcutting.rank", target = "woodcutting_rank")
@Mapping(source = "fletching.rank", target = "fletching_rank")
@Mapping(source = "fishing.rank", target = "fishing_rank")
@Mapping(source = "firemaking.rank", target = "firemaking_rank")
@Mapping(source = "crafting.rank", target = "crafting_rank")
@Mapping(source = "smithing.rank", target = "smithing_rank")
@Mapping(source = "mining.rank", target = "mining_rank")
@Mapping(source = "herblore.rank", target = "herblore_rank")
@Mapping(source = "agility.rank", target = "agility_rank")
@Mapping(source = "thieving.rank", target = "thieving_rank")
@Mapping(source = "slayer.rank", target = "slayer_rank")
@Mapping(source = "farming.rank", target = "farming_rank")
@Mapping(source = "runecraft.rank", target = "runecraft_rank")
@Mapping(source = "hunter.rank", target = "hunter_rank")
@Mapping(source = "construction.rank", target = "construction_rank")
XpData hiscoreResultToXpData(HiscoreResult hiscoreResult);
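	// Hypothetical usage sketch (the hiscoreResult/xpEntity instances are assumed to
	// come from the hiscore client and the xp DAO respectively, not from this file):
	//
	//   XpData snapshot = XpMapper.INSTANCE.hiscoreResultToXpData(hiscoreResult);
	//   XpData stored = XpMapper.INSTANCE.xpEntityToXpData(xpEntity);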
}
| 1,666 |
332 | <filename>tricks/stl_iteration/cpp/main.cpp<gh_stars>100-1000
// IMPORT STANDARD LIBRARIES
#include <iostream>
#include <iterator>
#include <numeric>
#include <string>
// IMPORT THIRD-PARTY LIBRARIES
#include <pxr/usd/kind/registry.h>
#include <pxr/usd/sdf/path.h>
#include <pxr/usd/usd/modelAPI.h>
#include <pxr/usd/usd/primRange.h>
#include <pxr/usd/usd/stage.h>
int main() {
auto stage = pxr::UsdStage::CreateInMemory();
stage->DefinePrim(pxr::SdfPath {"/SomeSphere"});
stage->DefinePrim(pxr::SdfPath {"/AnotherSphere"});
auto range = pxr::UsdPrimRange::Stage(stage);
// Get every prim using accumulate
auto text = std::accumulate(
std::begin(range),
std::end(range),
std::string {"Prims:"},
[](std::string text, pxr::UsdPrim const &prim) {
return std::move(text) + "\n" + pxr::TfStringify(prim.GetPath());
}
);
std::cout << text << "\n\n";
// XXX : This is a bit strange but `prim` must be `pxr::UsdPrim
// const` on my machine, even though we immediately wrap it in
// UsdModelAPI and change the Prim's Kind.
//
std::for_each(std::begin(range), std::end(range), [](pxr::UsdPrim const &prim){
pxr::UsdModelAPI(prim).SetKind(pxr::KindTokens->component);
});
    // Export the composed stage to a string (no manual heap management needed).
    std::string result;
    stage->GetRootLayer()->ExportToString(&result);
    std::cout << result << std::endl;
return 0;
}
| 620 |
465 | # -*- coding:utf-8 -*-
import sys
import time
import json
import random
import string
import cls_kv_pb2
## Protobuf (pb) protocol, user-defined payload generator
def pb_gen(start_index, size):
logGroupList = cls_kv_pb2.LogGroupList()
LogGroup = logGroupList.logGroupList.add()
log = LogGroup.logs.add()
log.time = int(time.time())
for index in range(size):
content = log.contents.add()
key = "name,index,test_name_index,"
content.key = key + ''.join(random.sample(string.ascii_letters + string.digits, 60))
value = "test_" + str(start_index) + "_" + str(index) + "_"
for i in range(2):
content.value = content.value + ''.join(random.sample(string.ascii_letters + string.digits, 60))
data = logGroupList.SerializeToString()
return data
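## Hypothetical usage sketch (argument values are placeholders): build a serialized
## LogGroupList with 5 random key/value pairs and inspect its size.
##
##   payload = pb_gen(start_index=0, size=5)
##   print(len(payload))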
| 334 |
834 | <filename>fboss/agent/platforms/tests/utils/BcmTestGalaxyPort.cpp
/*
* Copyright (c) 2004-present, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*
*/
#include "fboss/agent/platforms/tests/utils/BcmTestGalaxyPort.h"
#include "fboss/agent/platforms/common/utils/GalaxyLedUtils.h"
#include "fboss/agent/platforms/tests/utils/BcmTestGalaxyPlatform.h"
#include "fboss/agent/platforms/wedge/utils/BcmLedUtils.h"
namespace facebook::fboss {
BcmTestGalaxyPort::BcmTestGalaxyPort(PortID id, BcmTestGalaxyPlatform* platform)
: BcmTestPort(id, platform) {}
void BcmTestGalaxyPort::linkStatusChanged(bool up, bool adminUp) {
uint32_t portData = BcmLedUtils::getGalaxyPortStatus(0, getPortID());
GalaxyLedUtils::getDesiredLEDState(&portData, up, adminUp);
BcmLedUtils::setGalaxyPortStatus(0, getPortID(), portData);
}
} // namespace facebook::fboss
| 366 |
560 | /*
* Copyright (c) 2018 <NAME> <<EMAIL>>
* All Rights Reserved.
*/
package me.zhanghai.android.douya.functional;
import java.util.Objects;
import me.zhanghai.android.douya.functional.compat.Consumer;
public class IterableCompat {
private IterableCompat() {}
public static <T> void forEach(Iterable<T> iterable, Consumer<T> action) {
Objects.requireNonNull(iterable);
for (T t : iterable) {
action.accept(t);
}
}
}
| 187 |
32,544 | package com.baeldung.quarkus.liquibase;
import io.agroal.api.AgroalDataSource;
import liquibase.Liquibase;
import liquibase.database.jvm.JdbcConnection;
import liquibase.resource.ClassLoaderResourceAccessor;
import liquibase.resource.ResourceAccessor;
import javax.enterprise.context.ApplicationScoped;
import javax.enterprise.inject.Produces;
import javax.inject.Inject;
@ApplicationScoped
public class LiquibaseProducer {
@Inject
AgroalDataSource dataSource;
private LiquibaseConfig liquibaseConfig;
@Produces
public Liquibase produceLiquibase() throws Exception {
ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
ResourceAccessor classLoaderResourceAccessor = new ClassLoaderResourceAccessor(classLoader);
Liquibase liquibase = new Liquibase(liquibaseConfig.changeLog, classLoaderResourceAccessor, new JdbcConnection(dataSource.getConnection()));
return liquibase;
}
public void setLiquibaseConfig(LiquibaseConfig liquibaseConfig) {
this.liquibaseConfig = liquibaseConfig;
}
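    // Hypothetical usage sketch (the injection point and the empty context string are
    // assumptions, not part of this class). Note that setLiquibaseConfig(...) must be
    // called before the bean is produced, otherwise produceLiquibase() dereferences null.
    //
    //   @Inject
    //   Liquibase liquibase;
    //
    //   void migrate() throws Exception {
    //       liquibase.update(""); // run all pending change sets from the changelog
    //   }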
} | 370 |
2,113 | //-----------------------------------------------------------------------------
// Copyright (c) 2012 GarageGames, LLC
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
//-----------------------------------------------------------------------------
#include "platform/platform.h"
#include "core/util/journal/journal.h"
#include "core/stream/fileStream.h"
#include "core/util/safeDelete.h"
#include "console/console.h"
#include <stdlib.h>
//-----------------------------------------------------------------------------
Journal::FuncDecl* Journal::_FunctionList;
Stream *Journal::mFile;
Journal::Mode Journal::_State = Journal::StopState;
U32 Journal::_Count;
bool Journal::_Dispatching = false;
Journal Journal::smInstance;
//-----------------------------------------------------------------------------
Journal::~Journal()
{
if( mFile )
Stop();
}
//-----------------------------------------------------------------------------
Journal::Functor* Journal::_create(Id id)
{
for (FuncDecl* ptr = _FunctionList; ptr; ptr = ptr->next)
if (ptr->id == id)
return ptr->create();
return 0;
}
Journal::Id Journal::_getFunctionId(VoidPtr ptr,VoidMethod method)
{
for (FuncDecl* itr = _FunctionList; itr; itr = itr->next)
if (itr->match(ptr,method))
return itr->id;
return 0;
}
void Journal::_removeFunctionId(VoidPtr ptr,VoidMethod method)
{
FuncDecl ** itr = &_FunctionList;
do
{
if((*itr)->match(ptr, method))
{
// Unlink and break.
FuncDecl* decl = *itr;
idPool().free( decl->id );
*itr = (*itr)->next;
delete decl;
return;
}
// Advance to next...
itr = &((*itr)->next);
}
while(*itr);
}
void Journal::_start()
{
}
void Journal::_finish()
{
if (_State == PlayState)
--_Count;
else {
U32 pos = mFile->getPosition();
mFile->setPosition(0);
mFile->write(++_Count);
mFile->setPosition(pos);
}
}
void Journal::Record(const char * file)
{
if (_State == DisabledState)
{
Con::errorf("//---------------------------------------------//");
Con::errorf("Journal::Record() - Cannot record a journal after GuiCanvas or NetConnection creation!");
Con::errorf("To record before canvas/netConnection creation, run %s with the following arguments: -jSave %s",
Platform::getExecutableName(), file);
Con::errorf("//---------------------------------------------//");
return;
}
if (_State == StopState)
{
_Count = 0;
mFile = new FileStream();
if( ((FileStream*)mFile)->open(file, Torque::FS::File::Write) )
{
mFile->write(_Count);
_State = RecordState;
}
else
{
AssertWarn(false,"Journal: Could not create journal file");
Con::errorf("Journal: Could not create journal file '%s'", file);
}
}
}
void Journal::Play(const char * file)
{
if (_State == DisabledState)
{
Con::errorf("//---------------------------------------------//");
Con::errorf("Journal::Play() - Cannot playback a journal after GuiCanvas or NetConnection creation!");
Con::errorf("To playback before canvas/netConnection creation, run %s with the following arguments: -jPlay %s",
Platform::getExecutableName(), file);
Con::errorf("//---------------------------------------------//");
return;
}
if (_State == StopState)
{
SAFE_DELETE(mFile);
mFile = new FileStream();
if( ((FileStream*)mFile)->open(file, Torque::FS::File::Read) )
{
mFile->read(&_Count);
_State = PlayState;
}
else
{
AssertWarn(false,"Journal: Could not open journal file");
Con::errorf("Journal: Could not open journal file '%s'", file);
}
}
}
void Journal::Stop()
{
AssertFatal(mFile, "Journal::Stop - no file stream open!");
SAFE_DELETE( mFile );
_State = StopState;
}
bool Journal::PlayNext()
{
if (_State == PlayState) {
_start();
Id id;
mFile->read(&id);
Functor* jrn = _create(id);
AssertFatal(jrn,"Journal: Undefined function found in journal");
jrn->read(mFile);
_finish();
_Dispatching = true;
jrn->dispatch();
_Dispatching = false;
delete jrn;
if (_Count)
return true;
Stop();
//debugBreak();
}
return false;
}
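//-----------------------------------------------------------------------------
// Hypothetical usage sketch (the file name and call sites are assumptions, not
// part of this translation unit): record a session, then replay it later by
// draining the journal until PlayNext() reports no more entries.
//
// Journal::Record("session.jrn");
// ... run the journaled code paths ...
// Journal::Stop();
//
// Journal::Play("session.jrn");
// while (Journal::PlayNext())
// ;
//-----------------------------------------------------------------------------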
void Journal::Disable()
{
if (_State == StopState)
_State = DisabledState;
} | 2,025 |
2,201 | """
Copyright (c) 2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import cv2
SIZE = 200
BLACK = (0, 0, 0)
LWD = 2
TEXT_SIZE = 1.5
BORDER = 30
def add_top10_gallery_images(demo_image, impaths, distances, input_image):
""" Add top-10 most similar images from the gallery to demo image. """
for index, impath in enumerate(impaths[:10]):
image = cv2.imread(impath)
image = cv2.resize(image, (SIZE, SIZE))
h_shift = 2 * BORDER + input_image.shape[0] + (SIZE + BORDER) * (index // 5)
w_shift = BORDER + (index % 5) * (SIZE + BORDER)
demo_image[h_shift: h_shift + SIZE, w_shift: w_shift + SIZE] = image
if distances is not None:
cv2.putText(demo_image, '{}:{}'.format(index, int(distances[index] * 100) / 100),
(w_shift - BORDER, h_shift - 5), 1,
TEXT_SIZE, BLACK, LWD)
else:
cv2.putText(demo_image, '{}'.format(index), (w_shift - BORDER, h_shift - 5), 1,
TEXT_SIZE, BLACK, LWD)
return demo_image
def visualize(image, impaths, distances, input_size, compute_embedding_time,
search_in_gallery_time, imshow_delay, presenter, no_show=False):
""" Visualizes input frame and top-10 most similar images from the gallery. """
input_image = cv2.resize(image, (SIZE * 4, SIZE * 3))
demo_image = np.ones(
(input_image.shape[0] + SIZE * 2 + BORDER * 4, SIZE * 5 + BORDER * 6, 3),
dtype=np.uint8) * 200
presenter.drawGraphs(input_image)
demo_image[BORDER:BORDER + input_image.shape[0],
BORDER:BORDER + input_image.shape[1]] = input_image
cv2.putText(demo_image, 'Gallery size: {}'.format(len(impaths)),
(BORDER * 2 + input_image.shape[1], BORDER * 2 + 30), 1, TEXT_SIZE, BLACK, LWD)
if not np.isnan(compute_embedding_time):
cv2.putText(demo_image,
                    'Embedding (ms): {}'.format(int(compute_embedding_time * 10000) / 10.0),
(BORDER * 2 + input_image.shape[1], BORDER * 2 + 60), 1, TEXT_SIZE, BLACK, LWD)
if not np.isnan(search_in_gallery_time):
cv2.putText(demo_image,
'Gallery search (ms): {}'.format(int(search_in_gallery_time * 10000) / 10.0),
(BORDER * 2 + input_image.shape[1], BORDER * 2 + 90), 1, TEXT_SIZE, BLACK, LWD)
cv2.putText(demo_image, 'Inp. res: {}x{}'.format(input_size[0], input_size[1]),
(BORDER * 2 + input_image.shape[1], BORDER * 2 + 120), 1, TEXT_SIZE, BLACK, LWD)
demo_image = add_top10_gallery_images(demo_image, impaths, distances, input_image)
if not no_show:
cv2.imshow('demo_image', demo_image)
key_pressed = cv2.waitKey(imshow_delay)
presenter.handleKey(key_pressed)
return (demo_image, key_pressed & 0xff) if key_pressed != -1 else (demo_image, -1)
return (demo_image, -1)
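# Hypothetical usage sketch (the stub presenter, gallery paths and sizes are
# placeholders, not part of this module): the presenter only needs the
# drawGraphs()/handleKey() methods used above.
#
#   class _NullPresenter:
#       def drawGraphs(self, frame): pass
#       def handleKey(self, key): pass
#
#   frame = np.zeros((480, 640, 3), dtype=np.uint8)
#   demo, key = visualize(frame, impaths=['gallery/0.jpg'] * 10, distances=np.zeros(10),
#                         input_size=(128, 256), compute_embedding_time=float('nan'),
#                         search_in_gallery_time=float('nan'), imshow_delay=1,
#                         presenter=_NullPresenter(), no_show=True)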
| 1,501 |
416 | <reponame>sharplook/tencentcloud-sdk-java-test<gh_stars>100-1000
/*
* Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.tencentcloudapi.msp.v20180319;
import java.lang.reflect.Type;
import com.google.gson.JsonSyntaxException;
import com.google.gson.reflect.TypeToken;
import com.tencentcloudapi.common.exception.TencentCloudSDKException;
import com.tencentcloudapi.common.AbstractClient;
import com.tencentcloudapi.common.profile.ClientProfile;
import com.tencentcloudapi.common.JsonResponseModel;
import com.tencentcloudapi.common.Credential;
import com.tencentcloudapi.msp.v20180319.models.*;
public class MspClient extends AbstractClient{
private static String endpoint = "msp.tencentcloudapi.com";
private static String service = "msp";
private static String version = "2018-03-19";
public MspClient(Credential credential, String region) {
this(credential, region, new ClientProfile());
}
public MspClient(Credential credential, String region, ClientProfile profile) {
super(MspClient.endpoint, MspClient.version, credential, region, profile);
}
/**
     * Deregister a migration task.
* @param req DeregisterMigrationTaskRequest
* @return DeregisterMigrationTaskResponse
* @throws TencentCloudSDKException
*/
public DeregisterMigrationTaskResponse DeregisterMigrationTask(DeregisterMigrationTaskRequest req) throws TencentCloudSDKException{
JsonResponseModel<DeregisterMigrationTaskResponse> rsp = null;
String rspStr = "";
try {
Type type = new TypeToken<JsonResponseModel<DeregisterMigrationTaskResponse>>() {
}.getType();
rspStr = this.internalRequest(req, "DeregisterMigrationTask");
rsp = gson.fromJson(rspStr, type);
} catch (JsonSyntaxException e) {
throw new TencentCloudSDKException("response message: " + rspStr + ".\n Error message: " + e.getMessage());
}
return rsp.response;
}
/**
     * Get the details of a specified migration task.
* @param req DescribeMigrationTaskRequest
* @return DescribeMigrationTaskResponse
* @throws TencentCloudSDKException
*/
public DescribeMigrationTaskResponse DescribeMigrationTask(DescribeMigrationTaskRequest req) throws TencentCloudSDKException{
JsonResponseModel<DescribeMigrationTaskResponse> rsp = null;
String rspStr = "";
try {
Type type = new TypeToken<JsonResponseModel<DescribeMigrationTaskResponse>>() {
}.getType();
rspStr = this.internalRequest(req, "DescribeMigrationTask");
rsp = gson.fromJson(rspStr, type);
} catch (JsonSyntaxException e) {
throw new TencentCloudSDKException("response message: " + rspStr + ".\n Error message: " + e.getMessage());
}
return rsp.response;
}
/**
     * Get the list of migration project names.
* @param req ListMigrationProjectRequest
* @return ListMigrationProjectResponse
* @throws TencentCloudSDKException
*/
public ListMigrationProjectResponse ListMigrationProject(ListMigrationProjectRequest req) throws TencentCloudSDKException{
JsonResponseModel<ListMigrationProjectResponse> rsp = null;
String rspStr = "";
try {
Type type = new TypeToken<JsonResponseModel<ListMigrationProjectResponse>>() {
}.getType();
rspStr = this.internalRequest(req, "ListMigrationProject");
rsp = gson.fromJson(rspStr, type);
} catch (JsonSyntaxException e) {
throw new TencentCloudSDKException("response message: " + rspStr + ".\n Error message: " + e.getMessage());
}
return rsp.response;
}
/**
     * Get the list of migration tasks.
* @param req ListMigrationTaskRequest
* @return ListMigrationTaskResponse
* @throws TencentCloudSDKException
*/
public ListMigrationTaskResponse ListMigrationTask(ListMigrationTaskRequest req) throws TencentCloudSDKException{
JsonResponseModel<ListMigrationTaskResponse> rsp = null;
String rspStr = "";
try {
Type type = new TypeToken<JsonResponseModel<ListMigrationTaskResponse>>() {
}.getType();
rspStr = this.internalRequest(req, "ListMigrationTask");
rsp = gson.fromJson(rspStr, type);
} catch (JsonSyntaxException e) {
throw new TencentCloudSDKException("response message: " + rspStr + ".\n Error message: " + e.getMessage());
}
return rsp.response;
}
/**
     * Change the project that a migration task belongs to.
* @param req ModifyMigrationTaskBelongToProjectRequest
* @return ModifyMigrationTaskBelongToProjectResponse
* @throws TencentCloudSDKException
*/
public ModifyMigrationTaskBelongToProjectResponse ModifyMigrationTaskBelongToProject(ModifyMigrationTaskBelongToProjectRequest req) throws TencentCloudSDKException{
JsonResponseModel<ModifyMigrationTaskBelongToProjectResponse> rsp = null;
String rspStr = "";
try {
Type type = new TypeToken<JsonResponseModel<ModifyMigrationTaskBelongToProjectResponse>>() {
}.getType();
rspStr = this.internalRequest(req, "ModifyMigrationTaskBelongToProject");
rsp = gson.fromJson(rspStr, type);
} catch (JsonSyntaxException e) {
throw new TencentCloudSDKException("response message: " + rspStr + ".\n Error message: " + e.getMessage());
}
return rsp.response;
}
/**
     * Update the status of a migration task.
* @param req ModifyMigrationTaskStatusRequest
* @return ModifyMigrationTaskStatusResponse
* @throws TencentCloudSDKException
*/
public ModifyMigrationTaskStatusResponse ModifyMigrationTaskStatus(ModifyMigrationTaskStatusRequest req) throws TencentCloudSDKException{
JsonResponseModel<ModifyMigrationTaskStatusResponse> rsp = null;
String rspStr = "";
try {
Type type = new TypeToken<JsonResponseModel<ModifyMigrationTaskStatusResponse>>() {
}.getType();
rspStr = this.internalRequest(req, "ModifyMigrationTaskStatus");
rsp = gson.fromJson(rspStr, type);
} catch (JsonSyntaxException e) {
throw new TencentCloudSDKException("response message: " + rspStr + ".\n Error message: " + e.getMessage());
}
return rsp.response;
}
/**
     * Register a migration task.
* @param req RegisterMigrationTaskRequest
* @return RegisterMigrationTaskResponse
* @throws TencentCloudSDKException
*/
public RegisterMigrationTaskResponse RegisterMigrationTask(RegisterMigrationTaskRequest req) throws TencentCloudSDKException{
JsonResponseModel<RegisterMigrationTaskResponse> rsp = null;
String rspStr = "";
try {
Type type = new TypeToken<JsonResponseModel<RegisterMigrationTaskResponse>>() {
}.getType();
rspStr = this.internalRequest(req, "RegisterMigrationTask");
rsp = gson.fromJson(rspStr, type);
} catch (JsonSyntaxException e) {
throw new TencentCloudSDKException("response message: " + rspStr + ".\n Error message: " + e.getMessage());
}
return rsp.response;
}
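    /**
     * Hypothetical usage sketch (credentials, region and request fields are
     * placeholders, not part of this client):
     *
     *   Credential cred = new Credential("secretId", "secretKey");
     *   MspClient client = new MspClient(cred, "ap-guangzhou");
     *   ListMigrationTaskRequest req = new ListMigrationTaskRequest();
     *   ListMigrationTaskResponse resp = client.ListMigrationTask(req);
     */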
}
| 3,204 |
777 | <reponame>google-ar/chromium
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "ui/aura/mus/in_flight_change.h"
#include "ui/aura/client/aura_constants.h"
#include "ui/aura/mus/capture_synchronizer.h"
#include "ui/aura/mus/focus_synchronizer.h"
#include "ui/aura/mus/window_mus.h"
#include "ui/aura/mus/window_port_mus.h"
#include "ui/aura/mus/window_tree_client.h"
#include "ui/aura/window.h"
#include "ui/base/ui_base_types.h"
namespace aura {
// InFlightChange -------------------------------------------------------------
InFlightChange::InFlightChange(WindowMus* window, ChangeType type)
: window_(window), change_type_(type) {}
InFlightChange::~InFlightChange() {}
bool InFlightChange::Matches(const InFlightChange& change) const {
DCHECK(change.window_ == window_ && change.change_type_ == change_type_);
return true;
}
void InFlightChange::ChangeFailed() {}
// InFlightBoundsChange -------------------------------------------------------
InFlightBoundsChange::InFlightBoundsChange(WindowTreeClient* window_tree_client,
WindowMus* window,
const gfx::Rect& revert_bounds)
: InFlightChange(window, ChangeType::BOUNDS),
window_tree_client_(window_tree_client),
revert_bounds_(revert_bounds) {}
void InFlightBoundsChange::SetRevertValueFrom(const InFlightChange& change) {
revert_bounds_ =
static_cast<const InFlightBoundsChange&>(change).revert_bounds_;
}
void InFlightBoundsChange::Revert() {
window_tree_client_->SetWindowBoundsFromServer(window(), revert_bounds_);
}
// InFlightDragChange -----------------------------------------------------
InFlightDragChange::InFlightDragChange(WindowMus* window, ChangeType type)
: InFlightChange(window, type) {
DCHECK(type == ChangeType::MOVE_LOOP || type == ChangeType::DRAG_LOOP);
}
void InFlightDragChange::SetRevertValueFrom(const InFlightChange& change) {}
void InFlightDragChange::Revert() {}
// CrashInFlightChange --------------------------------------------------------
CrashInFlightChange::CrashInFlightChange(WindowMus* window, ChangeType type)
: InFlightChange(window, type) {}
CrashInFlightChange::~CrashInFlightChange() {}
void CrashInFlightChange::SetRevertValueFrom(const InFlightChange& change) {
CHECK(false);
}
void CrashInFlightChange::ChangeFailed() {
DLOG(ERROR) << "change failed, type=" << static_cast<int>(change_type());
CHECK(false);
}
void CrashInFlightChange::Revert() {
CHECK(false);
}
// InFlightWindowChange -------------------------------------------------------
InFlightWindowTreeClientChange::InFlightWindowTreeClientChange(
WindowTreeClient* client,
WindowMus* revert_value,
ChangeType type)
: InFlightChange(nullptr, type), client_(client), revert_window_(nullptr) {
SetRevertWindow(revert_value);
}
InFlightWindowTreeClientChange::~InFlightWindowTreeClientChange() {
SetRevertWindow(nullptr);
}
void InFlightWindowTreeClientChange::SetRevertValueFrom(
const InFlightChange& change) {
SetRevertWindow(static_cast<const InFlightWindowTreeClientChange&>(change)
.revert_window_);
}
void InFlightWindowTreeClientChange::SetRevertWindow(WindowMus* window) {
if (revert_window_)
revert_window_->GetWindow()->RemoveObserver(this);
revert_window_ = window;
if (revert_window_)
revert_window_->GetWindow()->AddObserver(this);
}
void InFlightWindowTreeClientChange::OnWindowDestroyed(Window* window) {
// NOTE: this has to be in OnWindowDestroyed() as FocusClients typically
// change focus in OnWindowDestroying().
SetRevertWindow(nullptr);
}
// InFlightCaptureChange ------------------------------------------------------
InFlightCaptureChange::InFlightCaptureChange(
WindowTreeClient* client,
CaptureSynchronizer* capture_synchronizer,
WindowMus* revert_value)
: InFlightWindowTreeClientChange(client, revert_value, ChangeType::CAPTURE),
capture_synchronizer_(capture_synchronizer) {}
InFlightCaptureChange::~InFlightCaptureChange() {}
void InFlightCaptureChange::Revert() {
capture_synchronizer_->SetCaptureFromServer(revert_window());
}
// InFlightFocusChange --------------------------------------------------------
InFlightFocusChange::InFlightFocusChange(WindowTreeClient* client,
FocusSynchronizer* focus_synchronizer,
WindowMus* revert_value)
: InFlightWindowTreeClientChange(client, revert_value, ChangeType::FOCUS),
focus_synchronizer_(focus_synchronizer) {}
InFlightFocusChange::~InFlightFocusChange() {}
void InFlightFocusChange::Revert() {
focus_synchronizer_->SetFocusFromServer(revert_window());
}
// InFlightPropertyChange -----------------------------------------------------
InFlightPropertyChange::InFlightPropertyChange(
WindowMus* window,
const std::string& property_name,
std::unique_ptr<std::vector<uint8_t>> revert_value)
: InFlightChange(window, ChangeType::PROPERTY),
property_name_(property_name),
revert_value_(std::move(revert_value)) {}
InFlightPropertyChange::~InFlightPropertyChange() {}
bool InFlightPropertyChange::Matches(const InFlightChange& change) const {
return static_cast<const InFlightPropertyChange&>(change).property_name_ ==
property_name_;
}
void InFlightPropertyChange::SetRevertValueFrom(const InFlightChange& change) {
const InFlightPropertyChange& property_change =
static_cast<const InFlightPropertyChange&>(change);
if (property_change.revert_value_) {
revert_value_ =
base::MakeUnique<std::vector<uint8_t>>(*property_change.revert_value_);
} else {
revert_value_.reset();
}
}
void InFlightPropertyChange::Revert() {
window()->SetPropertyFromServer(property_name_, revert_value_.get());
}
// InFlightPredefinedCursorChange ---------------------------------------------
InFlightPredefinedCursorChange::InFlightPredefinedCursorChange(
WindowMus* window,
ui::mojom::Cursor revert_value)
: InFlightChange(window, ChangeType::PREDEFINED_CURSOR),
revert_cursor_(revert_value) {}
InFlightPredefinedCursorChange::~InFlightPredefinedCursorChange() {}
void InFlightPredefinedCursorChange::SetRevertValueFrom(
const InFlightChange& change) {
revert_cursor_ =
static_cast<const InFlightPredefinedCursorChange&>(change).revert_cursor_;
}
void InFlightPredefinedCursorChange::Revert() {
window()->SetPredefinedCursorFromServer(revert_cursor_);
}
// InFlightVisibleChange -------------------------------------------------------
InFlightVisibleChange::InFlightVisibleChange(WindowTreeClient* client,
WindowMus* window,
bool revert_value)
: InFlightChange(window, ChangeType::VISIBLE),
window_tree_client_(client),
revert_visible_(revert_value) {}
InFlightVisibleChange::~InFlightVisibleChange() {}
void InFlightVisibleChange::SetRevertValueFrom(const InFlightChange& change) {
revert_visible_ =
static_cast<const InFlightVisibleChange&>(change).revert_visible_;
}
void InFlightVisibleChange::Revert() {
window_tree_client_->SetWindowVisibleFromServer(window(), revert_visible_);
}
// InFlightOpacityChange -------------------------------------------------------
InFlightOpacityChange::InFlightOpacityChange(WindowMus* window,
float revert_value)
: InFlightChange(window, ChangeType::OPACITY),
revert_opacity_(revert_value) {}
InFlightOpacityChange::~InFlightOpacityChange() {}
void InFlightOpacityChange::SetRevertValueFrom(const InFlightChange& change) {
revert_opacity_ =
static_cast<const InFlightOpacityChange&>(change).revert_opacity_;
}
void InFlightOpacityChange::Revert() {
window()->SetOpacityFromServer(revert_opacity_);
}
// InFlightSetModalChange ------------------------------------------------------
InFlightSetModalChange::InFlightSetModalChange(WindowMus* window)
: InFlightChange(window, ChangeType::SET_MODAL) {}
InFlightSetModalChange::~InFlightSetModalChange() {}
void InFlightSetModalChange::SetRevertValueFrom(const InFlightChange& change) {}
void InFlightSetModalChange::Revert() {
// TODO: need to support more than just off. http://crbug.com/660073.
window()->GetWindow()->SetProperty(client::kModalKey, ui::MODAL_TYPE_NONE);
}
} // namespace aura
| 2,903 |
606 | #if defined(__linux__) && !defined(__ANDROID__)
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>
#include "seccomp.h"
#include "util.h"
union cmsg {
char buf[CMSG_SPACE(sizeof(int))];
struct cmsghdr hdr;
};
void seccomp_socket_create(int *sock) {
int tmp_sock[2] = {-1, -1};
if (socketpair(AF_UNIX, SOCK_STREAM, 0, tmp_sock) < 0) {
FFATAL("socketpair");
}
if (dup2(tmp_sock[STDIN_FILENO], SECCOMP_SOCKET_RECV_FD) < 0) {
FFATAL("seccomp_socket_create - dup2 (1)");
}
if (dup2(tmp_sock[STDOUT_FILENO], SECCOMP_SOCKET_SEND_FD) < 0) {
FFATAL("seccomp_socket_create - dup2 (1)");
}
if (close(tmp_sock[STDIN_FILENO]) < 0) {
FFATAL("seccomp_socket_create - close (1)");
}
if (close(tmp_sock[STDOUT_FILENO]) < 0) {
FFATAL("seccomp_socket_create - close (2)");
}
sock[STDIN_FILENO] = SECCOMP_SOCKET_RECV_FD;
sock[STDOUT_FILENO] = SECCOMP_SOCKET_SEND_FD;
}
void seccomp_socket_send(int sockfd, int fd) {
int data = 12345;
struct iovec iov = {.iov_base = &data, .iov_len = sizeof(data)};
union cmsg control_msg = {.hdr = {
.cmsg_len = CMSG_LEN(sizeof(int)),
.cmsg_level = SOL_SOCKET,
.cmsg_type = SCM_RIGHTS,
}};
struct msghdr message = {.msg_control = control_msg.buf,
.msg_controllen = sizeof(control_msg.buf),
.msg_flags = 0,
.msg_iov = &iov,
.msg_iovlen = 1,
.msg_name = NULL,
.msg_namelen = 0};
memcpy(CMSG_DATA(&control_msg.hdr), &fd, sizeof(int));
if (sendmsg(sockfd, &message, 0) == -1) { FFATAL("sendmsg"); }
}
int seccomp_socket_recv(int sockfd) {
int data;
struct iovec iov = {.iov_base = &data, .iov_len = sizeof(data)};
union cmsg control_msg = {0};
struct msghdr message = {.msg_control = control_msg.buf,
.msg_controllen = sizeof(control_msg.buf),
.msg_flags = 0,
.msg_iov = &iov,
.msg_iovlen = 1,
.msg_name = NULL,
.msg_namelen = 0};
int fd;
if (recvmsg(sockfd, &message, 0) < 0) { FFATAL("recvmsg"); }
if (control_msg.hdr.cmsg_len != CMSG_LEN(sizeof(int))) {
FFATAL("control_msg.hdr.cmsg_len");
}
if (control_msg.hdr.cmsg_level != SOL_SOCKET) {
FFATAL("control_msg.hdr.cmsg_level");
}
if (control_msg.hdr.cmsg_type != SCM_RIGHTS) {
FFATAL("control_msg.hdr.cmsg_type");
}
memcpy(&fd, CMSG_DATA(&control_msg.hdr), sizeof(int));
return fd;
}
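/*
 * Hypothetical usage sketch (the example macro, the transferred descriptor and the
 * dprintf message are assumptions, not part of this file). It shows the intended
 * flow: create the socket pair once, then pass any fd between the two ends.
 */
#ifdef SECCOMP_SOCKET_EXAMPLE
static void seccomp_socket_example(void) {
  int sock[2];
  seccomp_socket_create(sock); /* sock[0] receives, sock[1] sends */
  int fd = dup(STDOUT_FILENO); /* any descriptor to transfer */
  seccomp_socket_send(sock[STDOUT_FILENO], fd);
  int received = seccomp_socket_recv(sock[STDIN_FILENO]);
  dprintf(received, "fd passed over the socket pair\n");
  close(fd);
  close(received);
}
#endif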
#endif
| 1,542 |
687 | // Copyright 2021 The XLS Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// This is the "golden reference model" for our JPEG decoding functionality --
// it's not optimized for speed, just attempts to give implementation clarity
// and a basis for comparison for our DSL version as that comes online, and
// helps us establish unit tests for cross checking functionality.
//
// See streams.h file-level comment for references on the JPEG decoding process.
#ifndef XLS_EXAMPLES_JPEG_JPEG_GRM_H_
#define XLS_EXAMPLES_JPEG_JPEG_GRM_H_
#include <array>
#include <cstdint>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/types/optional.h"
#include "xls/examples/jpeg/constants.h"
#include "xls/examples/jpeg/streams.h"
namespace xls::jpeg {
struct Rgb {
uint8_t r;
uint8_t g;
uint8_t b;
};
// Helpers (equality, stream formatting) for testing with Rgb values.
bool operator==(const Rgb& lhs, const Rgb& rhs);
std::ostream& operator<<(std::ostream& os, const Rgb& rgb);
// Represents the result of decoding a JPEG.
struct DecodedJpeg {
uint16_t y;
uint16_t x;
// Note: this is a 2D array as an invariant (i.e. it should not be ragged, all
// lines should have the same `.size()`, which is the "number of samples per
// line");
std::vector<std::vector<Rgb>> lines;
// Returns the number of pixels in the image by inspecting the metadata about
// the image.
int64_t GetPixelCount() const { return y * x; }
// Returns the total number of pixels in the image by inspecting the value
// storage. Once the JPEG is decoded this should be the same as
// `GetPixelCount()` as an invariant.
int64_t GetPixelCountViaLineData() const {
if (lines.empty()) {
return 0;
}
return lines.size() * lines[0].size();
}
};
// Decodes a JPEG from the given byte stream or returns an error.
absl::StatusOr<DecodedJpeg> DecodeJpeg(ByteStream& byte_stream);
// Internal namespace is exposed in the header for unit testing purposes.
namespace internal {
// Note: conversion is from JFIF specification
// https://www.w3.org/Graphics/JPEG/jfif3.pdf page 3 -- notably Y has 256 levels
// [0, 255] while the chrominance channels (Cb and Cr) are offset by 128, so the
// [0, 255] values correspond to [-.5, .5] in the real number line. This implies
// that greyscale colors lacking in chrominance are given as (X, 128, 128).
Rgb ToRgb(uint8_t y, uint8_t cb, uint8_t cr);
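// For reference, the JFIF equations referenced above are (results clamped to the
// [0, 255] range):
//
//   R = Y + 1.402 * (Cr - 128)
//   G = Y - 0.344136 * (Cb - 128) - 0.714136 * (Cr - 128)
//   B = Y + 1.772 * (Cb - 128)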
// Notes whether the Huffman table applies to the DC information (no frequency
// variation within an MCU) or AC information (the components varying by
// frequency within the MCU).
enum class HuffmanTableClass {
kDc,
kAc,
};
// An "expanded" value from the Huffman metadata encoded in the JPEG stream --
// the order in which entries are placed in the stream help us expand the 16-bit
// "code" value -- if the lookahead in the bit stream matches the "code" value,
// this entry will be the (unique) match in the Huffman table.
struct PrefixEntry {
// The number of effective bits in "code". Must be in range [1, 16].
uint8_t bits;
// The prefix code to match on -- the effective bits in the code will be
// placed in the LSbs of this 16-bit storage.
uint16_t code;
// The value given in the metadata, which concatenates two 4-bit values of
// "leading zeros" and "bits to pop", referred to as RRRR and SSSS in the
// spec.
uint8_t value;
// Returns the upper nibble of "value" which indicates the number of leading
// zeros to skip in coefficients before decoding the value indicated by
// `GetBitsToPop()`.
uint8_t GetLeadingZeros() const { return value >> 4; }
// Returns the lower nibble of "value" which indicates the number of bits to
// pop from the bit stream.
uint8_t GetBitsToPop() const { return value & 0xf; }
};
// Stores data for a Huffman decoding table, as extracted from the byte stream.
//
// It is postprocessed via HuffmanTableExpandEntries once "entries" has been
// populated by the JPEG metadata to create more easily matched-on PrefixEntry
// records.
struct HuffmanTable {
HuffmanTableClass table_class;
uint8_t table_index;
std::array<std::vector<uint8_t>, kHuffmanCodeSizeLimit> entries;
std::vector<PrefixEntry> expanded;
};
// Expands the (compact) Huffman tables received from the byte stream into
// entries with the prefix bit patterns that we can more easily prefix match on.
absl::Status HuffmanTableExpandEntries(HuffmanTable* table);
// Matches the 16 bits of provided lookahead from the scan stream against the
// Huffman table, and returns a pointer to a matching prefix entry, or nullptr
// if no entry matches.
//
// Note that the lookahead bits that are currently being matched start with the
// MSb of "lookahead". e.g. if the table has entries for:
//
// #1: 0
// #2: 10
// And the lookahead is:
//
// bits[16]:0b1000_0000_0000_0000
//
// Then entry #2 will be matched.
const PrefixEntry* MatchLookahead(const HuffmanTable& table,
uint16_t lookahead);
} // namespace internal
} // namespace xls::jpeg
#endif // XLS_EXAMPLES_JPEG_JPEG_GRM_H_
| 1,735 |
1,968 | <gh_stars>1000+
//////////////////////////////////////////////////////////////////////////////
//
// This file is part of the Corona game engine.
// For overview and more information on licensing please refer to README.md
// Home page: https://github.com/coronalabs/corona
// Contact: <EMAIL>
//
//////////////////////////////////////////////////////////////////////////////
#include "stdafx.h"
#include "AsyncPipeReader.h"
#include "Interop\UI\HandleMessageEventArgs.h"
#include "Interop\UI\MessageOnlyWindow.h"
#include "Interop\Ticks.h"
#include <algorithm>
#include <exception>
#include <mutex>
#include <thread>
#include <unordered_map>
#include <unordered_set>
namespace Interop { namespace Ipc {
#pragma region Static Members
/// <summary>Mutex used to synchronize access to this class' static member variables.</summary>
static std::recursive_mutex sMutex;
/// <summary>
/// <para>Hash table used by all worker threads to access the AsyncPipeReader object that spawned them.</para>
/// <para>The key is a unique integer ID assigned to the thread via the OnAsyncExecute() argument.</para>
/// <para>The worker thread is expected to remove the pair when it exits the thread.</para>
/// <para>
/// The reader can null out its pointer in the pair if the thread does not exit in-time. This allows the reader
/// to cut loose a thread before spawning a new one so that the old thread can no longer access it.
/// </para>
/// </summary>
static std::unordered_map<uint32_t, AsyncPipeReader*> sThreadIdToAsyncPipeReaderMap;
/// <summary>Stores the next unique integer ID to be assigned to a worker thread.</summary>
static uint32_t sNextThreadIntegerId;
#pragma endregion
#pragma region Constructors/Destructors
AsyncPipeReader::AsyncPipeReader(HANDLE pipeHandle)
: fPipeHandle(pipeHandle),
fIsRunning(false),
fWasClosed(false),
fIsRaisingReceivedDataEvents(false),
fMessageOnlyWindowPointer(Interop::UI::MessageOnlyWindow::GetSharedInstanceForCurrentThread()),
fReceivedMessageEventHandler(this, &AsyncPipeReader::OnReceivedMessage),
fPipeClosedMessageId(0),
fReceivedDataMessageId(0),
fThreadIntegerId(0)
{
// Add event handlers.
if (fMessageOnlyWindowPointer)
{
fMessageOnlyWindowPointer->GetReceivedMessageEventHandlers().Add(&fReceivedMessageEventHandler);
fPipeClosedMessageId = fMessageOnlyWindowPointer->ReserveMessageId();
fReceivedDataMessageId = fMessageOnlyWindowPointer->ReserveMessageId();
}
}
AsyncPipeReader::~AsyncPipeReader()
{
// Stop the worker thread in case it is still running.
Stop();
// Remove event handlers.
if (fMessageOnlyWindowPointer)
{
fMessageOnlyWindowPointer->GetReceivedMessageEventHandlers().Remove(&fReceivedMessageEventHandler);
fMessageOnlyWindowPointer->UnreserveMessageId(fPipeClosedMessageId);
fMessageOnlyWindowPointer->UnreserveMessageId(fReceivedDataMessageId);
fPipeClosedMessageId = 0;
fReceivedDataMessageId = 0;
}
}
#pragma endregion
#pragma region Public Methods
AsyncPipeReader::ClosedEvent::HandlerManager& AsyncPipeReader::GetClosedEventHandlers()
{
return fClosedEvent.GetHandlerManager();
}
AsyncPipeReader::ReceivedDataEvent::HandlerManager& AsyncPipeReader::GetReceivedDataEventHandlers()
{
return fReceivedDataEvent.GetHandlerManager();
}
AsyncPipeReader::ReceivingDataBeganEvent::HandlerManager& AsyncPipeReader::GetReceivingDataBeganEventHandlers()
{
return fReceivingDataBeganEvent.GetHandlerManager();
}
AsyncPipeReader::ReceivingDataEndedEvent::HandlerManager& AsyncPipeReader::GetReceivingDataEndedEventHandlers()
{
return fReceivingDataEndedEvent.GetHandlerManager();
}
HANDLE AsyncPipeReader::GetHandle() const
{
return fPipeHandle;
}
bool AsyncPipeReader::IsRunning() const
{
return fIsRunning;
}
bool AsyncPipeReader::WasClosed() const
{
return fWasClosed;
}
OperationResult AsyncPipeReader::Start()
{
// Validate.
if (!fPipeHandle)
{
return OperationResult::FailedWith(L"Pipe handle is null.");
}
if (!fMessageOnlyWindowPointer)
{
return OperationResult::FailedWith(L"Failed to obtain a Win32 message-only window.");
}
if (fWasClosed)
{
return OperationResult::FailedWith(L"Pipe has been closed.");
}
// Do not continue if this reader is already running.
if (IsRunning())
{
return OperationResult::FailedWith(L"Already started.");
}
// Create a static mapping between the worker thread and this AsyncPipeReader instance.
// This is used by the worker thread to fetch this object instance safely.
// It also allows us to cut the thread loose and remove the mapping in case the worker thread hangs.
{
std::lock_guard<std::recursive_mutex> scopedMutexLock(sMutex);
// Generate a unique thread ID out of all AsyncPipeReader instances.
for (fThreadIntegerId = sNextThreadIntegerId;; fThreadIntegerId++)
{
// Do not allow an ID of zero.
// This value is used to indicate that a reader is not currently executing a thread.
if (0 == fThreadIntegerId)
{
continue;
}
// Check if this ID is already being used by another pipe reader.
if (sThreadIdToAsyncPipeReaderMap.find(fThreadIntegerId) != sThreadIdToAsyncPipeReaderMap.end())
{
continue;
}
// The generated thread ID is unique. Break out of the loop.
break;
}
sNextThreadIntegerId = fThreadIntegerId + 1;
// Add a mapping between the generated thread ID and this reader instance.
sThreadIdToAsyncPipeReaderMap.insert(std::pair<uint32_t, AsyncPipeReader*>(fThreadIntegerId, this));
}
// Clear any data remaining in the queue.
fReceivedDataCollection.clear();
fReceivedDataCollection.shrink_to_fit();
// Start the worker thread used to read data from the pipe.
fIsRunning = true;
try
{
std::thread newThread(&AsyncPipeReader::OnAsyncExecute, fThreadIntegerId);
newThread.detach();
}
catch (std::exception ex)
{
// Failed to start the thread.
// Reset this reader's variable to indicate that it has not been started.
{
std::lock_guard<std::recursive_mutex> scopedMutexLock(sMutex);
sThreadIdToAsyncPipeReaderMap.erase(fThreadIntegerId);
}
fThreadIntegerId = 0;
fIsRunning = false;
// Return an error explaining why it failed.
const char* errorMessage = ex.what();
if (!errorMessage || ('\0' == errorMessage[0]))
{
errorMessage = "Failed to create thread.";
}
return OperationResult::FailedWith(errorMessage);
}
// This reader has been successfully started.
return OperationResult::kSucceeded;
}
void AsyncPipeReader::Stop()
{
// Do not continue if not running.
if (!fIsRunning)
{
return;
}
// Flag that this reader is no longer running.
fIsRunning = false;
// Wait for the worker thread to exit gracefully.
const Ticks kTimeoutInTicks = Ticks::FromCurrentTime().AddSeconds(1);
do
{
std::this_thread::yield();
std::lock_guard<std::recursive_mutex> scopedMutexLock(sMutex);
if (sThreadIdToAsyncPipeReaderMap.find(fThreadIntegerId) != sThreadIdToAsyncPipeReaderMap.end())
{
break;
}
} while (Ticks::FromCurrentTime() < kTimeoutInTicks);
// Remove this reader from the static mapping.
// This cuts the worker thread loose in case it is blocked/timed-out above, denying it access to this reader.
{
std::lock_guard<std::recursive_mutex> scopedMutexLock(sMutex);
if (sThreadIdToAsyncPipeReaderMap.find(fThreadIntegerId) != sThreadIdToAsyncPipeReaderMap.end())
{
sThreadIdToAsyncPipeReaderMap[fThreadIntegerId] = nullptr;
}
}
fThreadIntegerId = 0;
// Clear any data remaining in the queue.
fReceivedDataCollection.clear();
fReceivedDataCollection.shrink_to_fit();
}
void AsyncPipeReader::Poll()
{
// Remove all "ReceivedData" messages from this pipe's message-only window queue.
if (fMessageOnlyWindowPointer && fReceivedDataMessageId)
{
fMessageOnlyWindowPointer->RemoveMessagesById(fReceivedDataMessageId);
}
// Process all queued data from the worker thread right now.
ProcessReceivedData();
// If a "PipeClosed" message was posted on the window queue, then handle it now.
if (fMessageOnlyWindowPointer && fPipeClosedMessageId)
{
bool wasRemoved = fMessageOnlyWindowPointer->RemoveMessagesById(fPipeClosedMessageId);
if (wasRemoved)
{
Interop::UI::MessageSettings messageSettings{};
messageSettings.WindowHandle = fMessageOnlyWindowPointer->GetWindowHandle();
messageSettings.MessageId = fPipeClosedMessageId;
OnReceivedMessage(*fMessageOnlyWindowPointer, Interop::UI::HandleMessageEventArgs(messageSettings));
}
}
}
#pragma endregion
#pragma region Private Methods
void AsyncPipeReader::OnReceivedMessage(
Interop::UI::UIComponent& sender, Interop::UI::HandleMessageEventArgs& arguments)
{
// Do not continue if the received message was already handled.
if (arguments.WasHandled())
{
return;
}
// Handle the received message.
if (arguments.GetMessageId() == fPipeClosedMessageId)
{
// *** The pipe has been externally closed. ***
// Note: The worker thread will automatically self terminate in this case.
// Handle the close event.
if (!fWasClosed)
{
// First, raise events for all received data in the queue, if any.
ProcessReceivedData();
// Update member variables.
fIsRunning = false;
fThreadIntegerId = 0;
fWasClosed = true;
// Notify the system that the pipe has been closed.
fClosedEvent.Raise(*this, EventArgs::kEmpty);
}
// Flag this message as handled.
arguments.SetHandled();
arguments.SetReturnResult(0);
}
else if (arguments.GetMessageId() == fReceivedDataMessageId)
{
// *** New data has been received from the pipe and appended to this reader's queue. ***
// Handle all queued data received from the pipe by the worker thread.
ProcessReceivedData();
// Flag this message as handled.
arguments.SetHandled();
arguments.SetReturnResult(0);
}
}
void AsyncPipeReader::ProcessReceivedData()
{
// Do not continue if we're already in the middle of raising "ReceivedData" events below.
	// This prevents data from being received out of order.
// Note: This issue can happen if a "ReceivedData" event handler pumps the system's message queue.
if (fIsRaisingReceivedDataEvents)
{
return;
}
// Do not continue if this reader is no longer running.
if (!fIsRunning)
{
std::lock_guard<std::recursive_mutex> scopedMutexLock(fMutex);
fReceivedDataCollection.clear();
return;
}
// Do not continue if we haven't received any data.
if (fReceivedDataCollection.empty())
{
return;
}
// Notify the system that we're about to process all received data.
fIsRaisingReceivedDataEvents = true;
fReceivingDataBeganEvent.Raise(*this, EventArgs::kEmpty);
// Handle all queued data received from the pipe by the worker thread.
// Note: Make sure this reader wasn't stopped by an event handler for the above "ReceiveDataBegan" event.
if (fIsRunning)
{
const size_t kDataArraySize = 32;
IODataEventArgs::Data dataArray[kDataArraySize];
while (fReceivedDataCollection.size() > 0)
{
// Pop off a batch of data from the "fReceivedDataCollection" and copy them to our temporary array.
int dataItemsCopied = 0;
{
std::lock_guard<std::recursive_mutex> scopedMutexLock(fMutex);
const int kDataItemsAvailable = (int)fReceivedDataCollection.size();
int index;
for (index = 0; (index < kDataItemsAvailable) && (index < kDataArraySize); index++)
{
dataArray[index] = fReceivedDataCollection.at(index);
}
dataItemsCopied = index;
if (dataItemsCopied > 0)
{
auto iterator = fReceivedDataCollection.begin();
if (dataItemsCopied >= 2)
{
fReceivedDataCollection.erase(iterator, iterator + dataItemsCopied);
}
else
{
fReceivedDataCollection.erase(iterator);
}
}
}
// Notify the system about the newly received I/O data.
for (int index = 0; index < dataItemsCopied; index++)
{
// Raise a "ReceivedData" event.
try
{
fReceivedDataEvent.Raise(*this, IODataEventArgs(dataArray[index]));
}
catch (...) {}
// Do not continue if an event handler has stopped this reader or closed the pipe.
if (!fIsRunning)
{
break;
}
}
// Do not continue if an event handler has stopped this reader.
if (!fIsRunning)
{
break;
}
}
}
// Notify the system that we're done processing all received data.
fIsRaisingReceivedDataEvents = false;
fReceivingDataEndedEvent.Raise(*this, EventArgs::kEmpty);
}
#pragma endregion
#pragma region Private Static Functions
void AsyncPipeReader::OnAsyncExecute(const uint32_t threadIntegerId)
{
// Validate.
if (!threadIntegerId)
{
throw std::exception();
}
// Fetch information from the reader.
HANDLE pipeHandle = nullptr;
HWND windowHandle = nullptr;
UINT pipeClosedMessageId = 0;
UINT receivedDataMessageId = 0;
{
std::lock_guard<std::recursive_mutex> scopedMutexLock(sMutex);
auto iterator = sThreadIdToAsyncPipeReaderMap.find(threadIntegerId);
if (iterator != sThreadIdToAsyncPipeReaderMap.end())
{
auto readerPointer = (*iterator).second;
if (readerPointer)
{
pipeHandle = readerPointer->fPipeHandle;
windowHandle = readerPointer->fMessageOnlyWindowPointer->GetWindowHandle();
pipeClosedMessageId = readerPointer->fPipeClosedMessageId;
receivedDataMessageId = readerPointer->fReceivedDataMessageId;
}
}
}
// Start our infinite loop which reads the given reader's pipe.
const DWORD kBufferSizeInBytes = 32767;
char byteBuffer[kBufferSizeInBytes];
DWORD bytesCopiedToBuffer = 0;
bool wasStopRequested = false;
bool wasPipeClosed = false;
while (true)
{
// Poll the pipe's buffer until:
// - All of the bytes in the buffer have been read.
// - The pipe has been closed or errored out.
// - The reader has requested this thread to exit.
while (true)
{
// Determine if it's time to exit this thread.
{
// Exit if the reader was destroyed.
std::lock_guard<std::recursive_mutex> scopedStaticMutexLock(sMutex);
AsyncPipeReader* readerPointer = nullptr;
auto iterator = sThreadIdToAsyncPipeReaderMap.find(threadIntegerId);
if (iterator != sThreadIdToAsyncPipeReaderMap.end())
{
readerPointer = (*iterator).second;
}
if (!readerPointer)
{
wasStopRequested = true;
break;
}
// Exit if the reader was stopped.
std::lock_guard<std::recursive_mutex> scopedInstanceMutexLock(readerPointer->fMutex);
if (!readerPointer->fIsRunning)
{
wasStopRequested = true;
break;
}
}
// Do a non-blocking check for bytes in the pipe's buffer.
// Note: This *will* block if a ReadFile() was called on another thread. (You should never do that.)
BOOL result;
DWORD bytesAvailable = 0;
result = ::PeekNamedPipe(pipeHandle, nullptr, 0, nullptr, &bytesAvailable, nullptr);
if (!result)
{
if (::GetLastError() != ERROR_IO_PENDING)
{
wasPipeClosed = true;
}
break;
}
// Do not continue if the pipe does not contain any data.
if (bytesAvailable <= 0)
{
break;
}
// Fetch data from the pipe.
auto timestamp = DateTime::FromCurrentLocal();
DWORD bytesReceived = 0;
DWORD bytesRequested = (std::min)(bytesAvailable, kBufferSizeInBytes - bytesCopiedToBuffer);
result = ::ReadFile(pipeHandle, byteBuffer + bytesCopiedToBuffer, bytesRequested, &bytesReceived, nullptr);
if (!result)
{
if (::GetLastError() != ERROR_IO_PENDING)
{
wasPipeClosed = true;
}
break;
}
if (bytesReceived <= 0)
{
break;
}
if (bytesReceived > bytesRequested)
{
bytesReceived = bytesRequested;
}
bytesCopiedToBuffer += bytesReceived;
// Send all received data to the reader on its main thread.
{
std::lock_guard<std::recursive_mutex> scopedStaticMutexLock(sMutex);
auto iterator = sThreadIdToAsyncPipeReaderMap.find(threadIntegerId);
if ((iterator != sThreadIdToAsyncPipeReaderMap.end()) && (*iterator).second)
{
// Split the received data into newline separated strings and push them into the reader's queue.
auto readerPointer = (*iterator).second;
std::lock_guard<std::recursive_mutex> scopedInstanceMutexLock(readerPointer->fMutex);
IODataEventArgs::Data ioData;
ioData.Timestamp = timestamp;
while (bytesCopiedToBuffer > 0)
{
// Find the next newline character in the buffer.
DWORD endIndex;
DWORD skip = 0;
for (endIndex = 0; endIndex < bytesCopiedToBuffer; endIndex++)
{
if (skip == endIndex && byteBuffer[skip] == 0)
{
skip++;
}
if ('\n' == byteBuffer[endIndex])
{
break;
}
}
if (endIndex < bytesCopiedToBuffer)
{
// Newline was found. Push that substring into the reader's queue.
DWORD bytesToCopy = endIndex + 1;
try
{
ioData.Text = std::make_shared<const std::string>(byteBuffer+skip, bytesToCopy-skip);
readerPointer->fReceivedDataCollection.push_back(ioData);
}
catch (...) {}
// Remove the above substring from this thread's buffer by shifting its bytes.
bytesCopiedToBuffer -= bytesToCopy;
if (bytesCopiedToBuffer > 0)
{
memmove_s(byteBuffer, kBufferSizeInBytes, byteBuffer + bytesToCopy, bytesCopiedToBuffer);
}
}
else
{
// A newline character was not found.
// If the buffer is full, then push the entire buffer as 1 string into the reader's queue.
if (bytesCopiedToBuffer >= kBufferSizeInBytes)
{
try
{
ioData.Text = std::make_shared<const std::string>(byteBuffer+skip, kBufferSizeInBytes-skip);
readerPointer->fReceivedDataCollection.push_back(ioData);
}
catch (...) {}
bytesCopiedToBuffer = 0;
}
// Wait for more data from the pipe. We're hoping to get a newline character later.
break;
}
}
// Notify the reader on its main thread if at least 1 string was pushed into the reader's queue.
if (ioData.Text.get() && windowHandle && receivedDataMessageId)
{
::PostMessageW(windowHandle, receivedDataMessageId, 0, 0);
}
}
}
}
// Exit out of this thead if:
// - The pipe was closed or errored out.
// - The reader requested this thread to exit via its Stop() method.
if (wasPipeClosed || wasStopRequested)
{
break;
}
// Give this thread a very short break.
std::this_thread::sleep_for(std::chrono::milliseconds(10));
}
// Final cleanup before exiting this thread.
{
std::lock_guard<std::recursive_mutex> scopedStaticMutexLock(sMutex);
auto iterator = sThreadIdToAsyncPipeReaderMap.find(threadIntegerId);
if (iterator != sThreadIdToAsyncPipeReaderMap.end())
{
// Notify the reader if the pipe was externally closed/disconnected.
auto readerPointer = (*iterator).second;
if (readerPointer && wasPipeClosed && windowHandle && pipeClosedMessageId)
{
::PostMessageW(windowHandle, pipeClosedMessageId, 0, 0);
}
// Remove this thread's reference in the static map.
// This signals the reader in its Stop() method that this thread has exited gracefully.
sThreadIdToAsyncPipeReaderMap.erase(iterator);
}
}
}
#pragma endregion
} } // namespace Interop::Ipc
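// ----------------------------------------------------------------------------
// Hypothetical usage sketch (the pipe handle, handler object and pump strategy
// are assumptions, not part of this file):
//
//   Interop::Ipc::AsyncPipeReader reader(readEndOfPipeHandle);
//   reader.GetReceivedDataEventHandlers().Add(&myReceivedDataHandler);
//   reader.Start();  // spawns the worker thread that drains the pipe
//   ...
//   reader.Poll();   // raises queued ReceivedData/Closed events on this thread
//   reader.Stop();   // waits briefly for the worker thread, then cuts it loose
// ----------------------------------------------------------------------------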
| 6,856 |
3,655 | import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle import ParamAttr
from paddle.regularizer import L2Decay
from paddlex.ppdet.core.workspace import register
def _de_sigmoid(x, eps=1e-7):
x = paddle.clip(x, eps, 1. / eps)
x = paddle.clip(1. / x - 1., eps, 1. / eps)
x = -paddle.log(x)
return x
@register
class YOLOv3Head(nn.Layer):
__shared__ = ['num_classes', 'data_format']
__inject__ = ['loss']
def __init__(self,
in_channels=[1024, 512, 256],
anchors=[[10, 13], [16, 30], [33, 23], [30, 61], [62, 45],
[59, 119], [116, 90], [156, 198], [373, 326]],
anchor_masks=[[6, 7, 8], [3, 4, 5], [0, 1, 2]],
num_classes=80,
loss='YOLOv3Loss',
iou_aware=False,
iou_aware_factor=0.4,
data_format='NCHW'):
"""
Head for YOLOv3 network
Args:
num_classes (int): number of foreground classes
anchors (list): anchors
anchor_masks (list): anchor masks
loss (object): YOLOv3Loss instance
iou_aware (bool): whether to use iou_aware
iou_aware_factor (float): iou aware factor
data_format (str): data format, NCHW or NHWC
"""
super(YOLOv3Head, self).__init__()
assert len(in_channels) > 0, "in_channels length should > 0"
self.in_channels = in_channels
self.num_classes = num_classes
self.loss = loss
self.iou_aware = iou_aware
self.iou_aware_factor = iou_aware_factor
self.parse_anchor(anchors, anchor_masks)
self.num_outputs = len(self.anchors)
self.data_format = data_format
self.yolo_outputs = []
for i in range(len(self.anchors)):
if self.iou_aware:
num_filters = len(self.anchors[i]) * (self.num_classes + 6)
else:
num_filters = len(self.anchors[i]) * (self.num_classes + 5)
name = 'yolo_output.{}'.format(i)
conv = nn.Conv2D(
in_channels=self.in_channels[i],
out_channels=num_filters,
kernel_size=1,
stride=1,
padding=0,
data_format=data_format,
bias_attr=ParamAttr(regularizer=L2Decay(0.)))
conv.skip_quant = True
yolo_output = self.add_sublayer(name, conv)
self.yolo_outputs.append(yolo_output)
def parse_anchor(self, anchors, anchor_masks):
self.anchors = [[anchors[i] for i in mask] for mask in anchor_masks]
self.mask_anchors = []
anchor_num = len(anchors)
for masks in anchor_masks:
self.mask_anchors.append([])
for mask in masks:
assert mask < anchor_num, "anchor mask index overflow"
self.mask_anchors[-1].extend(anchors[mask])
def forward(self, feats, targets=None):
assert len(feats) == len(self.anchors)
yolo_outputs = []
for i, feat in enumerate(feats):
yolo_output = self.yolo_outputs[i](feat)
if self.data_format == 'NHWC':
yolo_output = paddle.transpose(yolo_output, [0, 3, 1, 2])
yolo_outputs.append(yolo_output)
if self.training:
return self.loss(yolo_outputs, targets, self.anchors)
else:
if self.iou_aware:
y = []
for i, out in enumerate(yolo_outputs):
na = len(self.anchors[i])
ioup, x = out[:, 0:na, :, :], out[:, na:, :, :]
b, c, h, w = x.shape
no = c // na
x = x.reshape((b, na, no, h * w))
ioup = ioup.reshape((b, na, 1, h * w))
obj = x[:, :, 4:5, :]
ioup = F.sigmoid(ioup)
obj = F.sigmoid(obj)
obj_t = (obj**(1 - self.iou_aware_factor)) * (
ioup**self.iou_aware_factor)
obj_t = _de_sigmoid(obj_t)
loc_t = x[:, :, :4, :]
cls_t = x[:, :, 5:, :]
y_t = paddle.concat([loc_t, obj_t, cls_t], axis=2)
y_t = y_t.reshape((b, c, h, w))
y.append(y_t)
return y
else:
return yolo_outputs
@classmethod
def from_config(cls, cfg, input_shape):
return {'in_channels': [i.channels for i in input_shape], }
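if __name__ == '__main__':
    # Hypothetical smoke test (feature-map shapes and batch size are assumptions,
    # not part of PaddleDetection): in eval mode the head only applies its 1x1
    # output convolutions, so no loss object is required.
    head = YOLOv3Head(in_channels=[1024, 512, 256], num_classes=80)
    head.eval()
    feats = [paddle.rand([1, c, s, s]) for c, s in zip([1024, 512, 256], [13, 26, 52])]
    outs = head(feats)
    print([tuple(o.shape) for o in outs])  # expect 3 * (80 + 5) = 255 channels per scale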
| 2,570 |
545 | <reponame>mmoegele/xmastree2020<filename>examples/xmaslights-tetrahedron.py
def xmaslight():
    # This is based on the code from Matt's video (see the links in the header comment below)
#NOTE THE LEDS ARE GRB COLOUR (NOT RGB)
# Here are the libraries I am currently using:
import time
import board
import neopixel
import re
import math
# You are welcome to add any of these:
# import random
# import numpy
# import scipy
# import sys
    # If you want to have user-changeable values, they need to be entered from the command line,
    # so import sys and use sys.argv[1] etc
    # some_value = int(sys.argv[1])
# IMPORT THE COORDINATES (please don't break this bit)
coordfilename = "Python/coords.txt"
fin = open(coordfilename,'r')
coords_raw = fin.readlines()
coords_bits = [i.split(",") for i in coords_raw]
coords = []
for slab in coords_bits:
new_coord = []
for i in slab:
new_coord.append(int(re.sub(r'[^-\d]','', i)))
coords.append(new_coord)
#set up the pixels (AKA 'LEDs')
PIXEL_COUNT = len(coords) # this should be 500
pixels = neopixel.NeoPixel(board.D18, PIXEL_COUNT, auto_write=False)
# YOU CAN EDIT FROM HERE DOWN
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# Authors: <NAME>, <NAME>, <NAME>
# Date: 12/23/2020
#
# A spinning tetrahedron to light up <NAME>'s Christmas Tree!
# Code modified from: https://github.com/standupmaths/xmastree2020/blob/main/xmaslights-spin.py
#
# Watch Matt's video: https://www.youtube.com/watch?v=TvlpIojusBE
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
import numpy as np
# This function tells you if a given point is on the same side of a triangle (v1, v2, v3) as the point v4
def sameSide(v1, v2, v3, v4, point):
normal = np.cross(np.subtract(v2, v1), np.subtract(v3, v1))
dotv4 = np.dot(normal, np.subtract(v4, v1))
dotpoint = np.dot(normal, np.subtract(point, v1))
return (np.sign(dotv4) == np.sign(dotpoint))
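    # sameSide works because the cross product gives the normal of the triangle's
    # plane, and v4 and the query point lie on the same side exactly when their
    # signed distances to that plane (the two dot products) share a sign.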
# By checking a point against all the faces of a tetrahedron, we can tell if the point is inside or outside
def pointInsideTetrahedron(v1, v2, v3, v4, point):
return (sameSide(v1, v2, v3, v4, point) and
sameSide(v2, v3, v4, v1, point) and
sameSide(v3, v4, v1, v2, point) and
sameSide(v4, v1, v2, v3, point))
# This function rotates a point by 3 angles from their respective axes
def matrixRotate(point, xAngle, yAngle, zAngle):
v1 = np.array([[point[0]], [point[1]], [point[2]]])
xAngle = math.radians(xAngle)
yAngle = math.radians(yAngle)
zAngle = math.radians(zAngle)
xRotation = np.array([
[1, 0, 0],
[0, np.cos(xAngle), -np.sin(xAngle)],
[0, np.sin(xAngle), np.cos(xAngle)]
])
yRotation = np.array([
[np.cos(yAngle), 0, np.sin(yAngle)],
[0, 1, 0],
[-np.sin(yAngle), 0, np.cos(yAngle)]
])
zRotation = np.array([
[np.cos(zAngle), -np.sin(zAngle), 0],
[np.sin(zAngle), np.cos(zAngle), 0],
[0, 0, 1]
])
v2 = zRotation.dot(yRotation.dot(xRotation.dot(v1)))
return v2[0][0], v2[1][0], v2[2][0]
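    # Quick sanity check (illustrative): matrixRotate((1, 0, 0), 0, 0, 90)
    # returns approximately (0, 1, 0), i.e. a 90 degree rotation about Z.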
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# Adjust these values to change the effect!
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Inside and outside colors in GRB
insideColor = [115, 230, 0] # orange
outsideColor = [0, 31, 77] # dark purple
# Scale of the tetrahedron
scale = 400
# Offset of the tetrahedron. (0, 0, 0) will make it centered on the origin
# Note that the tetrahedron does not rotate about its own center, but uses the various axes
# and so offsetting it means it won't rotate in-place
xOffset = 0
yOffset = 0
zOffset = .4
# The change of the angle per update in DEGREES
# Rotations apply in the order X -> Y -> Z because I'm bad at quaternions
xAngleChange = 5
yAngleChange = 10
zAngleChange = 15
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# Update the LEDs!
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
frame = 0
run = 1
while run == 1:
tetrahedron = [
[(-1 + xOffset) * scale, (0 + yOffset) * scale, (-1/1.414 + zOffset) * scale],
[(1 + xOffset) * scale, (0 + yOffset) * scale, (-1/1.414 + zOffset) * scale],
[(0 + xOffset) * scale, (-1 + yOffset) * scale, (1/1.414 + zOffset) * scale],
[(0 + xOffset) * scale, (1 + yOffset) * scale, (1/1.414 + zOffset) * scale]
]
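        # Before scaling and offset, these four vertices are (+-1, 0, -1/sqrt(2))
        # and (0, +-1, 1/sqrt(2)): a regular tetrahedron with edge length 2
        # centred on the origin, which `scale` then blows up to tree size.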
# Vertices of our tetrahedron, with the applied rotation
tetrahedron = [
matrixRotate(tetrahedron[0], xAngleChange * frame, yAngleChange * frame , zAngleChange * frame),
matrixRotate(tetrahedron[1], xAngleChange * frame, yAngleChange * frame , zAngleChange * frame),
matrixRotate(tetrahedron[2], xAngleChange * frame, yAngleChange * frame , zAngleChange * frame),
matrixRotate(tetrahedron[3], xAngleChange * frame , yAngleChange * frame , zAngleChange * frame)
]
LED = 0
for coord in coords:
if (pointInsideTetrahedron(tetrahedron[0], tetrahedron[1], tetrahedron[2], tetrahedron[3], coord)):
pixels[LED] = insideColor
else:
pixels[LED] = outsideColor
LED += 1
# Send out the updates to the lights
pixels.show()
frame += 1
return 'DONE'
# yes, I just put this at the bottom so it auto runs
xmaslight() | 2,814 |
815 | #include "vtkContext2DScalarBarActor.h"
#include "vtkAxis.h"
#include "vtkBoundingRectContextDevice2D.h"
#include "vtkBrush.h"
#include "vtkColorTransferFunction.h"
#include "vtkContext2D.h"
#include "vtkContextActor.h"
#include "vtkContextDevice2D.h"
#include "vtkContextItem.h"
#include "vtkContextScene.h"
#include "vtkDiscretizableColorTransferFunction.h"
#include "vtkDoubleArray.h"
#include "vtkFloatArray.h"
#include "vtkImageData.h"
#include "vtkLookupTable.h"
#include "vtkMath.h"
#include "vtkNew.h"
#include "vtkObjectFactory.h"
#include "vtkOpenGLContextDevice2D.h"
#include "vtkOpenGLRenderWindow.h"
#include "vtkPen.h"
#include "vtkPointData.h"
#include "vtkPoints2D.h"
#include "vtkRenderWindow.h"
#include "vtkRenderer.h"
#include "vtkScalarsToColors.h"
#include "vtkTextProperty.h"
#include "vtkTransform2D.h"
#include "vtkUnsignedCharArray.h"
#include "vtkViewport.h"
#include <limits>
#include <map>
#if defined(_WIN32) && !defined(__CYGWIN__)
#define SNPRINTF _snprintf
#else
#define SNPRINTF snprintf
#endif
// NOTE FOR DEVELOPERS
// The color bar is defined so that the origin (0, 0) is the bottom left
// corner of the region that contains the scalar bar, the out-of-range
// color swatches, and the NaN color swatches. This is true for both
// horizontal and vertical orientations.
//
// The layout of the color bar for both orientations is as follows:
//
// VERTICAL HORIZONTAL
//
// +-+ Below Range Above Range NaN Color
// | | Above Range +-+-------------------+-+ +-+
// +-+ | | | | | |
// | | +-+-------------------+-+-+-+
// | | (0, 0) Scalar Bar
// | |
// | | Scalar Bar
// | |
// +-+
// | | Below Range
// +-+
//
// +-+
//  | | NaN Color
//  +-+
// (0, 0)
// This class is a vtkContextItem that can be added to a vtkContextScene.
//----------------------------------------------------------------------------
class vtkContext2DScalarBarActor::vtkScalarBarItem : public vtkContextItem
{
public:
vtkTypeMacro(vtkScalarBarItem, vtkContextItem);
static vtkScalarBarItem* New() { VTK_OBJECT_FACTORY_NEW_BODY(vtkScalarBarItem); }
// Forward calls to vtkContextItem::Paint to vtkContext2DScalarBarActor
bool Paint(vtkContext2D* painter) override
{
bool somethingRendered = false;
if (this->Actor)
{
somethingRendered = this->Actor->Paint(painter);
}
return somethingRendered && this->Superclass::Paint(painter);
}
// Reference to the Actor.
vtkContext2DScalarBarActor* Actor;
protected:
vtkScalarBarItem()
: Actor(nullptr)
{
}
~vtkScalarBarItem() override = default;
};
//----------------------------------------------------------------------------
// Hide use of std::map from public interface
class vtkContext2DScalarBarActor::vtkAnnotationMap : public std::map<double, std::string>
{
};
//----------------------------------------------------------------------------
vtkStandardNewMacro(vtkContext2DScalarBarActor);
//----------------------------------------------------------------------------
vtkContext2DScalarBarActor::vtkContext2DScalarBarActor()
{
this->ActorDelegate = vtkContextActor::New();
this->TitleJustification = VTK_TEXT_LEFT;
this->ForceHorizontalTitle = false;
this->ScalarBarThickness = 16;
this->ScalarBarLength = 0.33;
this->AutomaticLabelFormat = 1;
this->AddRangeLabels = 1;
this->AutomaticAnnotations = 0;
this->AddRangeAnnotations = 0;
this->RangeLabelFormat = nullptr;
this->SetRangeLabelFormat("%g");
this->OutlineScalarBar = 0;
this->Spacer = 4.0;
this->DrawTickMarks = true;
// Create an array for the custom labels
this->CustomLabels = vtkDoubleArray::New();
this->ReverseLegend = false;
this->ScalarBarItem = vtkScalarBarItem::New();
this->ScalarBarItem->Actor = this;
vtkContextScene* localScene = vtkContextScene::New();
this->ActorDelegate->SetScene(localScene);
localScene->AddItem(this->ScalarBarItem);
localScene->Delete();
this->CurrentViewport = nullptr;
this->Axis = vtkAxis::New();
this->Axis->SetScene(localScene);
}
//----------------------------------------------------------------------------
vtkContext2DScalarBarActor::~vtkContext2DScalarBarActor()
{
this->SetLookupTable(nullptr);
this->ActorDelegate->Delete();
this->SetTitle(nullptr);
this->SetComponentTitle(nullptr);
this->ScalarBarItem->Delete();
this->SetTitleTextProperty(nullptr);
this->SetLabelTextProperty(nullptr);
this->Axis->Delete();
this->SetRangeLabelFormat(nullptr);
}
//----------------------------------------------------------------------------
void vtkContext2DScalarBarActor::SetNumberOfCustomLabels(vtkIdType numLabels)
{
this->CustomLabels->SetNumberOfTuples(numLabels);
}
//----------------------------------------------------------------------------
vtkIdType vtkContext2DScalarBarActor::GetNumberOfCustomLabels()
{
return this->CustomLabels->GetNumberOfTuples();
}
//----------------------------------------------------------------------------
void vtkContext2DScalarBarActor::SetCustomLabel(vtkIdType index, double value)
{
if (index < 0 || index >= this->CustomLabels->GetNumberOfTuples())
{
vtkErrorMacro(<< "Index out of range");
return;
}
this->CustomLabels->SetTypedTuple(index, &value);
}
//----------------------------------------------------------------------------
int vtkContext2DScalarBarActor::RenderOverlay(vtkViewport* viewport)
{
this->CurrentViewport = viewport;
int returnValue = 0;
if (this->ActorDelegate)
{
returnValue = this->ActorDelegate->RenderOverlay(viewport);
}
return returnValue;
}
//----------------------------------------------------------------------------
int vtkContext2DScalarBarActor::RenderOpaqueGeometry(vtkViewport* viewport)
{
this->CurrentViewport = viewport;
return 1;
}
//----------------------------------------------------------------------------
void vtkContext2DScalarBarActor::ReleaseGraphicsResources(vtkWindow* window)
{
if (!this->ActorDelegate || !this->ActorDelegate->GetScene() ||
!this->ActorDelegate->GetScene()->GetLastPainter())
{
return;
}
vtkContextDevice2D* device = this->ActorDelegate->GetScene()->GetLastPainter()->GetDevice();
// Downcast is needed because the context device superclass does
// not define this method (but probably should).
vtkOpenGLContextDevice2D* oglDevice = vtkOpenGLContextDevice2D::SafeDownCast(device);
if (oglDevice)
{
oglDevice->ReleaseGraphicsResources(window);
}
}
//----------------------------------------------------------------------------
void vtkContext2DScalarBarActor::UpdateScalarBarTexture(vtkImageData* image)
{
if (this->Orientation == VTK_ORIENT_VERTICAL)
{
image->SetDimensions(1, 256, 1);
}
else
{
image->SetDimensions(256, 1, 1);
}
image->AllocateScalars(VTK_UNSIGNED_CHAR, 4);
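  // The texture is a 1 x 256 (or 256 x 1) RGBA strip sampled from the lookup
  // table; PaintColorBar() later stretches it across the color bar rectangle.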
vtkUnsignedCharArray* colors =
vtkUnsignedCharArray::SafeDownCast(image->GetPointData()->GetArray(0));
vtkDiscretizableColorTransferFunction* ctf =
vtkDiscretizableColorTransferFunction::SafeDownCast(this->LookupTable);
if (!ctf)
{
return;
}
double* lutRange = ctf->GetRange();
const int numColors = 256;
unsigned char color[4];
for (int i = 0; i < numColors; ++i)
{
// Probably use MapScalarsThroughTable2 here instead.
// Update only when LUT changes
double originalValue = (((double)i / numColors) * (lutRange[1] - lutRange[0])) + lutRange[0];
double value = originalValue;
if (this->LookupTable->UsingLogScale())
{
value = log10(lutRange[0]) + i * (log10(lutRange[1]) - log10(lutRange[0])) / numColors;
value = pow(10.0, value);
}
const unsigned char* colorTmp = ctf->MapValue(value);
// The opacity function does not take into account the logarithmic
// mapping, so we use the original value here.
color[0] = colorTmp[0];
color[1] = colorTmp[1];
color[2] = colorTmp[2];
color[3] = static_cast<unsigned char>(255.0 * ctf->GetOpacity(originalValue) + 0.5);
if (this->ReverseLegend)
{
colors->SetTypedTuple(numColors - i - 1, color);
}
else
{
colors->SetTypedTuple(i, color);
}
}
}
//----------------------------------------------------------------------------
void vtkContext2DScalarBarActor::GetSize(double size[2], vtkContext2D* painter)
{
if (!this->CurrentViewport)
{
return;
}
// Convert scalar bar length from normalized viewport coordinates to pixels
vtkNew<vtkCoordinate> lengthCoord;
lengthCoord->SetCoordinateSystemToNormalizedViewport();
lengthCoord->SetValue(this->Orientation == VTK_ORIENT_VERTICAL ? 0.0 : this->ScalarBarLength,
this->Orientation == VTK_ORIENT_VERTICAL ? this->ScalarBarLength : 0.0);
int* lengthOffset = lengthCoord->GetComputedDisplayValue(this->CurrentViewport);
// The scalar bar thickness is defined in terms of points. That is,
// if the thickness size is 12, that matches the height of a "|"
// character in a 12 point font.
vtkNew<vtkTextProperty> textProp;
textProp->SetFontSize(this->ScalarBarThickness);
painter->ApplyTextProp(textProp.Get());
float bounds[4];
painter->ComputeStringBounds("|", bounds);
double thickness = bounds[3];
if (this->Orientation == VTK_ORIENT_VERTICAL)
{
size[0] = thickness;
size[1] = lengthOffset[1];
}
else
{
size[0] = lengthOffset[0];
size[1] = thickness;
}
}
//----------------------------------------------------------------------------
vtkRectf vtkContext2DScalarBarActor::GetColorBarRect(double size[2])
{
vtkRectf rect = vtkRectf(0, 0, size[0], size[1]);
// Color swatches are squares with sides equal to the width of the
// color bar when in vertical orientation and equal to the height
// when in horizontal orientation.
double swatchSize = this->Orientation == VTK_ORIENT_VERTICAL ? size[0] : size[1];
// Count up the swatches
double shift = 0;
double sizeReduction = 0;
if (this->DrawNanAnnotation)
{
if (this->Orientation == VTK_ORIENT_VERTICAL)
{
shift += swatchSize + this->Spacer;
}
sizeReduction += swatchSize + this->Spacer;
}
vtkDiscretizableColorTransferFunction* ctf =
vtkDiscretizableColorTransferFunction::SafeDownCast(this->LookupTable);
if (ctf && ctf->GetUseAboveRangeColor())
{
sizeReduction += swatchSize;
}
if (ctf && ctf->GetUseBelowRangeColor())
{
shift += swatchSize;
sizeReduction += swatchSize;
}
if (this->Orientation == VTK_ORIENT_VERTICAL)
{
rect.SetY(rect.GetY() + shift);
rect.SetHeight(rect.GetHeight() - sizeReduction);
}
else
{
rect.SetX(rect.GetX() + shift);
rect.SetWidth(rect.GetWidth() - sizeReduction);
}
return rect;
}
//----------------------------------------------------------------------------
vtkRectf vtkContext2DScalarBarActor::GetFullColorBarRect(double size[2])
{
// This will end up with the full color bar rect
vtkRectf fullRect = this->GetColorBarRect(size);
// Add these rects in if they have non-zero size
vtkRectf aboveRect = this->GetAboveRangeColorRect(size);
if (aboveRect.GetWidth() > 0 && aboveRect.GetHeight() > 0)
{
fullRect.AddRect(aboveRect);
}
vtkRectf belowRect = this->GetBelowRangeColorRect(size);
if (belowRect.GetWidth() > 0 && belowRect.GetHeight() > 0)
{
fullRect.AddRect(belowRect);
}
return fullRect;
}
//----------------------------------------------------------------------------
vtkRectf vtkContext2DScalarBarActor::GetAboveRangeColorRect(double size[2])
{
vtkRectf rect(0, 0, 0, 0);
vtkDiscretizableColorTransferFunction* ctf =
vtkDiscretizableColorTransferFunction::SafeDownCast(this->LookupTable);
if (!ctf)
{
if (this->LookupTable)
{
vtkErrorMacro(<< "Lookup table should be a vtkDiscretizableColorTransferFunction but was a "
<< this->LookupTable->GetClassName());
}
else
{
vtkErrorMacro(<< "Lookup table was NULL");
}
return rect;
}
if (ctf->GetUseAboveRangeColor())
{
rect = this->GetOutOfRangeColorRectInternal(vtkContext2DScalarBarActor::ABOVE_RANGE, size);
}
return rect;
}
//----------------------------------------------------------------------------
vtkRectf vtkContext2DScalarBarActor::GetBelowRangeColorRect(double size[2])
{
vtkRectf rect(0, 0, 0, 0);
vtkDiscretizableColorTransferFunction* ctf =
vtkDiscretizableColorTransferFunction::SafeDownCast(this->LookupTable);
if (!ctf)
{
if (this->LookupTable)
{
vtkErrorMacro(<< "Lookup table should be a vtkDiscretizableColorTransferFunction but was a "
<< this->LookupTable->GetClassName());
}
else
{
vtkErrorMacro(<< "Lookup table was NULL");
}
return rect;
}
if (ctf->GetUseBelowRangeColor())
{
rect = this->GetOutOfRangeColorRectInternal(vtkContext2DScalarBarActor::BELOW_RANGE, size);
}
return rect;
}
//----------------------------------------------------------------------------
vtkRectf vtkContext2DScalarBarActor::GetOutOfRangeColorRectInternal(
vtkContext2DScalarBarActor::OutOfRangeType type, double size[2])
{
vtkRectf rect(0, 0, 0, 0);
bool graphicallyAbove = type == vtkContext2DScalarBarActor::ABOVE_RANGE && !this->ReverseLegend;
if (graphicallyAbove)
{
if (this->Orientation == VTK_ORIENT_VERTICAL)
{
double width = size[0];
rect = vtkRectf(0, size[1] - width, width, width);
}
else
{
// Horizontal
double nanSpace = this->GetNaNColorRect(size).GetWidth();
if (nanSpace > 0)
{
nanSpace += this->Spacer;
}
double height = size[1];
// Move it all the way to the right, minus the NaN swatch
rect = vtkRectf(size[0] - nanSpace - height, 0, height, height);
}
}
else
{
if (this->Orientation == VTK_ORIENT_VERTICAL)
{
double nanSpace = this->GetNaNColorRect(size).GetHeight();
if (nanSpace > 0)
{
nanSpace += this->Spacer;
}
double height = size[0];
rect = vtkRectf(0, nanSpace, height, height);
}
else
{
double width = size[1];
rect = vtkRectf(0, 0, width, width);
}
}
return rect;
}
//----------------------------------------------------------------------------
vtkRectf vtkContext2DScalarBarActor::GetNaNColorRect(double size[2])
{
// Initialize to 0 width, 0 height
vtkRectf rect(0, 0, 0, 0);
if (this->DrawNanAnnotation)
{
if (this->Orientation == VTK_ORIENT_VERTICAL)
{
double width = size[0];
rect = vtkRectf(0, 0, width, width);
}
else
{
// Horizontal
double height = size[1];
rect = vtkRectf(size[0] - height, 0, height, height);
}
}
return rect;
}
//----------------------------------------------------------------------------
void vtkContext2DScalarBarActor::UpdateTextProperties()
{
// We can't just ShallowCopy the LabelTextProperty to axisTextProperty
// because it will clobber the orientation and justification settings.
vtkTextProperty* axisLabelProperty = this->Axis->GetLabelProperties();
axisLabelProperty->SetColor(this->LabelTextProperty->GetColor());
axisLabelProperty->SetOpacity(this->LabelTextProperty->GetOpacity());
axisLabelProperty->SetBackgroundColor(this->LabelTextProperty->GetBackgroundColor());
axisLabelProperty->SetBackgroundOpacity(this->LabelTextProperty->GetBackgroundOpacity());
axisLabelProperty->SetFontFamilyAsString(this->LabelTextProperty->GetFontFamilyAsString());
axisLabelProperty->SetFontFile(this->LabelTextProperty->GetFontFile());
axisLabelProperty->SetFontSize(this->LabelTextProperty->GetFontSize());
axisLabelProperty->SetBold(this->LabelTextProperty->GetBold());
axisLabelProperty->SetItalic(this->LabelTextProperty->GetItalic());
axisLabelProperty->SetShadow(this->LabelTextProperty->GetShadow());
axisLabelProperty->SetShadowOffset(this->LabelTextProperty->GetShadowOffset());
vtkTextProperty* axisTitleProperty = this->Axis->GetTitleProperties();
axisTitleProperty->SetColor(this->TitleTextProperty->GetColor());
axisTitleProperty->SetOpacity(this->TitleTextProperty->GetOpacity());
axisTitleProperty->SetBackgroundColor(this->TitleTextProperty->GetBackgroundColor());
axisTitleProperty->SetBackgroundOpacity(this->TitleTextProperty->GetBackgroundOpacity());
axisTitleProperty->SetFontFamilyAsString(this->TitleTextProperty->GetFontFamilyAsString());
axisTitleProperty->SetFontFile(this->TitleTextProperty->GetFontFile());
axisTitleProperty->SetFontSize(this->TitleTextProperty->GetFontSize());
axisTitleProperty->SetBold(this->TitleTextProperty->GetBold());
axisTitleProperty->SetItalic(this->TitleTextProperty->GetItalic());
axisTitleProperty->SetShadow(this->TitleTextProperty->GetShadow());
axisTitleProperty->SetShadowOffset(this->TitleTextProperty->GetShadowOffset());
}
//----------------------------------------------------------------------------
void vtkContext2DScalarBarActor::PaintColorBar(vtkContext2D* painter, double size[2])
{
vtkRectf barRect = this->GetColorBarRect(size);
vtkBrush* brush = painter->GetBrush();
//-----------------------------
// Draw scalar bar itself
vtkDiscretizableColorTransferFunction* ctf =
vtkDiscretizableColorTransferFunction::SafeDownCast(this->LookupTable);
if (!ctf)
{
return;
}
// Disable pen to prevent an outline around the color swatches
vtkPen* pen = painter->GetPen();
pen->SetLineType(vtkPen::NO_PEN);
// Create a map from anchor values to annotations. Since maps sort by key,
// when we iterate over the annotations later on, we will be doing it from
// smallest to greatest annotation value.
vtkAnnotationMap annotationAnchors;
if (ctf->GetIndexedLookup())
{
// Divide up the color bar rect into the number of indexed colors
int numIndexedColors = ctf->GetNumberOfAnnotatedValues();
double indexedColorSwatchLength =
this->Orientation == VTK_ORIENT_VERTICAL ? barRect.GetHeight() : barRect.GetWidth();
// Subtract spaces between swatches
if (numIndexedColors > 0)
{
indexedColorSwatchLength -= (numIndexedColors - 1) * this->Spacer;
indexedColorSwatchLength /= numIndexedColors;
}
// Now loop over indexed colors and draw swatches
double x, y;
for (int i = 0; i < numIndexedColors; ++i)
{
double shift = i * (indexedColorSwatchLength + this->Spacer);
double indexedColor[4];
vtkVariant annotatedValue = ctf->GetAnnotatedValue(i);
ctf->GetIndexedColor(i, indexedColor);
std::string annotation = ctf->GetAnnotation(i);
brush->SetColorF(indexedColor);
if (this->Orientation == VTK_ORIENT_VERTICAL)
{
x = barRect.GetX();
if (this->ReverseLegend)
{
y = barRect.GetY() + shift;
}
else
{
y = barRect.GetY() + barRect.GetHeight() - shift - indexedColorSwatchLength;
}
painter->DrawRect(x, y, barRect.GetWidth(), indexedColorSwatchLength);
annotationAnchors[y + 0.5 * indexedColorSwatchLength] = annotation;
}
else
{
// Horizontal
if (this->ReverseLegend)
{
x = barRect.GetX() + barRect.GetWidth() - shift - indexedColorSwatchLength;
}
else
{
x = barRect.GetX() + shift;
}
y = barRect.GetY();
painter->DrawRect(x, y, indexedColorSwatchLength, barRect.GetHeight());
annotationAnchors[x + 0.5 * indexedColorSwatchLength] = annotation;
}
}
}
else
// Continuous color map
{
vtkNew<vtkImageData> image;
this->UpdateScalarBarTexture(image.GetPointer());
painter->DrawImage(barRect, image.GetPointer());
// Draw the out-of-range colors if enabled
// pen->SetLineType(vtkPen::NO_PEN);
if (ctf->GetUseAboveRangeColor())
{
vtkRectf rect = this->GetAboveRangeColorRect(size);
brush->SetColorF(ctf->GetAboveRangeColor());
pen->SetLineType(vtkPen::NO_PEN);
painter->DrawRect(rect.GetX(), rect.GetY(), rect.GetWidth(), rect.GetHeight());
}
if (ctf->GetUseBelowRangeColor())
{
vtkRectf rect = this->GetBelowRangeColorRect(size);
brush->SetColorF(ctf->GetBelowRangeColor());
pen->SetLineType(vtkPen::NO_PEN);
painter->DrawRect(rect.GetX(), rect.GetY(), rect.GetWidth(), rect.GetHeight());
}
// Finally, draw a rect around the scalar bar and out-of-range
// colors, if they are enabled. We should probably draw four
// lines instead.
if (this->OutlineScalarBar)
{
vtkRectf outlineRect = this->GetFullColorBarRect(size);
brush->SetOpacity(0);
pen->SetLineType(vtkPen::SOLID_LINE);
painter->DrawRect(
outlineRect.GetX(), outlineRect.GetY(), outlineRect.GetWidth(), outlineRect.GetHeight());
}
// Now set up annotation anchor point map
double lutRange[2];
lutRange[0] = this->LookupTable->GetRange()[0];
lutRange[1] = this->LookupTable->GetRange()[1];
if (this->LookupTable->UsingLogScale())
{
lutRange[0] = log10(lutRange[0]);
lutRange[1] = log10(lutRange[1]);
}
double low = barRect.GetX();
double high = low + barRect.GetWidth();
if (this->Orientation == VTK_ORIENT_VERTICAL)
{
low = barRect.GetY();
high = low + barRect.GetHeight();
}
if (this->ReverseLegend)
{
std::swap(high, low);
}
if (this->GetAutomaticAnnotations())
{
// How many annotations should there be?
vtkIdType numValues = ctf->GetNumberOfAvailableColors();
if (ctf && ctf->GetDiscretize() && this->AutomaticAnnotations && numValues)
{
double step = (lutRange[1] - lutRange[0]) / numValues;
for (vtkIdType i = 0; i <= numValues; i++)
{
double annotatedValue = lutRange[0] + step * i;
double normalizedValue = (annotatedValue - lutRange[0]) / (lutRange[1] - lutRange[0]);
double barPosition = normalizedValue * (high - low) + low;
if (normalizedValue >= 0.0 - std::numeric_limits<double>::epsilon() &&
normalizedValue <= 1.0 + std::numeric_limits<double>::epsilon() &&
!vtkMath::IsNan(barPosition))
{
char annotation[1024];
if (this->LookupTable->UsingLogScale())
{
annotatedValue = pow(10.0, annotatedValue);
}
SNPRINTF(annotation, 1023, this->LabelFormat, annotatedValue);
annotationAnchors[barPosition] = annotation;
}
}
}
}
else // Manual annotations
{
int numAnnotations = ctf->GetNumberOfAnnotatedValues();
for (int i = 0; i < numAnnotations; ++i)
{
// Figure out placement of annotation value along color bar.
double annotatedValue = ctf->GetAnnotatedValue(i).ToDouble();
if (this->LookupTable->UsingLogScale())
{
// Scale in log space
annotatedValue = log10(annotatedValue);
}
double normalizedValue = (annotatedValue - lutRange[0]) / (lutRange[1] - lutRange[0]);
double barPosition = normalizedValue * (high - low) + low;
if (normalizedValue >= 0.0 && normalizedValue <= 1.0 && !vtkMath::IsNan(barPosition))
{
std::string annotation = ctf->GetAnnotation(i);
annotationAnchors[barPosition] = annotation;
}
}
}
if (this->AddRangeAnnotations)
{
char annotation[1024];
SNPRINTF(annotation, 1023, this->RangeLabelFormat, lutRange[0]);
annotationAnchors[low] = annotation;
SNPRINTF(annotation, 1023, this->RangeLabelFormat, lutRange[1]);
annotationAnchors[high] = annotation;
}
} // Continuous color map
// For all types of color maps, draw the NaN annotation.
if (this->DrawNanAnnotation)
{
// Paint NaN color swatch
vtkRectf rect = this->GetNaNColorRect(size);
brush->SetOpacity(255);
brush->SetColorF(ctf->GetNanColor());
pen->SetLineType(vtkPen::NO_PEN);
painter->DrawRect(rect.GetX(), rect.GetY(), rect.GetWidth(), rect.GetHeight());
// Add NaN annotation
double nanAnchor = rect.GetY() + 0.5 * rect.GetHeight();
if (this->Orientation == VTK_ORIENT_HORIZONTAL)
{
nanAnchor = rect.GetX() + 0.5 * rect.GetWidth();
}
annotationAnchors[nanAnchor] = this->GetNanAnnotation();
}
// Draw the annotations
if (this->GetDrawAnnotations())
{
this->PaintAnnotations(painter, size, annotationAnchors);
}
}
//----------------------------------------------------------------------------
void vtkContext2DScalarBarActor::PaintAxis(vtkContext2D* painter, double size[2])
{
vtkRectf rect = this->GetColorBarRect(size);
// Use the length of the character "|" at the label font size for various
// measurements.
float bounds[4];
painter->ApplyTextProp(this->LabelTextProperty);
painter->ComputeStringBounds("|", bounds);
float pipeHeight = bounds[3];
// Note that at this point the font size is already scaled by the tile
// scale factor. Later on, vtkAxis will scale the tick length and label offset
// by the tile scale factor again, so we need to divide by the tile scale
// factor here to take that into account.
vtkWindow* renWin = this->CurrentViewport->GetVTKWindow();
int tileScale[2];
renWin->GetTileScale(tileScale);
pipeHeight /= tileScale[1];
// Compute a shift amount for tick marks.
float axisShift = 0.25 * pipeHeight;
// Compute tick lengths and label offsets based on the label font size
float tickLength = 0.75 * pipeHeight;
if (this->Orientation == VTK_ORIENT_VERTICAL)
{
this->Axis->SetTickLength(tickLength);
// Offset the labels from the tick marks a bit
float labelOffset = tickLength + (0.5 * tickLength);
this->Axis->SetLabelOffset(labelOffset);
}
else
{
this->Axis->SetTickLength(tickLength);
float labelOffset = tickLength + (0.3 * tickLength);
this->Axis->SetLabelOffset(labelOffset);
}
// Position the axis
if (this->TextPosition == PrecedeScalarBar)
{
// Left
if (this->Orientation == VTK_ORIENT_VERTICAL)
{
this->Axis->SetPoint1(rect.GetX() + axisShift, rect.GetY());
this->Axis->SetPoint2(rect.GetX() + axisShift, rect.GetY() + rect.GetHeight());
this->Axis->SetPosition(vtkAxis::LEFT);
}
else
{
// Bottom
this->Axis->SetPoint1(rect.GetX(), rect.GetY() + axisShift);
this->Axis->SetPoint2(rect.GetX() + rect.GetWidth(), rect.GetY() + axisShift);
this->Axis->SetPosition(vtkAxis::BOTTOM);
}
}
else
{
// Right
if (this->Orientation == VTK_ORIENT_VERTICAL)
{
this->Axis->SetPoint1(rect.GetX() + rect.GetWidth() - axisShift, rect.GetY());
this->Axis->SetPoint2(
rect.GetX() + rect.GetWidth() - axisShift, rect.GetY() + rect.GetHeight());
this->Axis->SetPosition(vtkAxis::RIGHT);
}
else
{
// Top
this->Axis->SetPoint1(rect.GetX(), rect.GetY() + rect.GetHeight() - axisShift);
this->Axis->SetPoint2(
rect.GetX() + rect.GetWidth(), rect.GetY() + rect.GetHeight() - axisShift);
this->Axis->SetPosition(vtkAxis::TOP);
}
}
//-----------------------------
// Get the range of the lut
const double* lutRange = this->LookupTable->GetRange();
double range[2];
range[0] = lutRange[0];
range[1] = lutRange[1];
if (this->ReverseLegend)
{
std::swap(range[0], range[1]);
}
vtkPen* axisPen = this->Axis->GetPen();
axisPen->SetColorF(this->LabelTextProperty->GetColor());
bool indexedMode = this->LookupTable->GetIndexedLookup() == 1;
// NOTE: the order of calls to this->Axis is important and should be
// changed only with extreme care.
this->Axis->SetTickLabelAlgorithm(vtkAxis::TICK_SIMPLE);
this->Axis->SetUnscaledMinimumLimit(std::numeric_limits<double>::max() * -1.0);
this->Axis->SetUnscaledMaximumLimit(std::numeric_limits<double>::max());
this->Axis->SetUnscaledRange(range);
this->Axis->SetAxisVisible(false);
this->Axis->SetLabelsVisible(!indexedMode && this->DrawTickLabels == 1);
this->Axis->SetTicksVisible(!indexedMode && this->DrawTickMarks);
this->Axis->SetGridVisible(false);
if (this->AutomaticLabelFormat)
{
this->Axis->SetNotation(vtkAxis::STANDARD_NOTATION);
}
else
{
this->Axis->SetNotation(vtkAxis::PRINTF_NOTATION);
}
this->Axis->SetLabelFormat(std::string(this->LabelFormat));
this->Axis->SetLogScale(this->LookupTable->UsingLogScale() == 1);
this->Axis->AutoScale();
this->Axis->SetRangeLabelsVisible(!indexedMode && this->AddRangeLabels == 1);
this->Axis->SetRangeLabelFormat(std::string(this->RangeLabelFormat));
if (this->UseCustomLabels)
{
if (this->Axis->GetLogScale())
{
// Take log of label positions
vtkNew<vtkDoubleArray> logCustomLabels;
logCustomLabels->SetNumberOfTuples(this->CustomLabels->GetNumberOfTuples());
for (vtkIdType id = 0; id < logCustomLabels->GetNumberOfTuples(); ++id)
{
double d = this->CustomLabels->GetValue(id);
d = log10(d);
logCustomLabels->SetValue(id, d);
}
this->Axis->SetCustomTickPositions(logCustomLabels.GetPointer());
}
else
{
this->Axis->SetCustomTickPositions(this->CustomLabels);
}
}
else
{
this->Axis->SetCustomTickPositions(nullptr);
}
this->Axis->SetUnscaledRange(range);
this->Axis->RecalculateTickSpacing();
this->Axis->Update();
this->Axis->Paint(painter);
}
//----------------------------------------------------------------------------
void vtkContext2DScalarBarActor::PaintTitle(vtkContext2D* painter, double size[2])
{
std::string combinedTitle(this->Title);
if (this->ComponentTitle && strlen(this->ComponentTitle) > 0)
{
combinedTitle.append(" ");
combinedTitle.append(this->ComponentTitle);
}
// Apply the text property so that title size is up to date.
double titleOrientation = 0.0;
if (this->GetOrientation() == VTK_ORIENT_VERTICAL && !this->GetForceHorizontalTitle())
{
titleOrientation = 90.0;
}
this->TitleTextProperty->SetOrientation(titleOrientation);
this->TitleTextProperty->SetJustification(this->GetTitleJustification());
painter->ApplyTextProp(this->TitleTextProperty);
// Get title size
float titleBounds[4];
painter->ComputeStringBounds(combinedTitle, titleBounds);
float titleWidth = titleBounds[2];
float titleHeight = titleBounds[3];
// vtkAxis::GetBoundingRect() is not accurate. Compute it ourselves.
// All the code in this section is needed to get the actual bounds of
// the axis including any offsets applied in PaintAxis().
vtkNew<vtkBoundingRectContextDevice2D> boundingDevice;
vtkNew<vtkContextDevice2D> contextDevice;
boundingDevice->SetDelegateDevice(contextDevice.Get());
boundingDevice->Begin(this->CurrentViewport);
vtkNew<vtkContext2D> context;
context->Begin(boundingDevice);
this->PaintAxis(context, size);
context->End();
boundingDevice->End();
vtkRectf axisRect = boundingDevice->GetBoundingRect();
vtkRectf barAndAxisRect = axisRect;
vtkRectf colorBarRect = this->GetColorBarRect(size);
barAndAxisRect.AddRect(colorBarRect);
float titleX = barAndAxisRect.GetX() + 0.5 * barAndAxisRect.GetWidth();
float titleY = colorBarRect.GetY() + 0.5 * colorBarRect.GetHeight();
if (this->GetOrientation() == VTK_ORIENT_HORIZONTAL || this->ForceHorizontalTitle)
{
if (this->GetTitleJustification() == VTK_TEXT_LEFT)
{
titleX = barAndAxisRect.GetX();
}
else if (this->GetTitleJustification() == VTK_TEXT_RIGHT)
{
titleX = barAndAxisRect.GetX() + barAndAxisRect.GetWidth();
}
if (this->GetTextPosition() == vtkContext2DScalarBarActor::PrecedeScalarBar)
{
titleY = axisRect.GetY() - titleHeight - 0.25 * titleHeight;
}
else
{
// Handle zero-height axis.
if (axisRect.GetHeight() < 1.0)
{
axisRect.SetHeight(colorBarRect.GetHeight());
}
titleY = axisRect.GetY() + axisRect.GetHeight() + 0.25 * titleHeight;
}
// Move title to the top if the title is forced horizontal
if (this->ForceHorizontalTitle && this->GetOrientation() != VTK_ORIENT_HORIZONTAL)
{
titleY = barAndAxisRect.GetY() + barAndAxisRect.GetHeight() + 0.25 * titleHeight;
}
}
else // Vertical orientation
{
// Handle zero-width axis.
if (axisRect.GetWidth() < 1.0)
{
axisRect.SetWidth(0.25 * colorBarRect.GetWidth());
}
if (this->GetTitleJustification() == VTK_TEXT_LEFT)
{
titleY = barAndAxisRect.GetY();
}
else if (this->GetTitleJustification() == VTK_TEXT_RIGHT)
{
titleY = barAndAxisRect.GetY() + barAndAxisRect.GetHeight();
}
if (this->GetTextPosition() == vtkContext2DScalarBarActor::PrecedeScalarBar)
{
titleX = colorBarRect.GetX() - axisRect.GetWidth();
}
else
{
titleX = colorBarRect.GetX() + colorBarRect.GetWidth() + axisRect.GetWidth() + titleWidth;
}
}
painter->ApplyTextProp(this->TitleTextProperty);
painter->DrawString(titleX, titleY, combinedTitle);
}
//----------------------------------------------------------------------------
bool vtkContext2DScalarBarActor::Paint(vtkContext2D* painter)
{
if (!this->Visibility)
{
return false;
}
this->UpdateTextProperties();
vtkPen* pen = painter->GetPen();
vtkBrush* brush = painter->GetBrush();
// Save previous settings
vtkNew<vtkPen> savePen;
savePen->DeepCopy(pen);
vtkNew<vtkBrush> saveBrush;
saveBrush->DeepCopy(brush);
pen->SetColorF(1, 1, 1);
brush->SetColorF(1, 0, 0);
vtkNew<vtkPoints2D> rect;
  rect->InsertNextPoint(0, 0);
rect->InsertNextPoint(200, 40);
int* displayPosition = this->PositionCoordinate->GetComputedDisplayValue(this->CurrentViewport);
// Ensure that the scene held by the Axis is the current renderer
// so that things like tile scale and DPI are correct.
this->Axis->GetScene()->SetRenderer(vtkRenderer::SafeDownCast(this->CurrentViewport));
double size[2];
this->GetSize(size, painter);
// Paint the various components
vtkNew<vtkTransform2D> tform;
tform->Translate(displayPosition[0], displayPosition[1]);
painter->PushMatrix();
painter->AppendTransform(tform.GetPointer());
this->PaintColorBar(painter, size);
this->PaintAxis(painter, size);
// IMPORTANT: this needs to be done *after* this->Axis->Update() is called
// in PaintAxis() so that we get an accurate axis bounding rectangle.
this->PaintTitle(painter, size);
// Restore settings
pen->DeepCopy(savePen.GetPointer());
brush->DeepCopy(saveBrush.GetPointer());
painter->PopMatrix();
return false;
}
//----------------------------------------------------------------------------
vtkRectf vtkContext2DScalarBarActor::GetBoundingRect()
{
return vtkBoundingRectContextDevice2D::GetBoundingRect(
this->ScalarBarItem, this->CurrentViewport);
}
//----------------------------------------------------------------------------
void vtkContext2DScalarBarActor::PaintAnnotations(
vtkContext2D* painter, double size[2], const vtkAnnotationMap& annotationAnchors)
{
// Set the annotation text properties
painter->ApplyTextProp(this->Axis->GetLabelProperties());
if (this->Orientation == VTK_ORIENT_VERTICAL)
{
this->PaintAnnotationsVertically(painter, size, annotationAnchors);
}
else
{
this->PaintAnnotationsHorizontally(painter, size, annotationAnchors);
}
}
namespace
{
//----------------------------------------------------------------------------
typedef struct AI
{
double Anchor;
double Position;
std::string Annotation;
double Span;
} AnnotationInfo;
//----------------------------------------------------------------------------
void DistributeAnnotations(std::vector<AnnotationInfo>& annotations, float spacer)
{
// Look for clusters of overlapping annotations. We'll space these
// annotations about the center of mass of each cluster. This winds
// up looking nice.
// Process clusters.
// 1. Find centroid of cluster.
// 2. Compute lower and upper bounds of cluster.
// 3. Layout the labels.
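  // Worked example (illustrative): two labels anchored at 10 and 12, each with a
  // span of 4 and a spacer of 1, overlap. Their width-weighted centroid is 11, so
  // they are re-laid out at positions 8.5 and 13.5, i.e. centred on the cluster
  // and separated by span + spacer.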
bool overlapDetected = false;
int tries = 0;
int maxTries = 20;
// Keep track of adjacent annotations that overlap at any point in
// the placement process. This is to prevent annotations from
// potentially jumping from cluster to cluster during the layout
// process. overlaps[j] == true means that annotations j and j+1
// overlapped at some point.
std::vector<bool> overlaps(annotations.size(), false);
  // Iterate repeatedly in case repositioning of clustered
// annotations causes new overlap.
do
{
overlapDetected = false;
double clusterWidth = 0.0;
size_t clusterCount = 0;
for (size_t j = 0; j < annotations.size(); ++j)
{
// Check for overlap with neighbors
bool overlapsNext = false;
double lowerMax = 0.0;
double upperMin = 0.0;
if (j < annotations.size() - 1)
{
lowerMax = annotations[j].Position + 0.5 * annotations[j].Span + 0.5 * spacer;
upperMin = annotations[j + 1].Position - 0.5 * annotations[j + 1].Span - 0.5 * spacer;
overlapsNext = lowerMax > upperMin;
overlapDetected = overlapDetected || overlapsNext;
}
if (overlapDetected)
{
clusterWidth += annotations[j].Span;
clusterCount++;
}
if (overlapsNext)
{
overlaps[j] = true;
}
if (!overlaps[j])
{
// Cluster ended. Go back and change the annotation positions
// based on the cluster centroid.
if (clusterCount > 0)
{
// Weight centers of each label by width
double clusterCenter = 0.0;
for (size_t k = j - clusterCount + 1; k <= j; ++k)
{
double weight = annotations[k].Span / clusterWidth;
clusterCenter += annotations[k].Anchor * weight;
}
double accumWidth = 0.0;
clusterWidth += spacer * (clusterCount - 1); // Add in spacer width
for (size_t k = 0; k < clusterCount; ++k)
{
// Start from the right (bigger coordinate) side and work toward the left.
annotations[j - k].Position =
clusterCenter + 0.5 * clusterWidth - accumWidth - 0.5 * annotations[j - k].Span;
accumWidth += annotations[j - k].Span + spacer;
}
}
// Reset cluster stats
clusterWidth = 0.0;
clusterCount = 0;
}
}
++tries;
} while (overlapDetected && tries < maxTries);
}
} // end anonymous namespace
//----------------------------------------------------------------------------
void vtkContext2DScalarBarActor::PaintAnnotationsVertically(
vtkContext2D* painter, double size[2], const vtkAnnotationMap& annotationAnchors)
{
vtkRectf barRect = this->GetColorBarRect(size);
// Copy annotations and position info into a vector.
std::vector<AnnotationInfo> annotations;
annotations.reserve(annotationAnchors.size());
vtkAnnotationMap::const_iterator annotationMapIter;
for (annotationMapIter = annotationAnchors.begin(); annotationMapIter != annotationAnchors.end();
++annotationMapIter)
{
float bounds[4]; // bounds contains x, y, width, height
painter->ComputeStringBounds(annotationMapIter->second, bounds);
AnnotationInfo p;
p.Anchor = annotationMapIter->first;
p.Position = annotationMapIter->first;
p.Annotation = annotationMapIter->second;
p.Span = bounds[3]; // height
annotations.push_back(p);
}
vtkWindow* renWin = this->CurrentViewport->GetVTKWindow();
int tileScale[2];
renWin->GetTileScale(tileScale);
// Calculate the annotation labels
const float spacer = 1 * tileScale[0]; // vertical space between annotations
DistributeAnnotations(annotations, spacer);
// Iterate over anchors and draw annotations
std::vector<AnnotationInfo>::iterator vectorIter;
for (vectorIter = annotations.begin(); vectorIter != annotations.end(); ++vectorIter)
{
const int annotationLeader = 8 * tileScale[0];
double anchorPt[2] = { barRect.GetX(), vectorIter->Anchor };
double labelPt[2] = { anchorPt[0] - annotationLeader, vectorIter->Position };
painter->GetTextProp()->SetJustification(VTK_TEXT_RIGHT);
if (this->TextPosition == PrecedeScalarBar)
{
anchorPt[0] = barRect.GetX() + barRect.GetWidth();
labelPt[0] = anchorPt[0] + annotationLeader;
painter->GetTextProp()->SetJustification(VTK_TEXT_LEFT);
}
vtkPen* pen = painter->GetPen();
pen->SetOpacity(255);
pen->SetLineType(vtkPen::SOLID_LINE);
pen->SetColorF(this->Axis->GetLabelProperties()->GetColor());
painter->DrawLine(anchorPt[0], anchorPt[1], labelPt[0], labelPt[1]);
painter->GetTextProp()->SetVerticalJustification(VTK_TEXT_CENTERED);
painter->DrawString(labelPt[0], labelPt[1], vectorIter->Annotation);
}
}
//----------------------------------------------------------------------------
void vtkContext2DScalarBarActor::PaintAnnotationsHorizontally(
vtkContext2D* painter, double size[2], const vtkAnnotationMap& annotationAnchors)
{
vtkRectf barRect = this->GetColorBarRect(size);
// Copy annotations and position info into a vector.
std::vector<AnnotationInfo> annotations;
annotations.reserve(annotationAnchors.size());
vtkAnnotationMap::const_iterator annotationMapIter;
for (annotationMapIter = annotationAnchors.begin(); annotationMapIter != annotationAnchors.end();
++annotationMapIter)
{
float bounds[4]; // bounds contains x, y, width, height
painter->ComputeStringBounds(annotationMapIter->second, bounds);
AnnotationInfo p;
p.Anchor = annotationMapIter->first;
p.Position = annotationMapIter->first;
p.Annotation = annotationMapIter->second;
p.Span = bounds[2]; // width
annotations.push_back(p);
}
vtkWindow* renWin = this->CurrentViewport->GetVTKWindow();
int tileScale[2];
renWin->GetTileScale(tileScale);
// Get horizontal spacing distance as a function of the font
// properties. Use width of '-' as spacing between annotations.
float bounds[4];
painter->ComputeStringBounds("-", bounds);
const float spacer = bounds[2] * tileScale[0];
// Calculate the annotation labels
DistributeAnnotations(annotations, spacer);
// Iterate over anchors and draw annotations
std::vector<AnnotationInfo>::iterator vectorIter;
for (vectorIter = annotations.begin(); vectorIter != annotations.end(); ++vectorIter)
{
const int annotationLeader = 8 * tileScale[0];
double anchorPt[2] = { vectorIter->Anchor, barRect.GetY() };
double labelPt[2] = { vectorIter->Position, anchorPt[1] - annotationLeader };
double labelOffset = 3;
painter->GetTextProp()->SetJustification(VTK_TEXT_CENTERED);
painter->GetTextProp()->SetVerticalJustification(VTK_TEXT_TOP);
if (this->TextPosition == PrecedeScalarBar)
{
anchorPt[1] = barRect.GetY() + barRect.GetHeight();
labelPt[1] = anchorPt[1] + annotationLeader;
labelOffset *= -1.0;
painter->GetTextProp()->SetVerticalJustification(VTK_TEXT_BOTTOM);
}
painter->DrawString(labelPt[0], labelPt[1] - labelOffset, vectorIter->Annotation);
vtkPen* pen = painter->GetPen();
pen->SetOpacity(255);
pen->SetLineType(vtkPen::SOLID_LINE);
pen->SetColorF(this->Axis->GetLabelProperties()->GetColor());
painter->DrawLine(anchorPt[0], anchorPt[1], labelPt[0], labelPt[1]);
}
}
//----------------------------------------------------------------------------
void vtkContext2DScalarBarActor::PrintSelf(ostream& os, vtkIndent indent)
{
this->Superclass::PrintSelf(os, indent);
}
//----------------------------------------------------------------------------
int vtkContext2DScalarBarActor::GetEstimatedNumberOfAnnotations()
{
vtkDiscretizableColorTransferFunction* ctf =
vtkDiscretizableColorTransferFunction::SafeDownCast(this->LookupTable);
if (!ctf)
{
return 0;
}
if (this->GetAutomaticAnnotations() && !ctf->GetIndexedLookup())
{
// How many annotations should there be?
return ctf->GetNumberOfAvailableColors();
}
else // Manual annotations
{
return ctf->GetNumberOfAnnotatedValues();
}
}
| 15,940 |
2,542 | // ------------------------------------------------------------
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License (MIT). See License.txt in the repo root for license information.
// ------------------------------------------------------------
#include "stdafx.h"
#include <boost/test/unit_test.hpp>
#include "Common/boost-taef.h"
#include "ComTestOperation.h"
#include "TestHealthClient.h"
namespace ReplicationUnitTest
{
using namespace Common;
using namespace std;
using namespace Reliability::ReplicationComponent;
static Common::StringLiteral const Source("TESTBatchedHealthReporter");
class TestBatchedHealthReporter
{
};
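    // Each test below wires a BatchedHealthReporter to a mock health client with a
    // given batching interval, schedules warning/OK reports, and then checks the
    // internal timer state, the latest batched health state/description, and how
    // many reports actually reached the client after the interval or Close().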
BOOST_FIXTURE_TEST_SUITE(TestBatchedHealthReporterSuite,TestBatchedHealthReporter)
BOOST_AUTO_TEST_CASE(TestDisabled)
{
ReplicationEndpointId endpoint(Common::Guid::NewGuid(), 0);
auto healthClient = HealthClient::Create(false);
ComTestOperation::WriteInfo(
Source,
"Start TestDisabled. Partition = {0}", endpoint.PartitionId);
auto sender = BatchedHealthReporter::Create(
endpoint.PartitionId,
endpoint,
HealthReportType::SecondaryReplicationQueueStatus,
TimeSpan::Zero,
healthClient);
        // Since the timespan is zero, health reporting should be disabled
sender->ScheduleWarningReport(L"Temp");
VERIFY_ARE_EQUAL(false, sender->Test_IsTimerRunning, L"Timer running check");
VERIFY_ARE_EQUAL(::FABRIC_HEALTH_STATE_OK, sender->Test_LatestHealthState, L"Latest health state check");
sender->Close();
VERIFY_ARE_EQUAL(0, healthClient->NumberofReportsSent, L"Number of reports check");
}
BOOST_AUTO_TEST_CASE(TestOk)
{
ReplicationEndpointId endpoint(Common::Guid::NewGuid(), 0);
auto healthClient = HealthClient::Create(true);
ComTestOperation::WriteInfo(
Source,
"Start TestOk. Partition = {0}", endpoint.PartitionId);
auto sender = BatchedHealthReporter::Create(
endpoint.PartitionId,
endpoint,
HealthReportType::SecondaryReplicationQueueStatus,
TimeSpan::FromMilliseconds(1),
healthClient);
sender->ScheduleOKReport();
VERIFY_ARE_EQUAL(false, sender->Test_IsTimerRunning, L"Timer running check");
sender->Close();
VERIFY_ARE_EQUAL(::FABRIC_HEALTH_STATE_OK, sender->Test_LatestHealthState, L"Latest health state check");
VERIFY_ARE_EQUAL(0, healthClient->NumberofReportsSent, L"Number of reports check");
}
BOOST_AUTO_TEST_CASE(TestSimpleWarningReport)
{
ReplicationEndpointId endpoint(Common::Guid::NewGuid(), 0);
auto healthClient = HealthClient::Create(true);
ComTestOperation::WriteInfo(
Source,
"Start TestSimpleWarningReport. Partition = {0}", endpoint.PartitionId);
auto sender = BatchedHealthReporter::Create(
endpoint.PartitionId,
endpoint,
HealthReportType::SecondaryReplicationQueueStatus,
TimeSpan::FromMilliseconds(50),
healthClient);
sender->ScheduleWarningReport(L"Temp");
VERIFY_ARE_EQUAL(true, sender->Test_IsTimerRunning, L"Timer running check");
Sleep(1000); // wait for timer to finish running
VERIFY_ARE_EQUAL(false, sender->Test_IsTimerRunning, L"Timer running check");
sender->Close();
VERIFY_ARE_EQUAL(::FABRIC_HEALTH_STATE_OK, sender->Test_LatestHealthState, L"Latest health state check");
VERIFY_ARE_EQUAL(2, healthClient->NumberofReportsSent, L"Number of reports check");
}
BOOST_AUTO_TEST_CASE(TestWarningsWithinIntervalReport)
{
ReplicationEndpointId endpoint(Common::Guid::NewGuid(), 0);
auto healthClient = HealthClient::Create(true);
ComTestOperation::WriteInfo(
Source,
"Start TestWarningsWithinIntervalReport. Partition = {0}", endpoint.PartitionId);
auto sender = BatchedHealthReporter::Create(
endpoint.PartitionId,
endpoint,
HealthReportType::SecondaryReplicationQueueStatus,
TimeSpan::FromMilliseconds(50),
healthClient);
sender->ScheduleWarningReport(L"Temp");
VERIFY_ARE_EQUAL(true, sender->Test_IsTimerRunning, L"Timer running check");
sender->ScheduleWarningReport(L"Temp4");
sender->ScheduleWarningReport(L"Temp3");
sender->ScheduleWarningReport(L"Temp5");
sender->ScheduleWarningReport(L"Temp2");
Sleep(1000); // wait for timer to finish running
VERIFY_ARE_EQUAL(false, sender->Test_IsTimerRunning, L"Timer running check");
sender->Close();
VERIFY_ARE_EQUAL(::FABRIC_HEALTH_STATE_OK, sender->Test_LatestHealthState, L"Latest health state check");
VERIFY_ARE_EQUAL(L"Temp2", sender->Test_LatestDescription, L"Description check");
VERIFY_ARE_EQUAL(2, healthClient->NumberofReportsSent, L"Number of reports check");
}
BOOST_AUTO_TEST_CASE(TestWarningsWithinIntervalAndCloseImmediatelyReport)
{
ReplicationEndpointId endpoint(Common::Guid::NewGuid(), 0);
auto healthClient = HealthClient::Create(true);
ComTestOperation::WriteInfo(
Source,
"Start TestWarningsWithinIntervalAndCloseImmediatelyReport. Partition = {0}", endpoint.PartitionId);
auto sender = BatchedHealthReporter::Create(
endpoint.PartitionId,
endpoint,
HealthReportType::SecondaryReplicationQueueStatus,
TimeSpan::FromMilliseconds(50),
healthClient);
sender->ScheduleWarningReport(L"Temp");
sender->ScheduleWarningReport(L"Temp1");
sender->ScheduleWarningReport(L"Temp9");
sender->ScheduleWarningReport(L"Temp2");
VERIFY_ARE_EQUAL(true, sender->Test_IsTimerRunning, L"Timer running check");
sender->Close();
VERIFY_ARE_EQUAL(false, sender->Test_IsTimerRunning, L"Timer running check");
VERIFY_ARE_EQUAL(::FABRIC_HEALTH_STATE_OK, sender->Test_LatestHealthState, L"Latest health state check");
VERIFY_ARE_EQUAL(L"Temp2", sender->Test_LatestDescription, L"Description check");
VERIFY_ARE_EQUAL(0, healthClient->NumberofReportsSent, L"Number of reports check");
}
BOOST_AUTO_TEST_CASE(TestWarningsAndOkWithinIntervalReport)
{
ReplicationEndpointId endpoint(Common::Guid::NewGuid(), 0);
auto healthClient = HealthClient::Create(true);
ComTestOperation::WriteInfo(
Source,
"Start TestWarningsAndOkWithinIntervalReport. Partition = {0}", endpoint.PartitionId);
auto sender = BatchedHealthReporter::Create(
endpoint.PartitionId,
endpoint,
HealthReportType::SecondaryReplicationQueueStatus,
TimeSpan::FromMilliseconds(50),
healthClient);
sender->ScheduleWarningReport(L"Temp");
sender->ScheduleWarningReport(L"Temp3");
sender->ScheduleWarningReport(L"Temp5");
sender->ScheduleWarningReport(L"Temp2");
VERIFY_ARE_EQUAL(true, sender->Test_IsTimerRunning, L"Timer running check");
sender->ScheduleOKReport();
VERIFY_ARE_EQUAL(false, sender->Test_IsTimerRunning, L"Timer running check");
sender->Close();
VERIFY_ARE_EQUAL(::FABRIC_HEALTH_STATE_OK, sender->Test_LatestHealthState, L"Latest health state check");
VERIFY_ARE_EQUAL(L"", sender->Test_LatestDescription, L"Description check");
VERIFY_ARE_EQUAL(0, healthClient->NumberofReportsSent, L"Number of reports check");
}
BOOST_AUTO_TEST_CASE(TestWarningAndOkin2IntervalsReport)
{
ReplicationEndpointId endpoint(Common::Guid::NewGuid(), 0);
auto healthClient = HealthClient::Create(true);
ComTestOperation::WriteInfo(
Source,
"Start TestWarningAndOkin2IntervalsReport. Partition = {0}", endpoint.PartitionId);
auto sender = BatchedHealthReporter::Create(
endpoint.PartitionId,
endpoint,
HealthReportType::SecondaryReplicationQueueStatus,
TimeSpan::FromMilliseconds(50),
healthClient);
sender->ScheduleWarningReport(L"Temp");
Sleep(1000);
sender->ScheduleOKReport();
VERIFY_ARE_EQUAL(true, sender->Test_IsTimerRunning, L"Timer running check");
Sleep(1000);
VERIFY_ARE_EQUAL(::FABRIC_HEALTH_STATE_OK, sender->Test_LatestHealthState, L"Latest health state check");
VERIFY_ARE_EQUAL(L"", sender->Test_LatestDescription, L"Description check");
VERIFY_ARE_EQUAL(2, healthClient->NumberofReportsSent, L"Number of reports check");
}
BOOST_AUTO_TEST_CASE(TestOksWithinIntervalReport)
{
ReplicationEndpointId endpoint(Common::Guid::NewGuid(), 0);
auto healthClient = HealthClient::Create(true);
ComTestOperation::WriteInfo(
Source,
"Start TestOksWithinIntervalReport. Partition = {0}", endpoint.PartitionId);
auto sender = BatchedHealthReporter::Create(
endpoint.PartitionId,
endpoint,
HealthReportType::SecondaryReplicationQueueStatus,
TimeSpan::FromMilliseconds(50),
healthClient);
sender->ScheduleWarningReport(L"Temp");
VERIFY_ARE_EQUAL(true, sender->Test_IsTimerRunning, L"Timer running check");
Sleep(1000);
VERIFY_ARE_EQUAL(false, sender->Test_IsTimerRunning, L"Timer running check");
sender->ScheduleOKReport();
sender->ScheduleOKReport();
sender->ScheduleOKReport();
Sleep(1000);
VERIFY_ARE_EQUAL(false, sender->Test_IsTimerRunning, L"Timer running check");
VERIFY_ARE_EQUAL(::FABRIC_HEALTH_STATE_OK, sender->Test_LatestHealthState, L"Latest health state check");
VERIFY_ARE_EQUAL(L"", sender->Test_LatestDescription, L"Description check");
VERIFY_ARE_EQUAL(2, healthClient->NumberofReportsSent, L"Number of reports check");
}
BOOST_AUTO_TEST_SUITE_END()
}
| 4,268 |
582 | <reponame>PinoEire/archi
/**
* This program and the accompanying materials
* are made available under the terms of the License
* which accompanies this distribution in the file LICENSE.txt
*/
package com.archimatetool.editor.ui.textrender;
import static org.junit.Assert.assertEquals;
import org.junit.Test;
import com.archimatetool.model.IDiagramModelArchimateConnection;
import com.archimatetool.model.IDiagramModelArchimateObject;
import junit.framework.JUnit4TestAdapter;
/**
* PropertiesRenderer Tests
*
* @author <NAME>
*/
@SuppressWarnings("nls")
public class PropertiesRendererTests extends AbstractTextRendererTests {
public static junit.framework.Test suite() {
return new JUnit4TestAdapter(PropertiesRendererTests.class);
}
private PropertiesRenderer renderer = new PropertiesRenderer();
@Override
protected PropertiesRenderer getRenderer() {
return renderer;
}
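    // These tests exercise the ${property:key} placeholder against the object itself
    // and against the prefixed variants ($model, $view, $mfolder, $vfolder, $parent,
    // $source, $target, $assignment:source and $assignment:target), plus the
    // ${properties} form that renders all key/value pairs.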
@Test
public void render_PropertyKey() {
IDiagramModelArchimateObject dmo = TextRendererTests.createDiagramModelObject();
assertEquals("concept_v1", renderer.render(dmo, "${property:k1}"));
assertEquals("concept_v2", renderer.render(dmo, "${property:k2}"));
assertEquals("concept_v3", renderer.render(dmo, "${property:k3}"));
assertEquals("concept_v1 concept_v2", renderer.render(dmo, "${property:k1} ${property:k2}"));
assertEquals("concept_v1\nconcept_v2", renderer.render(dmo, "${property:k1}\n${property:k2}"));
}
@Test
public void render_PropertyKey_Model() {
IDiagramModelArchimateObject dmo = TextRendererTests.createDiagramModelObject();
assertEquals("model_v1", renderer.render(dmo, "$model{property:k1}"));
assertEquals("model_v2", renderer.render(dmo, "$model{property:k2}"));
assertEquals("model_v3", renderer.render(dmo, "$model{property:k3}"));
assertEquals("model_v1 model_v2", renderer.render(dmo, "$model{property:k1} $model{property:k2}"));
assertEquals("model_v1\nmodel_v2", renderer.render(dmo, "$model{property:k1}\n$model{property:k2}"));
}
@Test
public void render_PropertyKey_View() {
IDiagramModelArchimateObject dmo = TextRendererTests.createDiagramModelObject();
assertEquals("view_v1", renderer.render(dmo, "$view{property:k1}"));
assertEquals("view_v2", renderer.render(dmo, "$view{property:k2}"));
assertEquals("view_v3", renderer.render(dmo, "$view{property:k3}"));
assertEquals("view_v1 view_v2", renderer.render(dmo, "$view{property:k1} $view{property:k2}"));
assertEquals("view_v1\nview_v2", renderer.render(dmo, "$view{property:k1}\n$view{property:k2}"));
}
@Test
public void render_PropertyKey_ModelFolder() {
IDiagramModelArchimateObject dmo = TextRendererTests.createDiagramModelObject();
assertEquals("concept_folder_v1", renderer.render(dmo, "$mfolder{property:k1}"));
assertEquals("concept_folder_v2", renderer.render(dmo, "$mfolder{property:k2}"));
assertEquals("concept_folder_v3", renderer.render(dmo, "$mfolder{property:k3}"));
assertEquals("concept_folder_v1 concept_folder_v2", renderer.render(dmo, "$mfolder{property:k1} $mfolder{property:k2}"));
assertEquals("concept_folder_v1\nconcept_folder_v2", renderer.render(dmo, "$mfolder{property:k1}\n$mfolder{property:k2}"));
}
@Test
public void render_PropertyKey_ViewFolder() {
IDiagramModelArchimateObject dmo = TextRendererTests.createDiagramModelObject();
assertEquals("view_folder_v1", renderer.render(dmo, "$vfolder{property:k1}"));
assertEquals("view_folder_v2", renderer.render(dmo, "$vfolder{property:k2}"));
assertEquals("view_folder_v3", renderer.render(dmo, "$vfolder{property:k3}"));
assertEquals("view_folder_v1 view_folder_v2", renderer.render(dmo, "$vfolder{property:k1} $vfolder{property:k2}"));
assertEquals("view_folder_v1\nview_folder_v2", renderer.render(dmo, "$vfolder{property:k1}\n$vfolder{property:k2}"));
}
@Test
public void render_PropertyKey_Parent() {
IDiagramModelArchimateObject dmo = TextRendererTests.createDiagramModelObject();
assertEquals("view_v1", renderer.render(dmo, "$parent{property:k1}"));
assertEquals("view_v2", renderer.render(dmo, "$parent{property:k2}"));
assertEquals("view_v3", renderer.render(dmo, "$parent{property:k3}"));
assertEquals("view_v1 view_v2", renderer.render(dmo, "$parent{property:k1} $parent{property:k2}"));
assertEquals("view_v1\nview_v2", renderer.render(dmo, "$parent{property:k1}\n$parent{property:k2}"));
}
@Test
public void render_PropertyKey_Source() {
IDiagramModelArchimateConnection dmc = TextRendererTests.createDiagramModelConnection();
assertEquals("sconcept_v1", renderer.render(dmc, "$source{property:k1}"));
assertEquals("sconcept_v2", renderer.render(dmc, "$source{property:k2}"));
assertEquals("sconcept_v3", renderer.render(dmc, "$source{property:k3}"));
assertEquals("sconcept_v1 sconcept_v2", renderer.render(dmc, "$source{property:k1} $source{property:k2}"));
assertEquals("sconcept_v1\nsconcept_v2", renderer.render(dmc, "$source{property:k1}\n$source{property:k2}"));
}
@Test
public void render_PropertyKey_Target() {
IDiagramModelArchimateConnection dmc = TextRendererTests.createDiagramModelConnection();
assertEquals("tconcept_v1", renderer.render(dmc, "$target{property:k1}"));
assertEquals("tconcept_v2", renderer.render(dmc, "$target{property:k2}"));
assertEquals("tconcept_v3", renderer.render(dmc, "$target{property:k3}"));
assertEquals("tconcept_v1 tconcept_v2", renderer.render(dmc, "$target{property:k1} $target{property:k2}"));
assertEquals("tconcept_v1\ntconcept_v2", renderer.render(dmc, "$target{property:k1}\n$target{property:k2}"));
}
@Test
public void render_PropertyKey_ConnectedSource() {
IDiagramModelArchimateConnection dmc = TextRendererTests.createDiagramModelConnection();
assertEquals("sconcept_v1", renderer.render(dmc.getTarget(), "$assignment:source{property:k1}"));
assertEquals("sconcept_v2", renderer.render(dmc.getTarget(), "$assignment:source{property:k2}"));
assertEquals("sconcept_v3", renderer.render(dmc.getTarget(), "$assignment:source{property:k3}"));
assertEquals("sconcept_v1 sconcept_v2", renderer.render(dmc.getTarget(), "$assignment:source{property:k1} $assignment:source{property:k2}"));
assertEquals("sconcept_v1\nsconcept_v2", renderer.render(dmc.getTarget(), "$assignment:source{property:k1}\n$assignment:source{property:k2}"));
}
@Test
public void render_PropertyKey_ConnectedTarget() {
IDiagramModelArchimateConnection dmc = TextRendererTests.createDiagramModelConnection();
assertEquals("tconcept_v1", renderer.render(dmc.getSource(), "$assignment:target{property:k1}"));
assertEquals("tconcept_v2", renderer.render(dmc.getSource(), "$assignment:target{property:k2}"));
assertEquals("tconcept_v3", renderer.render(dmc.getSource(), "$assignment:target{property:k3}"));
assertEquals("tconcept_v1 tconcept_v2", renderer.render(dmc.getSource(), "$assignment:target{property:k1} $assignment:target{property:k2}"));
assertEquals("tconcept_v1\ntconcept_v2", renderer.render(dmc.getSource(), "$assignment:target{property:k1}\n$assignment:target{property:k2}"));
}
@Test
public void render_Properties() {
IDiagramModelArchimateObject dmo = TextRendererTests.createDiagramModelObject();
assertEquals("k1: concept_v1\nk2: concept_v2\nk3: concept_v3\nk3: concept_v4", renderer.render(dmo, "${properties}"));
}
@Test
public void render_Properties_Model() {
IDiagramModelArchimateObject dmo = TextRendererTests.createDiagramModelObject();
assertEquals("k1: model_v1\nk2: model_v2\nk3: model_v3\nk3: model_v4", renderer.render(dmo, "$model{properties}"));
}
@Test
public void render_Properties_View() {
IDiagramModelArchimateObject dmo = TextRendererTests.createDiagramModelObject();
assertEquals("k1: view_v1\nk2: view_v2\nk3: view_v3\nk3: view_v4", renderer.render(dmo, "$view{properties}"));
}
@Test
public void render_Properties_Parent() {
IDiagramModelArchimateObject dmo = TextRendererTests.createDiagramModelObject();
String result = renderer.render(dmo, "$parent{properties}");
assertEquals("k1: view_v1\nk2: view_v2\nk3: view_v3\nk3: view_v4", result);
}
@Test
public void render_PropertiesValues() {
IDiagramModelArchimateObject dmo = TextRendererTests.createDiagramModelObject();
assertEquals("concept_v1\nconcept_v2\nconcept_v3\nconcept_v4", renderer.render(dmo, "${propertiesvalues}"));
}
@Test
public void render_PropertiesValues_Model() {
IDiagramModelArchimateObject dmo = TextRendererTests.createDiagramModelObject();
assertEquals("model_v1\nmodel_v2\nmodel_v3\nmodel_v4", renderer.render(dmo, "$model{propertiesvalues}"));
}
@Test
public void render_PropertiesValues_View() {
IDiagramModelArchimateObject dmo = TextRendererTests.createDiagramModelObject();
assertEquals("view_v1\nview_v2\nview_v3\nview_v4", renderer.render(dmo, "$view{propertiesvalues}"));
}
@Test
public void render_PropertiesValues_Parent() {
IDiagramModelArchimateObject dmo = TextRendererTests.createDiagramModelObject();
assertEquals("view_v1\nview_v2\nview_v3\nview_v4", renderer.render(dmo, "$parent{propertiesvalues}"));
}
@Test
public void render_PropertiesValues_CustomList() {
IDiagramModelArchimateObject dmo = TextRendererTests.createDiagramModelObject();
assertEquals("concept_v3\nconcept_v4", renderer.render(dmo, "${properties:\n:k3}"));
}
@Test
public void render_PropertiesValues_CustomList_Model() {
IDiagramModelArchimateObject dmo = TextRendererTests.createDiagramModelObject();
assertEquals("model_v3\nmodel_v4", renderer.render(dmo, "$model{properties:\n:k3}"));
}
@Test
public void render_PropertiesValues_CustomList_View() {
IDiagramModelArchimateObject dmo = TextRendererTests.createDiagramModelObject();
assertEquals("view_v3\nview_v4", renderer.render(dmo, "$view{properties:\n:k3}"));
}
@Test
public void render_PropertiesValues_CustomList_Parent() {
IDiagramModelArchimateObject dmo = TextRendererTests.createDiagramModelObject();
assertEquals("view_v3\nview_v4", renderer.render(dmo, "$parent{properties:\n:k3}"));
}
}
| 4,378 |
769 | #ifndef __GPIO_H
#define __GPIO_H
#include <stm32l4xx.h>
// GPIO pin definitions
#define GPIO_PIN_0 GPIO_BSRR_BS0
#define GPIO_PIN_1 GPIO_BSRR_BS1
#define GPIO_PIN_2 GPIO_BSRR_BS2
#define GPIO_PIN_3 GPIO_BSRR_BS3
#define GPIO_PIN_4 GPIO_BSRR_BS4
#define GPIO_PIN_5 GPIO_BSRR_BS5
#define GPIO_PIN_6 GPIO_BSRR_BS6
#define GPIO_PIN_7 GPIO_BSRR_BS7
#define GPIO_PIN_8 GPIO_BSRR_BS8
#define GPIO_PIN_9 GPIO_BSRR_BS9
#define GPIO_PIN_10 GPIO_BSRR_BS10
#define GPIO_PIN_11 GPIO_BSRR_BS11
#define GPIO_PIN_12 GPIO_BSRR_BS12
#define GPIO_PIN_13 GPIO_BSRR_BS13
#define GPIO_PIN_14 GPIO_BSRR_BS14
#define GPIO_PIN_15 GPIO_BSRR_BS15
// GPIO pin sources for alternate functions
#define GPIO_PinSource0 ((uint32_t)0x00000000U)
#define GPIO_PinSource1 ((uint32_t)0x00000001U)
#define GPIO_PinSource2 ((uint32_t)0x00000002U)
#define GPIO_PinSource3 ((uint32_t)0x00000003U)
#define GPIO_PinSource4 ((uint32_t)0x00000004U)
#define GPIO_PinSource5 ((uint32_t)0x00000005U)
#define GPIO_PinSource6 ((uint32_t)0x00000006U)
#define GPIO_PinSource7 ((uint32_t)0x00000007U)
#define GPIO_PinSource8 ((uint32_t)0x00000008U)
#define GPIO_PinSource9 ((uint32_t)0x00000009U)
#define GPIO_PinSource10 ((uint32_t)0x0000000AU)
#define GPIO_PinSource11 ((uint32_t)0x0000000BU)
#define GPIO_PinSource12 ((uint32_t)0x0000000CU)
#define GPIO_PinSource13 ((uint32_t)0x0000000DU)
#define GPIO_PinSource14 ((uint32_t)0x0000000EU)
#define GPIO_PinSource15 ((uint32_t)0x0000000FU)
// GPIO alternate function
#define GPIO_AF0 ((uint32_t)0x00000000U)
#define GPIO_AF1 ((uint32_t)0x00000001U)
#define GPIO_AF2 ((uint32_t)0x00000002U)
#define GPIO_AF3 ((uint32_t)0x00000003U)
#define GPIO_AF4 ((uint32_t)0x00000004U)
#define GPIO_AF5 ((uint32_t)0x00000005U)
#define GPIO_AF6 ((uint32_t)0x00000006U)
#define GPIO_AF7 ((uint32_t)0x00000007U)
#define GPIO_AF8 ((uint32_t)0x00000008U)
#define GPIO_AF9 ((uint32_t)0x00000009U)
#define GPIO_AF10 ((uint32_t)0x0000000AU)
#define GPIO_AF11 ((uint32_t)0x0000000BU)
#define GPIO_AF12 ((uint32_t)0x0000000CU)
#define GPIO_AF13 ((uint32_t)0x0000000DU)
#define GPIO_AF14 ((uint32_t)0x0000000EU)
#define GPIO_AF15 ((uint32_t)0x0000000FU)
// GPIO macros definitions
#define GPIO_MODE_SET(pin,mode) (mode << ((pin) << 1)) // set MODER bits for a specified pin
#define GPIO_MODE_MSK(pin) (0x03U << ((pin) << 1)) // mask MODER bits for a specified pin
#define GPIO_PUPD_SET(pin,pupd) (pupd << ((pin) << 1)) // set PUPDR bits for a specified pin
#define GPIO_PUPD_MSK(pin) (0x03U << ((pin) << 1)) // mask PUPDR bits for a specified pin
#define GPIO_SPD_SET(pin,spd) (spd << ((pin) << 1)) // set OSPEEDR bits for a specified pin
#define GPIO_SPD_MSK(pin) (0x03U << ((pin) << 1)) // mask OSPEEDR bits for a specified pin
#define GPIO_AF_SET(pin,af) (af << ((pin) << 2)) // set AFR bits for a specified pin
#define GPIO_AF_MSK(pin) (0x0FU << ((pin) << 2)) // mask AFR bits for a specified pin
// GPIO pin output speed
typedef enum {
GPIO_SPD_LOW = ((uint32_t)0x00000000U), // low
GPIO_SPD_MEDIUM = GPIO_OSPEEDR_OSPEED0_0, // medium
GPIO_SPD_FAST = GPIO_OSPEEDR_OSPEED0_1, // fast
GPIO_SPD_HIGH = GPIO_OSPEEDR_OSPEED0 // high
} GPIOSPD_TypeDef;
// GPIO pin output type
typedef enum {
GPIO_OT_PP = ((uint32_t)0x00000000U), // push-pull
GPIO_OT_OD = GPIO_OTYPER_OT0 // open-drain
} GPIOOT_TypeDef;
// GPIO pin configuration mode
typedef enum {
GPIO_Mode_IN = ((uint32_t)0x00000000U), // input
GPIO_Mode_OUT = GPIO_MODER_MODE0_0, // output
GPIO_Mode_AF = GPIO_MODER_MODE0_1, // alternate function
GPIO_Mode_AN = GPIO_MODER_MODE0 // analog
} GPIOMode_TypeDef;
// GPIO pin pull-up/pull-down configuration
typedef enum {
GPIO_PUPD_NONE = ((uint32_t)0x00000000U), // no pull
GPIO_PUPD_PU = GPIO_PUPDR_PUPD0_0, // pull-up
GPIO_PUPD_PD = GPIO_PUPDR_PUPD0_1 // pull-down
} GPIOPUPD_TypeDef;
// GPIO pin handle structure
typedef struct {
uint32_t GPIO_AHB; // AHB bit for GPIO port
GPIO_TypeDef *GPIO; // Pointer to the pin GPIO port
uint16_t GPIO_PIN; // GPIO pin
uint8_t GPIO_SRC; // GPIO pin source
} GPIO_HandleTypeDef;
// Public macros and functions
// Port bit(s) set
// input:
// GPIOx - pointer to a GPIO peripheral handle
// pin - combination of GPIO_PIN_X values
__STATIC_INLINE void GPIO_PIN_SET(GPIO_TypeDef* GPIOx, uint32_t pin) {
GPIOx->BSRR = pin;
}
// Port bit(s) reset
// input:
// GPIOx - pointer to a GPIO peripheral handle
// pin - combination of GPIO_PIN_X values
__STATIC_INLINE void GPIO_PIN_RESET(GPIO_TypeDef* GPIOx, uint32_t pin) {
GPIOx->BRR = pin;
}
// Invert pin(s) output state
// input:
// GPIOx - pointer to a GPIO peripheral handle
// pin - combination of GPIO_PIN_X values
//#define GPIO_PIN_INVERT(PORT,pin) ((PORT)->ODR ^= (uint32_t)pin)
__STATIC_INLINE void GPIO_PIN_INVERT(GPIO_TypeDef* GPIOx, uint32_t pin) {
GPIOx->ODR ^= pin;
}
// Get pin(s) input state
// input:
// GPIOx - pointer to a GPIO peripheral handle
// pin - combination of GPIO_PIN_X values
__STATIC_INLINE uint32_t GPIO_PIN_ISTATE(GPIO_TypeDef* GPIOx, uint32_t pin) {
return (uint32_t)((GPIOx->IDR & pin) == pin);
}
// Get pin(s) output state
// input:
// GPIOx - pointer to a GPIO peripheral handle
// pin - combination of GPIO_PIN_X values
__STATIC_INLINE uint32_t GPIO_PIN_OSTATE(GPIO_TypeDef* GPIOx, uint32_t pin) {
return (uint32_t)((GPIOx->ODR & pin) == pin);
}
// Function prototypes
void GPIO_set_mode(GPIO_TypeDef *GPIOx, GPIOMode_TypeDef Mode, GPIOPUPD_TypeDef PUPD, uint16_t Pins);
void GPIO_out_cfg(GPIO_TypeDef *GPIOx, GPIOOT_TypeDef OT, GPIOSPD_TypeDef Speed, uint16_t Pins);
void GPIO_af_cfg(GPIO_TypeDef *GPIOx, uint16_t Pin, uint8_t AF);
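// Illustrative usage sketch (added note, not part of the original API): configure PA5 as a
// low-speed push-pull output and toggle it. The clock-enable line assumes the standard STM32L4
// CMSIS macro RCC_AHB2ENR_GPIOAEN; everything else uses only the macros and prototypes above.
//
//   RCC->AHB2ENR |= RCC_AHB2ENR_GPIOAEN;                             // enable GPIOA peripheral clock
//   GPIO_set_mode(GPIOA, GPIO_Mode_OUT, GPIO_PUPD_NONE, GPIO_PIN_5); // PA5 as general-purpose output
//   GPIO_out_cfg(GPIOA, GPIO_OT_PP, GPIO_SPD_LOW, GPIO_PIN_5);       // push-pull, low speed
//   GPIO_PIN_SET(GPIOA, GPIO_PIN_5);                                 // drive PA5 high
//   GPIO_PIN_INVERT(GPIOA, GPIO_PIN_5);                              // toggle PA5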
#endif // __GPIO_H
| 3,137 |
651 | <reponame>dandycheung/subzero<filename>java/gui/src/main/java/com/squareup/subzero/wallet/TestWallets.java<gh_stars>100-1000
package com.squareup.subzero.wallet;
/**
* Hardcoded wallets for blackbox regression testing of subzero transaction signing
* <p>
* Instead of loading a test wallet from a file, we have it loaded from this class
*/
public final class TestWallets {
// Test wallet 1492, share 1, for off-target dev testing.
protected static final String devTestWallet =
"{\"currency\":\"TEST_NET\",\"encrypted_master_seed\":{\"encrypted_master_seed\":" +
"\"<KEY>" +
"<KEY>},\"encrypted_pub_keys\"" +
":[{\"encrypted_pub_key\":\"<KEY>" +
"<KEY> +
"<KEY>\"},{\"encryp" +
"ted_pub_key\":\"<KEY>" +
"<KEY>" +
"7WzZAFCuQ04bNNATuGfdXQK8pgoCHKWKTJ2c3alaZvIauwzkfQ==\"},{\"encrypted_pub_key" +
"\":\"<KEY>" +
"RPva29lPdi4X4mz+qde2nPYMvIJtW0ndAUGU2kw9dhzVY/FZ8XGnIH33otuKE2i+HxOYwxk6+EqS" +
"1WEoWEqRe2LO8h1DTg9GsYzzTyjSj2OKIOGc02A==\"},{\"encrypted_pub_key\":\"s/1O2n" +
"<KEY>" +
"<KEY>" +
"<KEY>\"}]}";
    // Test wallet for on-target nCipher testing. Note that ncipherTestWallet differs
// from devTestWallet only in its extra "ocs_id" and "master_seed_encryption_key_id"
// values. We could use the string below for both test wallets, but keep them separate
// so that the code is easier to understand.
protected static final String ncipherTestWallet =
"{\"currency\":\"TEST_NET\",\"ocs_id\":\"adb1c4d63095d578b60d7fa3ef44f2acc435c821" +
"\",\"master_seed_encryption_key_id\":\"masterseedenckey128\", \"encrypted_ma" +
"ster_seed\":{\"encrypted_master_seed\":\"ioBg3WF2BntMnGae6PyWbp1VG4r446PUYVZ" +
"nt1BzOOVQzHy3XeaqmBXS6tMbE9fsB0sR+Vi9xPgJcayN2uJsJNjEw7S77h9oUUpu0zWrYvl6iRA" +
"I4fcezOxbRcc=\"},\"encrypted_pub_keys\":[{\"encrypted_pub_key\":\"<KEY>
<KEY> +
"<KEY>" +
"<KEY>},{\"encrypted_pub_key\":\"<KEY>
<KEY>" +
"<KEY>" +
"<KEY>},{\"encrypted_pub_key\":\"<KEY>
<KEY>" +
"<KEY>/<KEY>" +
"2A==\"},{\"encrypted_pub_key\":\"<KEY>" +
"<KEY>" +
"<KEY>\"}]}";
}
| 1,120 |
1,224 | <gh_stars>1000+
package plugin.google.maps;
public interface AsyncLoadImageInterface {
public void onPostExecute(AsyncLoadImage.AsyncLoadImageResult result) ;
}
| 48 |
301 | <reponame>Anton-V-K/Notepad2e
// Scintilla source code edit control
/** @file LexerBase.h
** A simple lexer with no state.
**/
// Copyright 1998-2010 by <NAME> <<EMAIL>>
// The License.txt file describes the conditions under which this software may be distributed.
#ifndef LEXERBASE_H
#define LEXERBASE_H
namespace Scintilla {
// A simple lexer with no state
class LexerBase : public ILexerWithMetaData {
protected:
const LexicalClass *lexClasses;
size_t nClasses;
PropSetSimple props;
enum {numWordLists=KEYWORDSET_MAX+1};
WordList *keyWordLists[numWordLists+1];
public:
LexerBase(const LexicalClass *lexClasses_=nullptr, size_t nClasses_=0);
virtual ~LexerBase();
void SCI_METHOD Release() override;
int SCI_METHOD Version() const override;
const char * SCI_METHOD PropertyNames() override;
int SCI_METHOD PropertyType(const char *name) override;
const char * SCI_METHOD DescribeProperty(const char *name) override;
Sci_Position SCI_METHOD PropertySet(const char *key, const char *val) override;
const char * SCI_METHOD DescribeWordListSets() override;
Sci_Position SCI_METHOD WordListSet(int n, const char *wl) override;
void SCI_METHOD Lex(Sci_PositionU startPos, Sci_Position lengthDoc, int initStyle, IDocument *pAccess) override = 0;
void SCI_METHOD Fold(Sci_PositionU startPos, Sci_Position lengthDoc, int initStyle, IDocument *pAccess) override = 0;
void * SCI_METHOD PrivateCall(int operation, void *pointer) override;
int SCI_METHOD LineEndTypesSupported() override;
int SCI_METHOD AllocateSubStyles(int styleBase, int numberStyles) override;
int SCI_METHOD SubStylesStart(int styleBase) override;
int SCI_METHOD SubStylesLength(int styleBase) override;
int SCI_METHOD StyleFromSubStyle(int subStyle) override;
int SCI_METHOD PrimaryStyleFromStyle(int style) override;
void SCI_METHOD FreeSubStyles() override;
void SCI_METHOD SetIdentifiers(int style, const char *identifiers) override;
int SCI_METHOD DistanceToSecondaryStyles() override;
const char * SCI_METHOD GetSubStyleBases() override;
int SCI_METHOD NamedStyles() override;
const char * SCI_METHOD NameOfStyle(int style) override;
const char * SCI_METHOD TagsOfStyle(int style) override;
const char * SCI_METHOD DescriptionOfStyle(int style) override;
};
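// Note (illustrative sketch, not part of the original header): all other interface methods are
// given default implementations above, so a concrete lexer only has to derive from LexerBase and
// supply the two pure virtual members, e.g.
//
//   class LexerFoo : public LexerBase {
//   public:
//       void SCI_METHOD Lex(Sci_PositionU startPos, Sci_Position lengthDoc, int initStyle, IDocument *pAccess) override;
//       void SCI_METHOD Fold(Sci_PositionU startPos, Sci_Position lengthDoc, int initStyle, IDocument *pAccess) override;
//   };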
}
#endif
| 782 |
732 | import argparse
import os
import pytest
from shutil import rmtree, copytree
from afdko.makeinstancesufo import (
main as mkinstufo,
get_options,
_split_comma_sequence,
updateInstance,
)
from runner import main as runner
from differ import main as differ
from test_utils import get_input_path
TOOL = 'makeinstancesufo'
DATA_DIR = os.path.join(os.path.dirname(__file__), TOOL + '_data')
TEMP_DIR = os.path.join(DATA_DIR, "temp_output")
def _get_output_path(file_name, dir_name):
return os.path.join(DATA_DIR, dir_name, file_name)
def setup_module():
"""
Create the temporary output directory
"""
rmtree(TEMP_DIR, ignore_errors=True)
os.mkdir(TEMP_DIR)
def teardown_module():
"""
teardown the temporary UFOs or the directory that holds them
"""
rmtree(os.path.join(TEMP_DIR), True)
rmtree(os.path.join(DATA_DIR, 'input', 'same_dir.ufo'), True)
# -----
# Tests
# -----
@pytest.mark.parametrize('args, ufo_filename', [
(['_0'], 'extralight.ufo'), # hint/remove overlap/normalize/round
(['_1', 'a'], 'light.ufo'), # no hint
(['_2', 'r'], 'regular.ufo'), # no round
(['_3', 'r', 'n'], 'regular1.ufo'), # no round & no normalize
(['_4', 'c'], 'semibold.ufo'), # no remove overlap
(['_5', 'n'], 'bold.ufo'), # no normalize
(['_6', 'a', 'c', 'n'], 'black.ufo'), # round only
(['_7', 'a', 'c', 'n'], 'anisotropic.ufo'),
(['_8', 'a', '=ufo-version', '_2'], 'ufo2.ufo'), # no hint UFO v2
])
def test_options(args, ufo_filename):
runner(['-t', TOOL, '-o', 'd',
f'_{get_input_path("font.designspace")}', 'i'] + args)
expected_path = _get_output_path(ufo_filename, 'expected_output')
actual_path = _get_output_path(ufo_filename, 'temp_output')
assert differ([expected_path, actual_path])
@pytest.mark.parametrize('v_arg', ['', 'v', 'vv', 'vvv'])
def test_log_level(v_arg):
dspath = get_input_path('font.designspace')
v_val = len(v_arg)
arg = [] if not v_val else [f'-{v_arg}']
opts = get_options(['-d', dspath] + arg)
assert opts.verbose == v_val
@pytest.mark.parametrize('str_seq, lst_seq', [
('10', [10]),
('11,13,19', [11, 13, 19]),
('16,0,13', [0, 13, 16]),
])
def test_split_comma_sequence(str_seq, lst_seq):
assert _split_comma_sequence(str_seq) == lst_seq
@pytest.mark.parametrize('str_seq', ['0,a', '1,-2', '10-10', '5+4', '3*5'])
def test_split_comma_sequence_error(str_seq):
with pytest.raises(argparse.ArgumentTypeError):
_split_comma_sequence(str_seq)
@pytest.mark.parametrize('filename', [
'noaxes', 'nosources', 'nosourcepath', 'badsourcepath',
'noinstances', 'noinstancepath', ('badinstanceindex', 2)
])
def test_validate_designspace_doc_raise(filename):
args = []
if isinstance(filename, tuple):
filename, idx = filename
args = ['-i', f'{idx}']
dspath = get_input_path(f'{filename}.designspace')
assert mkinstufo(['-d', dspath] + args) == 1
@pytest.mark.parametrize('args', [
[], # checkoutlinesufo call
['-c'], # psautohint call
])
def test_update_instance_raise(args):
dspath = get_input_path('font.designspace')
ufopath = get_input_path('invalid.ufo')
opts = get_options(['-d', dspath] + args)
with pytest.raises(SystemExit):
updateInstance(ufopath, opts)
def test_filename_without_dir():
instance_path = get_input_path('same_dir.ufo')
assert not os.path.exists(instance_path)
runner(['-t', TOOL, '-o', 'd',
f'_{get_input_path("font.designspace")}', 'i', '_9'])
assert os.path.exists(instance_path)
@pytest.mark.parametrize('args, ufo_filename', [
(['_0'], 'ufo3regular.ufo'), # hint/remove overlap/normalize/round
(['_1'], 'ufo3regular.ufo'), # for testing instance removal
(['_2', 'a', 'c', 'n'], 'ufo3medium.ufo'), # round only
(['_3', 'a', '=ufo-version', '_2'], 'ufo3semibold.ufo'), # no hint UFO v2
])
def test_ufo3_masters(args, ufo_filename):
runner(['-t', TOOL, '-o', 'd',
f'_{get_input_path("ufo3.designspace")}', 'i'] + args)
expected_path = _get_output_path(ufo_filename, 'expected_output')
actual_path = _get_output_path(ufo_filename, 'temp_output')
assert differ([expected_path, actual_path])
@pytest.mark.parametrize('filename', ['features_copy', 'features_nocopy'])
def test_features_copy(filename):
# NOTE: This test was originally implemented without copying the expected
# UFOs to a temp location, but the Windows64 build always modified the glif
    # files' line endings after the test was run and this caused wheel problems
# https://ci.appveyor.com/project/adobe-type-tools/afdko/builds/25459479
# ---
# First copy the expected UFOs into the temp folder. The UFOs need to
# exist before makeinstancesufo is called because this test is about
# checking that any existing features.fea files are preserved.
paths = []
for i in (1, 2): # two instances
ufo_filename = f'{filename}{i}.ufo'
from_path = _get_output_path(ufo_filename, 'expected_output')
to_path = os.path.join(TEMP_DIR, ufo_filename)
copytree(from_path, to_path)
paths.append((to_path, from_path))
# run makeinstancesufo
runner(['-t', TOOL, '-o', 'a', 'c', 'n', 'd',
f'_{get_input_path(f"{filename}.designspace")}'])
# assert the expected results
for expected_path, actual_path in paths:
assert differ([expected_path, actual_path])
@pytest.mark.parametrize('args, ufo_filename', [
(['_0'], 'bend1.ufo'),
(['_1'], 'bend2.ufo'),
(['_2'], 'bend3.ufo'),
])
def test_bend_masters_mutator_math(args, ufo_filename):
# MutatorMath 2.1.2 did not handle location bending properly which resulted
# in incorrect interpolation outlines. This was fixed in 3.0.1.
runner(['-t', TOOL, '-o', 'a', 'c', 'n', 'd',
f'_{get_input_path("bend_test.designspace")}', 'i'] + args)
expected_path = _get_output_path(ufo_filename, 'expected_output')
actual_path = _get_output_path(ufo_filename, 'temp_output')
assert differ([expected_path, actual_path])
@pytest.mark.parametrize('args, ufo_filename', [
(['_0'], 'bend1.ufo'),
(['_1'], 'bend2.ufo'),
(['_2'], 'bend3.ufo'),
])
def test_bend_masters_varlib(args, ufo_filename):
# We should get the same output passing through varLib
runner(['-t', TOOL, '-o', 'a', 'c', 'n', '=use-varlib', 'd',
f'_{get_input_path("bend_test.designspace")}', 'i'] + args)
expected_path = _get_output_path(ufo_filename, 'expected_output')
actual_path = _get_output_path(ufo_filename, 'temp_output')
assert differ([expected_path, actual_path])
@pytest.mark.parametrize('use_varlib', [False, True])
def test_extrapolate(capfd, use_varlib):
"""
Test extrapolating, with default (MutatorMath) and with varlib.
Using varlib should fail (output should not be extrapolated) because
extrapolation is not supported by varlib.
"""
runner_args = ['-t', TOOL, '-o', 'v', 'd', f'_{get_input_path("extrapolation.designspace")}'] # noqa: E501
if use_varlib:
runner_args.append('=use-varlib')
runner(runner_args)
captured = capfd.readouterr()
tool = "fontTools.varlib" if use_varlib else "MutatorMath"
assert f"Building 2 instances with {tool}..." in captured.err
for ufo_filename in ("Dummy-ExtraPlus.ufo", "Dummy-ExtraMinus.ufo"):
expected_path = _get_output_path(ufo_filename, 'expected_output')
actual_path = _get_output_path(ufo_filename, 'temp_output')
if use_varlib:
assert not differ([expected_path, actual_path])
assert "Extrapolation is not supported with varlib (Dummy Extra Plus weight: 1500.0)" in captured.err # noqa: E501
else:
assert differ([expected_path, actual_path])
| 3,281 |
1,127 | <gh_stars>1000+
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include <string>
#include <memory>
#include <ngraph/function.hpp>
#include <ngraph/opsets/opset7.hpp>
#include <ngraph/pass/manager.hpp>
#include "transformations/common_optimizations/hsigmoid_fusion.hpp"
#include <transformations/common_optimizations/hswish_fusion.hpp>
#include <transformations/init_node_info.hpp>
#include <transformations/utils/utils.hpp>
#include "common_test_utils/ngraph_test_utils.hpp"
using namespace testing;
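// Reader's note (added comment, not part of the original test file): the graphs built below are
// algebraic spellings of
//   HSwish(x) = x * min(max(x + 3, 0), 6) / 6   (equivalently x * HSigmoid(x))
// and are expected to be collapsed into a single HSwish node, while the "Wrong*" variants perturb
// one constant so that the pattern must be left untouched.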
TEST_F(TransformationTestsF, HSwishFusionWithReluDivF16) {
{
auto input = std::make_shared<ngraph::opset7::Parameter>(ngraph::element::f16, ngraph::PartialShape::dynamic(1));
auto add_constant = ngraph::opset7::Constant::create(ngraph::element::f16, ngraph::Shape{}, {3.0});
auto add = std::make_shared<ngraph::opset7::Add>(input, add_constant);
auto relu = std::make_shared<ngraph::opset7::Relu>(add);
auto min_constant = ngraph::opset7::Constant::create(ngraph::element::f16, ngraph::Shape{}, {6.0});
auto min = std::make_shared<ngraph::opset7::Minimum>(relu, min_constant);
auto mul = std::make_shared<ngraph::opset7::Multiply>(input, min);
auto div_constant = ngraph::opset7::Constant::create(ngraph::element::f16, ngraph::Shape{}, {6.0});
auto div = std::make_shared<ngraph::opset7::Divide>(mul, div_constant);
function = std::make_shared<ngraph::Function>(ngraph::NodeVector{div}, ngraph::ParameterVector{input});
manager.register_pass<ngraph::pass::HSwishFusion>();
}
{
auto input = std::make_shared<ngraph::opset7::Parameter>(ngraph::element::f16, ngraph::PartialShape::dynamic(1));
auto hswish = std::make_shared<ngraph::opset7::HSwish>(input);
function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{hswish}, ngraph::ParameterVector{input});
}
}
TEST_F(TransformationTestsF, HSwishFusionWithReluDivF32) {
{
auto input = std::make_shared<ngraph::opset7::Parameter>(ngraph::element::f32, ngraph::Shape{});
auto add_constant = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{}, {3.0});
auto add = std::make_shared<ngraph::opset7::Add>(input, add_constant);
auto relu = std::make_shared<ngraph::opset7::Relu>(add);
auto min_constant = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{}, {6.0});
auto min = std::make_shared<ngraph::opset7::Minimum>(relu, min_constant);
auto mul = std::make_shared<ngraph::opset7::Multiply>(input, min);
auto div_constant = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{}, {6.0});
auto div = std::make_shared<ngraph::opset7::Divide>(mul, div_constant);
function = std::make_shared<ngraph::Function>(ngraph::NodeVector{div}, ngraph::ParameterVector{input});
manager.register_pass<ngraph::pass::HSwishFusion>();
}
{
auto input = std::make_shared<ngraph::opset7::Parameter>(ngraph::element::f32, ngraph::Shape{});
auto hswish = std::make_shared<ngraph::opset7::HSwish>(input);
function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{hswish}, ngraph::ParameterVector{input});
}
}
TEST_F(TransformationTestsF, HSwishFusionWithReluMul) {
{
auto input = std::make_shared<ngraph::opset7::Parameter>(ngraph::element::f16, ngraph::PartialShape::dynamic(1));
auto add_constant = ngraph::opset7::Constant::create(ngraph::element::f16, ngraph::Shape{}, {3.0});
auto add = std::make_shared<ngraph::opset7::Add>(input, add_constant);
auto relu = std::make_shared<ngraph::opset7::Relu>(add);
auto min_constant = ngraph::opset7::Constant::create(ngraph::element::f16, ngraph::Shape{}, {6.0});
auto min = std::make_shared<ngraph::opset7::Minimum>(relu, min_constant);
auto mul_first = std::make_shared<ngraph::opset7::Multiply>(input, min);
auto mul_constant = ngraph::opset7::Constant::create(ngraph::element::f16, ngraph::Shape{}, {0.1666666716});
auto mul_second = std::make_shared<ngraph::opset7::Multiply>(mul_first, mul_constant);
function = std::make_shared<ngraph::Function>(ngraph::NodeVector{mul_second}, ngraph::ParameterVector{input});
manager.register_pass<ngraph::pass::HSwishFusion>();
}
{
auto input = std::make_shared<ngraph::opset7::Parameter>(ngraph::element::f16, ngraph::PartialShape::dynamic(1));
auto hswish = std::make_shared<ngraph::opset7::HSwish>(input);
function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{hswish}, ngraph::ParameterVector{input});
}
}
TEST_F(TransformationTestsF, HSwishFusionWithoutRelu) {
{
auto input = std::make_shared<ngraph::opset7::Parameter>(ngraph::element::f16, ngraph::PartialShape::dynamic(1));
auto add_constant = ngraph::opset7::Constant::create(ngraph::element::f16, ngraph::Shape{}, {3.0});
auto add = std::make_shared<ngraph::opset7::Add>(input, add_constant);
auto max_constant = ngraph::opset7::Constant::create(ngraph::element::f16, ngraph::Shape{}, {0.0});
auto max = std::make_shared<ngraph::opset7::Maximum>(add, max_constant);
auto min_constant = ngraph::opset7::Constant::create(ngraph::element::f16, ngraph::Shape{}, {6.0});
auto min = std::make_shared<ngraph::opset7::Minimum>(max, min_constant);
auto div_constant = ngraph::opset7::Constant::create(ngraph::element::f16, ngraph::Shape{}, {6.0});
auto div = std::make_shared<ngraph::opset7::Divide>(min, div_constant);
auto mul = std::make_shared<ngraph::opset7::Multiply>(input, div);
function = std::make_shared<ngraph::Function>(ngraph::NodeVector{mul}, ngraph::ParameterVector{input});
auto gr = manager.register_pass<ngraph::pass::GraphRewrite>();
gr->add_matcher<ngraph::pass::HSigmoidFusion>();
gr->add_matcher<ngraph::pass::HSwishFusion>();
}
{
auto input = std::make_shared<ngraph::opset7::Parameter>(ngraph::element::f16, ngraph::PartialShape::dynamic(1));
auto hswish = std::make_shared<ngraph::opset7::HSwish>(input);
function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{hswish}, ngraph::ParameterVector{input});
}
}
TEST_F(TransformationTestsF, HSwishFusionWithClampMul) {
{
auto input = std::make_shared<ngraph::opset7::Parameter>(ngraph::element::f16, ngraph::PartialShape::dynamic(1));
auto add_constant = ngraph::opset7::Constant::create(ngraph::element::f16, ngraph::Shape{}, {3.0});
auto add = std::make_shared<ngraph::opset7::Add>(input, add_constant);
auto clamp = std::make_shared<ngraph::opset7::Clamp>(add, 0.0f, 6.0f);
auto mul_constant = ngraph::opset7::Constant::create(ngraph::element::f16, ngraph::Shape{}, {1.0 / 6.0});
auto mul_first = std::make_shared<ngraph::opset7::Multiply>(clamp, mul_constant);
auto mul_second = std::make_shared<ngraph::opset7::Multiply>(input, mul_first);
function = std::make_shared<ngraph::Function>(ngraph::NodeVector{mul_second}, ngraph::ParameterVector{input});
auto gr = manager.register_pass<ngraph::pass::GraphRewrite>();
gr->add_matcher<ngraph::pass::HSigmoidFusion>();
gr->add_matcher<ngraph::pass::HSwishFusion>();
}
{
auto input = std::make_shared<ngraph::opset7::Parameter>(ngraph::element::f16, ngraph::PartialShape::dynamic(1));
auto hswish = std::make_shared<ngraph::opset7::HSwish>(input);
function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{hswish}, ngraph::ParameterVector{input});
}
}
TEST_F(TransformationTestsF, HSwishFusionWithClampDiv) {
{
auto input = std::make_shared<ngraph::opset7::Parameter>(ngraph::element::f16, ngraph::PartialShape::dynamic(1));
auto add_constant = ngraph::opset7::Constant::create(ngraph::element::f16, ngraph::Shape{}, {3.0});
auto add = std::make_shared<ngraph::opset7::Add>(input, add_constant);
auto clamp = std::make_shared<ngraph::opset7::Clamp>(add, 0.0f, 6.0f);
auto div_constant = ngraph::opset7::Constant::create(ngraph::element::f16, ngraph::Shape{}, {6.0});
auto div = std::make_shared<ngraph::opset7::Divide>(clamp, div_constant);
auto mul = std::make_shared<ngraph::opset7::Multiply>(input, div);
function = std::make_shared<ngraph::Function>(ngraph::NodeVector{mul}, ngraph::ParameterVector{input});
auto gr = manager.register_pass<ngraph::pass::GraphRewrite>();
gr->add_matcher<ngraph::pass::HSigmoidFusion>();
gr->add_matcher<ngraph::pass::HSwishFusion>();
}
{
auto input = std::make_shared<ngraph::opset7::Parameter>(ngraph::element::f16, ngraph::PartialShape::dynamic(1));
auto hswish = std::make_shared<ngraph::opset7::HSwish>(input);
function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{hswish}, ngraph::ParameterVector{input});
}
}
TEST_F(TransformationTestsF, HSwishFusionWithReluMulWrongConstValue) {
{
auto input = std::make_shared<ngraph::opset7::Parameter>(ngraph::element::f16, ngraph::PartialShape::dynamic(1));
auto add_constant = ngraph::opset7::Constant::create(ngraph::element::f16, ngraph::Shape{}, {3.0});
auto add = std::make_shared<ngraph::opset7::Add>(input, add_constant);
auto relu = std::make_shared<ngraph::opset7::Relu>(add);
auto min_constant = ngraph::opset7::Constant::create(ngraph::element::f16, ngraph::Shape{}, {6.0});
auto min = std::make_shared<ngraph::opset7::Minimum>(relu, min_constant);
auto mul_first = std::make_shared<ngraph::opset7::Multiply>(input, min);
auto mul_constant = ngraph::opset7::Constant::create(ngraph::element::f16, ngraph::Shape{}, {0.167});
auto mul_second = std::make_shared<ngraph::opset7::Multiply>(mul_first, mul_constant);
function = std::make_shared<ngraph::Function>(ngraph::NodeVector{mul_second}, ngraph::ParameterVector{input});
manager.register_pass<ngraph::pass::HSwishFusion>();
}
{
auto input = std::make_shared<ngraph::opset7::Parameter>(ngraph::element::f16, ngraph::PartialShape::dynamic(1));
auto add_constant = ngraph::opset7::Constant::create(ngraph::element::f16, ngraph::Shape{}, {3.0});
auto add = std::make_shared<ngraph::opset7::Add>(input, add_constant);
auto relu = std::make_shared<ngraph::opset7::Relu>(add);
auto min_constant = ngraph::opset7::Constant::create(ngraph::element::f16, ngraph::Shape{}, {6.0});
auto min = std::make_shared<ngraph::opset7::Minimum>(relu, min_constant);
auto mul_first = std::make_shared<ngraph::opset7::Multiply>(input, min);
auto mul_constant = ngraph::opset7::Constant::create(ngraph::element::f16, ngraph::Shape{}, {0.167});
auto mul_second = std::make_shared<ngraph::opset7::Multiply>(mul_first, mul_constant);
function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{mul_second}, ngraph::ParameterVector{input});
}
}
TEST_F(TransformationTestsF, HSwishFusionWithReluDivWrongConstValue) {
{
auto input = std::make_shared<ngraph::opset7::Parameter>(ngraph::element::f16, ngraph::Shape{});
auto add_constant = ngraph::opset7::Constant::create(ngraph::element::f16, ngraph::Shape{}, {3.01});
auto add = std::make_shared<ngraph::opset7::Add>(input, add_constant);
auto relu = std::make_shared<ngraph::opset7::Relu>(add);
auto min_constant = ngraph::opset7::Constant::create(ngraph::element::f16, ngraph::Shape{}, {6.002});
auto min = std::make_shared<ngraph::opset7::Minimum>(relu, min_constant);
auto mul = std::make_shared<ngraph::opset7::Multiply>(input, min);
auto div_constant = ngraph::opset7::Constant::create(ngraph::element::f16, ngraph::Shape{}, {0.1});
auto div = std::make_shared<ngraph::opset7::Divide>(mul, div_constant);
function = std::make_shared<ngraph::Function>(ngraph::NodeVector{div}, ngraph::ParameterVector{input});
manager.register_pass<ngraph::pass::HSwishFusion>();
}
{
auto input = std::make_shared<ngraph::opset7::Parameter>(ngraph::element::f16, ngraph::Shape{});
auto add_constant = ngraph::opset7::Constant::create(ngraph::element::f16, ngraph::Shape{}, {3.01});
auto add = std::make_shared<ngraph::opset7::Add>(input, add_constant);
auto relu = std::make_shared<ngraph::opset7::Relu>(add);
auto min_constant = ngraph::opset7::Constant::create(ngraph::element::f16, ngraph::Shape{}, {6.002});
auto min = std::make_shared<ngraph::opset7::Minimum>(relu, min_constant);
auto mul = std::make_shared<ngraph::opset7::Multiply>(input, min);
auto div_constant = ngraph::opset7::Constant::create(ngraph::element::f16, ngraph::Shape{}, {0.0});
auto div = std::make_shared<ngraph::opset7::Divide>(mul, div_constant);
function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{div}, ngraph::ParameterVector{input});
}
}
TEST_F(TransformationTestsF, HSwishFusionWithoutReluWrongConstValue) {
{
auto input = std::make_shared<ngraph::opset7::Parameter>(ngraph::element::f16, ngraph::PartialShape::dynamic(1));
auto add_constant = ngraph::opset7::Constant::create(ngraph::element::f16, ngraph::Shape{}, {3.11});
auto add = std::make_shared<ngraph::opset7::Add>(input, add_constant);
auto max_constant = ngraph::opset7::Constant::create(ngraph::element::f16, ngraph::Shape{}, {0.22});
auto max = std::make_shared<ngraph::opset7::Maximum>(add, max_constant);
auto min_constant = ngraph::opset7::Constant::create(ngraph::element::f16, ngraph::Shape{}, {6.01});
auto min = std::make_shared<ngraph::opset7::Minimum>(max, min_constant);
auto div_constant = ngraph::opset7::Constant::create(ngraph::element::f16, ngraph::Shape{}, {6.002});
auto div = std::make_shared<ngraph::opset7::Divide>(min, div_constant);
auto mul = std::make_shared<ngraph::opset7::Multiply>(input, div);
function = std::make_shared<ngraph::Function>(ngraph::NodeVector{mul}, ngraph::ParameterVector{input});
auto gr = manager.register_pass<ngraph::pass::GraphRewrite>();
gr->add_matcher<ngraph::pass::HSigmoidFusion>();
gr->add_matcher<ngraph::pass::HSwishFusion>();
}
{
auto input = std::make_shared<ngraph::opset7::Parameter>(ngraph::element::f16, ngraph::PartialShape::dynamic(1));
auto add_constant = ngraph::opset7::Constant::create(ngraph::element::f16, ngraph::Shape{}, {3.11});
auto add = std::make_shared<ngraph::opset7::Add>(input, add_constant);
auto max_constant = ngraph::opset7::Constant::create(ngraph::element::f16, ngraph::Shape{}, {0.22});
auto max = std::make_shared<ngraph::opset7::Maximum>(add, max_constant);
auto min_constant = ngraph::opset7::Constant::create(ngraph::element::f16, ngraph::Shape{}, {6.01});
auto min = std::make_shared<ngraph::opset7::Minimum>(max, min_constant);
auto div_constant = ngraph::opset7::Constant::create(ngraph::element::f16, ngraph::Shape{}, {6.002});
auto div = std::make_shared<ngraph::opset7::Divide>(min, div_constant);
auto mul = std::make_shared<ngraph::opset7::Multiply>(input, div);
function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{mul}, ngraph::ParameterVector{input});
}
}
TEST_F(TransformationTestsF, HSwishFusionWithClampWrongConstValue) {
{
auto input = std::make_shared<ngraph::opset7::Parameter>(ngraph::element::f16, ngraph::PartialShape::dynamic(1));
auto add_constant = ngraph::opset7::Constant::create(ngraph::element::f16, ngraph::Shape{}, {3.11});
auto add = std::make_shared<ngraph::opset7::Add>(input, add_constant);
auto clamp = std::make_shared<ngraph::opset7::Clamp>(add, 0.11f, 6.02f);
auto mul_constant = ngraph::opset7::Constant::create(ngraph::element::f16, ngraph::Shape{}, {0.98 / 6.15});
auto mul_first = std::make_shared<ngraph::opset7::Multiply>(clamp, mul_constant);
auto mul_second = std::make_shared<ngraph::opset7::Multiply>(input, mul_first);
function = std::make_shared<ngraph::Function>(ngraph::NodeVector{mul_second}, ngraph::ParameterVector{input});
auto gr = manager.register_pass<ngraph::pass::GraphRewrite>();
gr->add_matcher<ngraph::pass::HSigmoidFusion>();
gr->add_matcher<ngraph::pass::HSwishFusion>();
}
{
auto input = std::make_shared<ngraph::opset7::Parameter>(ngraph::element::f16, ngraph::PartialShape::dynamic(1));
auto add_constant = ngraph::opset7::Constant::create(ngraph::element::f16, ngraph::Shape{}, {3.11});
auto add = std::make_shared<ngraph::opset7::Add>(input, add_constant);
auto clamp = std::make_shared<ngraph::opset7::Clamp>(add, 0.11f, 6.02f);
auto mul_constant = ngraph::opset7::Constant::create(ngraph::element::f16, ngraph::Shape{}, {0.98 / 6.15});
auto mul_first = std::make_shared<ngraph::opset7::Multiply>(clamp, mul_constant);
auto mul_second = std::make_shared<ngraph::opset7::Multiply>(input, mul_first);
function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{mul_second}, ngraph::ParameterVector{input});
}
}
TEST_F(TransformationTestsF, HSwishFusionWithHSigmoidMul) {
{
auto input = std::make_shared<ngraph::opset7::Parameter>(ngraph::element::f16, ngraph::PartialShape::dynamic(1));
auto hsigmoid = std::make_shared<ngraph::opset7::HSigmoid>(input);
auto mul = std::make_shared<ngraph::opset7::Multiply>(input, hsigmoid);
function = std::make_shared<ngraph::Function>(ngraph::NodeVector{mul}, ngraph::ParameterVector{input});
manager.register_pass<ngraph::pass::HSwishFusion>();
}
{
auto input = std::make_shared<ngraph::opset7::Parameter>(ngraph::element::f16, ngraph::PartialShape::dynamic(1));
auto hswish = std::make_shared<ngraph::opset7::HSwish>(input);
function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{hswish}, ngraph::ParameterVector{input});
}
}
TEST_F(TransformationTestsF, HSwishFusionWithClamp) {
{
auto input = std::make_shared<ngraph::opset7::Parameter>(ngraph::element::f16, ngraph::PartialShape::dynamic(1));
auto add_constant = ngraph::opset7::Constant::create(ngraph::element::f16, ngraph::Shape{}, {3.0});
auto add = std::make_shared<ngraph::opset7::Add>(input, add_constant);
auto clamp = std::make_shared<ngraph::opset7::Clamp>(add, 0.0f, 6.0f);
auto mul = std::make_shared<ngraph::opset7::Multiply>(input, clamp);
function = std::make_shared<ngraph::Function>(ngraph::NodeVector{mul}, ngraph::ParameterVector{input});
auto gr = manager.register_pass<ngraph::pass::GraphRewrite>();
gr->add_matcher<ngraph::pass::HSwishFusion>();
}
{
auto input = std::make_shared<ngraph::opset7::Parameter>(ngraph::element::f16, ngraph::PartialShape::dynamic(1));
auto hswish = std::make_shared<ngraph::opset7::HSwish>(input);
auto mul_const = ngraph::opset7::Constant::create(ngraph::element::f16, ngraph::Shape{}, {6.0});
auto mul = std::make_shared<ngraph::opset7::Multiply>(hswish, mul_const);
function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{mul}, ngraph::ParameterVector{input});
}
}
TEST_F(TransformationTestsF, HSwishFusionWithClampWithWrongConstant) {
{
auto input = std::make_shared<ngraph::opset7::Parameter>(ngraph::element::f16, ngraph::PartialShape::dynamic(1));
auto add_constant = ngraph::opset7::Constant::create(ngraph::element::f16, ngraph::Shape{}, {3.11});
auto add = std::make_shared<ngraph::opset7::Add>(input, add_constant);
auto clamp = std::make_shared<ngraph::opset7::Clamp>(add, 0.11f, 6.32f);
auto mul = std::make_shared<ngraph::opset7::Multiply>(input, clamp);
function = std::make_shared<ngraph::Function>(ngraph::NodeVector{mul}, ngraph::ParameterVector{input});
auto gr = manager.register_pass<ngraph::pass::GraphRewrite>();
gr->add_matcher<ngraph::pass::HSwishFusion>();
}
{
auto input = std::make_shared<ngraph::opset7::Parameter>(ngraph::element::f16, ngraph::PartialShape::dynamic(1));
auto add_constant = ngraph::opset7::Constant::create(ngraph::element::f16, ngraph::Shape{}, {3.11});
auto add = std::make_shared<ngraph::opset7::Add>(input, add_constant);
auto clamp = std::make_shared<ngraph::opset7::Clamp>(add, 0.11f, 6.32f);
auto mul = std::make_shared<ngraph::opset7::Multiply>(input, clamp);
function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{mul}, ngraph::ParameterVector{input});
}
} | 8,652 |
3,334 | <filename>themes/uno/theme.json
{
"files": {
"default": "default.hbs",
"error": "error.hbs",
"index": "index.hbs",
"page": "page.hbs",
"post": "post.hbs",
"tag": "tag.hbs",
"comments": "partials/comments.hbs",
"footer": "partials/footer.hbs",
"list-posts": "partials/list-posts.hbs",
"pagination": "partials/pagination.hbs",
"side-panel": "partials/side-panel.hbs",
"social": "partials/social.hbs"
},
"version":"1.0.0"
}
| 224 |
874 | package com.jnape.palatable.lambda.functions.builtin.fn2;
import com.jnape.palatable.lambda.functions.Fn1;
import com.jnape.palatable.lambda.functions.Fn2;
import static com.jnape.palatable.lambda.functions.builtin.fn1.Tail.tail;
import static com.jnape.palatable.lambda.functions.builtin.fn2.PrependAll.prependAll;
/**
* Lazily inject the provided separator value between each value in the supplied <code>Iterable</code>. An
* <code>Iterable</code> with fewer than two elements is left untouched.
*
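 * <p>
 * A minimal usage illustration (the values shown are only an example, not taken from the
 * project's documentation):
 * <pre>{@code
 * Iterable<Integer> separated = intersperse(0, java.util.Arrays.asList(1, 2, 3));
 * // lazily yields 1, 0, 2, 0, 3
 * }</pre>
 *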
* @param <A> the Iterable parameter type
* @see PrependAll
*/
public final class Intersperse<A> implements Fn2<A, Iterable<A>, Iterable<A>> {
private static final Intersperse<?> INSTANCE = new Intersperse<>();
private Intersperse() {
}
@Override
public Iterable<A> checkedApply(A a, Iterable<A> as) {
return tail(prependAll(a, as));
}
@SuppressWarnings("unchecked")
public static <A> Intersperse<A> intersperse() {
return (Intersperse<A>) INSTANCE;
}
public static <A> Fn1<Iterable<A>, Iterable<A>> intersperse(A a) {
return Intersperse.<A>intersperse().apply(a);
}
public static <A> Iterable<A> intersperse(A a, Iterable<A> as) {
return intersperse(a).apply(as);
}
}
| 481 |
435 | {
"copyright_text": "Creative Commons Attribution license (reuse allowed)",
"description": "<NAME> - Dynamic Class Generation in Python\n[EuroPython 2016]\n[18 July 2016]\n[Bilbao, Euskadi, Spain]\n(https://ep2016.europython.eu//conference/talks/dynamic-class-generation-in-python)\n\nThis talk is about dynamic class generation in python: the practice of\nwriting code that generates classes and their functionality at\nruntime. It will use boto3, the AWS SDK for Python, as a basis to dive\ninto the basics, the benefits, and the drawbacks to dynamically\ngenerating classes.\n\n-----\n\nThis talk is about the concept of dynamic class generation in python.\nThe whole idea is writing code that generates classes and their\nfunctionality at runtime. You now may be asking yourself, \u201cThat sounds\nlike a neat trick. Why would I ever generate my classes at runtime?\u201d\nHere are a few reasons why:\n\n- It can decrease the physical size of your code.\n\n- It can improve the workflow in adding new functionality.\n\n- It can improve reliability of your code.\n\nOne example where the power of this concept has really been leveraged\nis in boto3, the AWS SDK for Python. Dynamic class generation has\nallowed boto3 to become heavily data driven such that most of its\nclasses and methods are generated based off JSON models representing\naspects of an AWS service\u2019s API. For example, to add support for a new\nAWS service API in boto3, just plop in a JSON file into the library\nwith no additional Python code required.\n\nUsing lessons and techniques drawn from developing boto3, this talk\nwill dive into the following topics related to dynamic class\ngeneration:\n\n- The basics of dynamic class generation such as how to effectively dynamically generate classes.\n\n- How to overcome some of the challenges of dynamic class generation.\n\n- The tradeoffs in dynamically generating classes and discussion on when it is appropriate.\n\nBy the end of this talk, the hope is that you will have a better\nunderstanding of dynamic class generation and come away with helpful\nideas for your next big project.",
"duration": 1571,
"language": "eng",
"recorded": "2016-07-28",
"related_urls": [
"https://ep2016.europython.eu//conference/talks/dynamic-class-generation-in-python"
],
"speakers": [
"<NAME>"
],
"tags": [],
"thumbnail_url": "https://i.ytimg.com/vi/fhqE7aS6cj8/maxresdefault.jpg",
"title": "Dynamic Class Generation in Python",
"videos": [
{
"type": "youtube",
"url": "https://www.youtube.com/watch?v=fhqE7aS6cj8"
}
]
}
| 747 |
1,093 | /*
* Copyright 2015-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.integration.zookeeper.config.xml;
import org.w3c.dom.Element;
import org.springframework.beans.factory.support.AbstractBeanDefinition;
import org.springframework.beans.factory.support.BeanDefinitionBuilder;
import org.springframework.beans.factory.xml.AbstractBeanDefinitionParser;
import org.springframework.beans.factory.xml.ParserContext;
import org.springframework.integration.config.xml.IntegrationNamespaceUtils;
import org.springframework.integration.zookeeper.config.LeaderInitiatorFactoryBean;
/**
* @author <NAME>
* @author <NAME>
*
* @since 4.2
*
*/
public class LeaderListenerParser extends AbstractBeanDefinitionParser {
@Override
protected AbstractBeanDefinition parseInternal(Element element, ParserContext parserContext) {
BeanDefinitionBuilder builder =
BeanDefinitionBuilder.genericBeanDefinition(LeaderInitiatorFactoryBean.class)
.addPropertyReference("client", element.getAttribute("client"))
.addPropertyValue("role", element.getAttribute(IntegrationNamespaceUtils.ROLE))
.addPropertyValue("path", element.getAttribute("path"));
IntegrationNamespaceUtils.setValueIfAttributeDefined(builder, element, IntegrationNamespaceUtils.AUTO_STARTUP);
IntegrationNamespaceUtils.setValueIfAttributeDefined(builder, element, IntegrationNamespaceUtils.PHASE);
IntegrationNamespaceUtils.setReferenceIfAttributeDefined(builder, element, "candidate");
return builder.getBeanDefinition();
}
@Override
protected boolean shouldGenerateIdAsFallback() {
return true;
}
}
| 617 |
5,169 | {
"name": "FXFormController",
"version": "0.2.0",
"summary": "A short description of FXFormController.",
"description": "TODO: Add long description of the pod here.",
"homepage": "https://github.com/<EMAIL>/FXFormController",
"license": {
"type": "MIT",
"file": "LICENSE"
},
"authors": {
"<EMAIL>": "<EMAIL>"
},
"source": {
"git": "https://github.com/CoderSword/FXFormController.git",
"tag": "0.2.0"
},
"platforms": {
"ios": "8.0"
},
"source_files": "FXFormController/Classes/**/*",
"resource_bundles": {
"FXFormController": [
"FXFormController/Assets/imageShare.bundle"
]
},
"dependencies": {
"MJExtension": [
],
"TZImagePickerController": [
],
"GKPhotoBrowser": [
],
"BRPickerView": [
],
"SDWebImage": [
],
"Masonry": [
]
}
}
| 371 |
1,225 | package com.flask.colorpicker;
import android.graphics.Canvas;
import android.graphics.Paint;
import android.graphics.drawable.ColorDrawable;
import com.flask.colorpicker.builder.PaintBuilder;
public class ColorCircleDrawable extends ColorDrawable {
private float strokeWidth;
private Paint strokePaint = PaintBuilder.newPaint().style(Paint.Style.STROKE).stroke(strokeWidth).color(0xff9e9e9e).build();
private Paint fillPaint = PaintBuilder.newPaint().style(Paint.Style.FILL).color(0).build();
private Paint fillBackPaint = PaintBuilder.newPaint().shader(PaintBuilder.createAlphaPatternShader(26)).build();
public ColorCircleDrawable(int color) {
super(color);
}
@Override
public void draw(Canvas canvas) {
canvas.drawColor(0);
int width = canvas.getWidth();
float radius = width / 2f;
strokeWidth = radius / 8f;
this.strokePaint.setStrokeWidth(strokeWidth);
this.fillPaint.setColor(getColor());
canvas.drawCircle(radius, radius, radius - strokeWidth, fillBackPaint);
canvas.drawCircle(radius, radius, radius - strokeWidth, fillPaint);
canvas.drawCircle(radius, radius, radius - strokeWidth, strokePaint);
}
@Override
public void setColor(int color) {
super.setColor(color);
invalidateSelf();
}
}
| 419 |
8,966 | __version__ = '4.8.0dev'
| 13 |
328 | package com.dragon.flow.config;
import org.aspectj.lang.annotation.Aspect;
import org.springframework.aop.framework.autoproxy.BeanNameAutoProxyCreator;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.transaction.TransactionDefinition;
import org.springframework.transaction.TransactionManager;
import org.springframework.transaction.interceptor.*;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
/**
 * Transaction configuration
*
* @author bruce.liu
* @date 2021/3/25 11:07
*/
@Aspect
@Configuration
public class TransactionConfig {
@Autowired
private TransactionManager transactionManager;
@Bean(name = "txAdvice")
public TransactionInterceptor txAdvice() {
NameMatchTransactionAttributeSource source = new NameMatchTransactionAttributeSource();
        /* Read-only transaction: no update operations are performed */
RuleBasedTransactionAttribute readOnlyTx = new RuleBasedTransactionAttribute();
readOnlyTx.setReadOnly(true);
readOnlyTx.setPropagationBehavior(TransactionDefinition.PROPAGATION_NOT_SUPPORTED);
        /* Use the current transaction if one exists, otherwise create a new one */
RuleBasedTransactionAttribute requiredTx = new RuleBasedTransactionAttribute(TransactionDefinition.PROPAGATION_REQUIRED,
Collections.singletonList(new RollbackRuleAttribute(Exception.class)));
// requiredTx.setTimeout(5);
Map<String, TransactionAttribute> txMap = new HashMap<>();
txMap.put("save*", requiredTx);
txMap.put("insert*", requiredTx);
txMap.put("create*", requiredTx);
txMap.put("add*", requiredTx);
txMap.put("update*", requiredTx);
txMap.put("edit*", requiredTx);
txMap.put("del*", requiredTx);
txMap.put("drop*", requiredTx);
txMap.put("remove*", requiredTx);
txMap.put("import*", requiredTx);
txMap.put("active*", requiredTx);
txMap.put("stop*", requiredTx);
/************************* flowable **************************/
txMap.put("start*", requiredTx);
txMap.put("revoke*", requiredTx);
txMap.put("complete*", requiredTx);
txMap.put("turn*", requiredTx);
txMap.put("claim*", requiredTx);
txMap.put("back*", requiredTx);
txMap.put("deploy*", requiredTx);
txMap.put("set*", requiredTx);
txMap.put("before*", requiredTx);
txMap.put("after*", requiredTx);
txMap.put("activate*", requiredTx);
txMap.put("publish*", requiredTx);
txMap.put("suspend*", requiredTx);
txMap.put("sync*", requiredTx);
txMap.put("review*", requiredTx);
txMap.put("copy*", requiredTx);
/************************* flowable **************************/
txMap.put("query*", readOnlyTx);
txMap.put("find*", readOnlyTx);
txMap.put("select*", readOnlyTx);
txMap.put("get*", readOnlyTx);
txMap.put("*", readOnlyTx);
source.setNameMap(txMap);
return new TransactionInterceptor(transactionManager, source);
}
@Bean
public BeanNameAutoProxyCreator txProxy() {
BeanNameAutoProxyCreator creator = new BeanNameAutoProxyCreator();
creator.setInterceptorNames("txAdvice");
creator.setBeanNames("*Service", "*ServiceImpl", "*component", "*componentImpl");
creator.setProxyTargetClass(true);
return creator;
}
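    // Illustrative note (an assumption based on the mappings above, not part of the original class):
    // with these two beans in place, a call such as saveOrder(...) on a bean named "orderServiceImpl"
    // is intercepted by the txProxy() proxy and runs under the read-write REQUIRED attribute, rolling
    // back on any Exception, whereas getOrder(...) on the same bean falls through to the read-only,
    // PROPAGATION_NOT_SUPPORTED attribute.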
}
| 1,426 |
1,605 | /*****************************************************************************
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
package org.apache.xmpbox.schema;
import java.util.stream.Stream;
import org.apache.xmpbox.XMPMetadata;
import org.apache.xmpbox.type.Cardinality;
import org.apache.xmpbox.type.Types;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
class PhotoshopSchemaTest
{
private XMPMetadata metadata;
private Class<?> schemaClass;
@BeforeEach
void initMetadata()
{
metadata = XMPMetadata.createXMPMetadata();
schemaClass = PhotoshopSchema.class;
}
@ParameterizedTest
@MethodSource("initializeParameters")
void testInitializedToNull(String fieldName, Types type, Cardinality card) throws Exception
{
SchemaTester schemaTester = new SchemaTester(metadata, schemaClass, fieldName, type, card);
schemaTester.testInitializedToNull();
}
@ParameterizedTest
@MethodSource("initializeParameters")
void testSettingValue(String fieldName, Types type, Cardinality card) throws Exception
{
SchemaTester schemaTester = new SchemaTester(metadata, schemaClass, fieldName, type, card);
schemaTester.testSettingValue();
}
@ParameterizedTest
@MethodSource("initializeParameters")
void testRandomSettingValue(String fieldName, Types type, Cardinality card) throws Exception
{
SchemaTester schemaTester = new SchemaTester(metadata, schemaClass, fieldName, type, card);
schemaTester.testRandomSettingValue();
}
@ParameterizedTest
@MethodSource("initializeParameters")
void testSettingValueInArray(String fieldName, Types type, Cardinality card) throws Exception
{
SchemaTester schemaTester = new SchemaTester(metadata, schemaClass, fieldName, type, card);
schemaTester.testSettingValueInArray();
}
@ParameterizedTest
@MethodSource("initializeParameters")
void testRandomSettingValueInArray(String fieldName, Types type, Cardinality card) throws Exception
{
SchemaTester schemaTester = new SchemaTester(metadata, schemaClass, fieldName, type, card);
schemaTester.testRandomSettingValueInArray();
}
@ParameterizedTest
@MethodSource("initializeParameters")
void testPropertySetterSimple(String fieldName, Types type, Cardinality card) throws Exception
{
SchemaTester schemaTester = new SchemaTester(metadata, schemaClass, fieldName, type, card);
schemaTester.testPropertySetterSimple();
}
@ParameterizedTest
@MethodSource("initializeParameters")
void testRandomPropertySetterSimple(String fieldName, Types type, Cardinality card) throws Exception
{
SchemaTester schemaTester = new SchemaTester(metadata, schemaClass, fieldName, type, card);
schemaTester.testRandomPropertySetterSimple();
}
@ParameterizedTest
@MethodSource("initializeParameters")
void testPropertySetterInArray(String fieldName, Types type, Cardinality card) throws Exception
{
SchemaTester schemaTester = new SchemaTester(metadata, schemaClass, fieldName, type, card);
schemaTester.testPropertySetterInArray();
}
@ParameterizedTest
@MethodSource("initializeParameters")
void testRandomPropertySetterInArray(String fieldName, Types type, Cardinality card) throws Exception
{
SchemaTester schemaTester = new SchemaTester(metadata, schemaClass, fieldName, type, card);
schemaTester.testRandomPropertySetterInArray();
}
private static Stream<Arguments> initializeParameters()
{
return Stream.of(
Arguments.of("AncestorID", Types.URI, Cardinality.Simple),
Arguments.of("AuthorsPosition", Types.Text, Cardinality.Simple),
Arguments.of("CaptionWriter", Types.ProperName, Cardinality.Simple),
Arguments.of("Category", Types.Text, Cardinality.Simple),
Arguments.of("City", Types.Text, Cardinality.Simple),
Arguments.of("ColorMode", Types.Integer, Cardinality.Simple),
Arguments.of("Country", Types.Text, Cardinality.Simple),
Arguments.of("Credit", Types.Text, Cardinality.Simple),
Arguments.of("DateCreated", Types.Date, Cardinality.Simple),
Arguments.of("Headline", Types.Text, Cardinality.Simple),
Arguments.of("History", Types.Text, Cardinality.Simple),
Arguments.of("ICCProfile", Types.Text, Cardinality.Simple),
Arguments.of("Instructions", Types.Text, Cardinality.Simple),
Arguments.of("Source", Types.Text, Cardinality.Simple),
Arguments.of("State", Types.Text, Cardinality.Simple),
Arguments.of("SupplementalCategories", Types.Text, Cardinality.Simple),
Arguments.of("TransmissionReference", Types.Text, Cardinality.Simple),
Arguments.of("Urgency", Types.Integer, Cardinality.Simple)
);
}
}
| 1,987 |
6,989 | <gh_stars>1000+
#include "hyperparameter_tuning.h"
#include <catboost/private/libs/algo/data.h>
#include <catboost/private/libs/algo/approx_dimension.h>
#include <catboost/libs/data/feature_names_converter.h>
#include <catboost/libs/data/objects_grouping.h>
#include <catboost/libs/helpers/cpu_random.h>
#include <catboost/libs/helpers/exception.h>
#include <catboost/libs/helpers/dynamic_iterator.h>
#include <catboost/libs/loggers/catboost_logger_helpers.h>
#include <catboost/libs/loggers/logger.h>
#include <catboost/libs/logging/logging.h>
#include <catboost/libs/logging/profile_info.h>
#include <catboost/libs/train_lib/dir_helper.h>
#include <catboost/private/libs/options/plain_options_helper.h>
#include <util/generic/algorithm.h>
#include <util/generic/deque.h>
#include <util/generic/set.h>
#include <util/generic/xrange.h>
#include <util/random/shuffle.h>
#include <numeric>
namespace {
const TVector<TString> NanModeParamAliaces {"nan_mode"};
const TVector<TString> BorderCountParamAliaces {"border_count", "max_bin"};
const TVector<TString> BorderTypeParamAliaces {"feature_border_type"};
constexpr ui32 IndexOfFirstTrainingParameter = 3;
// TEnumeratedSet - type of sets, TValue - type of values in sets
// A set should provide access to its elements by index and a size() method.
// Uniqueness of elements is not required: 'set' is just an informal term here.
template <class TEnumeratedSet, class TValue>
class TProductIteratorBase: public NCB::IDynamicIterator<TConstArrayRef<TValue>> {
protected:
bool IsStopIteration = false;
size_t FirstVaryingDigit = 0;
ui64 PassedElementsCount = 0;
ui64 TotalElementsCount;
TVector<size_t> MultiIndex;
TVector<TEnumeratedSet> Sets;
TVector<TValue> State;
protected:
explicit TProductIteratorBase(const TVector<TEnumeratedSet>& sets)
: Sets(sets) {
InitClassFields(sets);
ui64 totalCount = 1;
ui64 logTotalCount = 0;
for (const auto& set : sets) {
CB_ENSURE(set.size() > 0, "Error: set should be not empty");
logTotalCount += log2(set.size());
CB_ENSURE(logTotalCount < 64, "Error: The parameter grid is too large. Try to reduce it.");
totalCount *= set.size();
}
TotalElementsCount = totalCount;
}
void InitClassFields(const TVector<TEnumeratedSet>& sets) {
if (sets.size() == 0) {
IsStopIteration = true;
return;
}
MultiIndex.resize(sets.size(), 0);
size_t idx = 0;
for (const auto& set : sets) {
State.push_back(set[0]);
MultiIndex[idx] = set.size() - 1;
++idx;
}
}
const TVector<TValue>& NextWithOffset(ui64 offset) {
for (size_t setIdx = MultiIndex.size() - 1; setIdx > 0; --setIdx) {
size_t oldDigit = MultiIndex[setIdx];
MultiIndex[setIdx] = (MultiIndex[setIdx] + offset) % Sets[setIdx].size();
State[setIdx] = Sets[setIdx][MultiIndex[setIdx]];
if (oldDigit + offset < Sets[setIdx].size()) {
return State;
}
offset = (offset - (Sets[setIdx].size() - oldDigit)) / Sets[setIdx].size() + 1;
}
MultiIndex[0] = (MultiIndex[0] + offset) % Sets[0].size();
State[0] = Sets[0][MultiIndex[0]];
return State;
}
bool IsIteratorReachedEnd() {
return PassedElementsCount >= TotalElementsCount;
}
public:
ui64 GetTotalElementsCount() {
return TotalElementsCount;
}
};
template <class TEnumeratedSet, class TValue>
class TCartesianProductIterator: public TProductIteratorBase<TEnumeratedSet, TValue> {
public:
explicit TCartesianProductIterator(const TVector<TEnumeratedSet>& sets)
: TProductIteratorBase<TEnumeratedSet, TValue>(sets)
{}
bool Next(TConstArrayRef<TValue>* value) override {
if (this->IsIteratorReachedEnd()) {
return false;
}
this->PassedElementsCount++;
*value = this->NextWithOffset(1);
return true;
}
};
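// Illustrative usage sketch (not part of the original file); the element types are arbitrary here,
// the grid search below instantiates the iterator with TDeque<NJson::TJsonValue> sets and
// NJson::TJsonValue values:
//     TVector<TVector<int>> sets = {{1, 2, 3}, {10, 20}};
//     TCartesianProductIterator<TVector<int>, int> it(sets);
//     TConstArrayRef<int> combo;
//     while (it.Next(&combo)) {
//         // visits all 6 combinations, starting with {1, 10}
//     }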
template <class TEnumeratedSet, class TValue>
class TRandomizedProductIterator: public TProductIteratorBase<TEnumeratedSet, TValue> {
private:
TVector<ui64> FlatOffsets;
size_t OffsetIndex = 0;
public:
// Pass count = {any positive number} to iterate over {count} randomly chosen elements.
TRandomizedProductIterator(const TVector<TEnumeratedSet>& sets, ui32 count, bool allowRepeat = false)
: TProductIteratorBase<TEnumeratedSet, TValue>(sets) {
CB_ENSURE(count > 0, "Error: param count for TRandomizedProductIterator should be a positive number");
ui64 totalCount = this->TotalElementsCount;
if (count > totalCount && !allowRepeat) {
count = totalCount;
}
TVector<ui64> indexes;
if (static_cast<double>(count) / totalCount > 0.7 && !allowRepeat) {
indexes.resize(totalCount);
std::iota(indexes.begin(), indexes.end(), 1);
Shuffle(indexes.begin(), indexes.end());
indexes.resize(count);
} else {
TSet<ui64> choosenIndexes;
TRandom random;
while (indexes.size() != count) {
ui64 nextRandom = random.NextUniformL() % totalCount;
while (choosenIndexes.contains(nextRandom)) {
nextRandom = random.NextUniformL() % totalCount;
}
indexes.push_back(nextRandom);
if (!allowRepeat) {
choosenIndexes.insert(nextRandom);
}
}
}
Sort(indexes);
ui64 lastIndex = 0;
for (const auto& index : indexes) {
FlatOffsets.push_back(index - lastIndex);
lastIndex = index;
}
this->TotalElementsCount = count;
}
bool Next(TConstArrayRef<TValue>* values) override {
if (this->IsIteratorReachedEnd()) {
return false;
}
ui64 offset = 1;
offset = FlatOffsets[OffsetIndex];
++OffsetIndex;
this->PassedElementsCount++;
*values = this->NextWithOffset(offset);
return true;
}
};
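// Note (not part of the original file): with allowRepeat=false the iterator visits `count`
// distinct grid points drawn uniformly at random; RandomizedSearch below passes allowRepeat=true
// whenever custom random distribution generators are supplied, since repeated grid points then
// still yield different sampled parameter values.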
struct TGeneralQuatizationParamsInfo {
bool IsBordersCountInGrid = false;
bool IsBorderTypeInGrid = false;
bool IsNanModeInGrid = false;
TString BordersCountParamName = BorderCountParamAliaces[0];
TString BorderTypeParamName = BorderTypeParamAliaces[0];
TString NanModeParamName = NanModeParamAliaces[0];
};
struct TQuantizationParamsInfo {
int BinsCount = -1;
EBorderSelectionType BorderType;
ENanMode NanMode;
TGeneralQuatizationParamsInfo GeneralInfo;
};
struct TGridParamsInfo {
TQuantizationParamsInfo QuantizationParamsSet;
NCB::TQuantizedFeaturesInfoPtr QuantizedFeatureInfo;
NJson::TJsonValue OthersParamsSet;
TVector<TString> GridParamNames;
};
bool CheckIfRandomDisribution(const TString& value) {
return value.rfind("CustomRandomDistributionGenerator", 0) == 0;
}
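// Note (not part of the original file): such a grid value is not a literal candidate; a string
// like "CustomRandomDistributionGenerator_0" (hypothetical key) refers to an entry of the
// randDistGen map and is replaced by a freshly drawn value in GetRandomValueIfNeeded below.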
NJson::TJsonValue GetRandomValueIfNeeded(
const NJson::TJsonValue& value,
const THashMap<TString, NCB::TCustomRandomDistributionGenerator>& randDistGen) {
if (value.GetType() == NJson::EJsonValueType::JSON_STRING) {
if (CheckIfRandomDisribution(value.GetString())) {
CB_ENSURE(
randDistGen.find(value.GetString()) != randDistGen.end(),
"Error: Reference to unknown random distribution generator"
);
const auto& rnd = randDistGen.at(value.GetString());
return NJson::TJsonValue(rnd.EvalFunc(rnd.CustomData));
}
}
return value;
}
void AssignOptionsToJson(
TConstArrayRef<TString> names,
TConstArrayRef<NJson::TJsonValue> values,
const THashMap<TString, NCB::TCustomRandomDistributionGenerator>& randDistGen,
NJson::TJsonValue* jsonValues) {
CB_ENSURE(names.size() == values.size(), "Error: names and values should have same size");
for (size_t i : xrange(names.size())) {
(*jsonValues)[names[i]] = GetRandomValueIfNeeded(values[i], randDistGen);
}
}
NCB::TTrainingDataProviders PrepareTrainTestSplit(
NCB::TTrainingDataProviderPtr srcData,
const TTrainTestSplitParams& trainTestSplitParams,
ui64 cpuUsedRamLimit,
NPar::ILocalExecutor* localExecutor) {
CB_ENSURE(
srcData->ObjectsData->GetOrder() != NCB::EObjectsOrder::Ordered,
"Params search for ordered objects data is not yet implemented"
);
NCB::TArraySubsetIndexing<ui32> trainIndices;
NCB::TArraySubsetIndexing<ui32> testIndices;
if (trainTestSplitParams.Stratified) {
NCB::TMaybeData<TConstArrayRef<float>> maybeTarget
= srcData->TargetData->GetOneDimensionalTarget();
CB_ENSURE(maybeTarget, "Cannot do stratified split: Target data is unavailable");
NCB::StratifiedTrainTestSplit(
*srcData->ObjectsGrouping,
*maybeTarget,
trainTestSplitParams.TrainPart,
&trainIndices,
&testIndices
);
} else {
TrainTestSplit(
*srcData->ObjectsGrouping,
trainTestSplitParams.TrainPart,
&trainIndices,
&testIndices
);
}
return NCB::CreateTrainTestSubsets<NCB::TTrainingDataProviders>(
srcData,
std::move(trainIndices),
std::move(testIndices),
cpuUsedRamLimit,
localExecutor
);
}
bool TryCheckParamType(
const TString& paramName,
const TSet<NJson::EJsonValueType>& allowedTypes,
const NJson::TJsonValue& gridJsonValues) {
if (!gridJsonValues.GetMap().contains(paramName)) {
return false;
}
const auto& jsonValues = gridJsonValues.GetMap().at(paramName);
for (const auto& value : jsonValues.GetArray()) {
const auto type = value.GetType();
if (allowedTypes.find(type) != allowedTypes.end()) {
continue;
}
if (type == NJson::EJsonValueType::JSON_STRING && CheckIfRandomDisribution(value.GetString())) {
continue;
}
ythrow TCatBoostException() << "Can't parse parameter \"" << paramName
<< "\" with value: " << value;
}
return true;
}
template <class T, typename Func>
void FindAndExtractParam(
const TVector<TString>& paramAliases,
const NCatboostOptions::TOption<T>& option,
const TSet<NJson::EJsonValueType>& allowedTypes,
const Func& typeCaster,
bool* isInGrid,
TString* exactParamName,
TDeque<NJson::TJsonValue>* values,
NJson::TJsonValue* gridJsonValues,
NJson::TJsonValue* modelJsonParams) {
for (const auto& paramName : paramAliases) {
*exactParamName = paramName;
*isInGrid = TryCheckParamType(
*exactParamName,
allowedTypes,
*gridJsonValues
);
if (*isInGrid) {
break;
}
}
if (*isInGrid) {
*values = (*gridJsonValues)[*exactParamName].GetArray();
gridJsonValues->EraseValue(*exactParamName);
modelJsonParams->EraseValue(*exactParamName);
} else {
values->push_back(
NJson::TJsonValue(
typeCaster(option.Get())
)
);
}
}
void FindAndExtractGridQuantizationParams(
const NCatboostOptions::TCatBoostOptions& catBoostOptions,
TDeque<NJson::TJsonValue>* borderMaxCounts,
bool* isBordersCountInGrid,
TString* borderCountsParamName,
TDeque<NJson::TJsonValue>* borderTypes,
bool* isBorderTypeInGrid,
TString* borderTypesParamName,
TDeque<NJson::TJsonValue>* nanModes,
bool* isNanModeInGrid,
TString* nanModesParamName,
NJson::TJsonValue* gridJsonValues,
NJson::TJsonValue* modelJsonParams) {
FindAndExtractParam(
BorderCountParamAliaces,
catBoostOptions.DataProcessingOptions->FloatFeaturesBinarization.Get().BorderCount,
{
NJson::EJsonValueType::JSON_INTEGER,
NJson::EJsonValueType::JSON_UINTEGER,
NJson::EJsonValueType::JSON_DOUBLE
},
[](ui32 value){ return value; },
isBordersCountInGrid,
borderCountsParamName,
borderMaxCounts,
gridJsonValues,
modelJsonParams
);
FindAndExtractParam(
BorderTypeParamAliaces,
catBoostOptions.DataProcessingOptions->FloatFeaturesBinarization.Get().BorderSelectionType,
{NJson::EJsonValueType::JSON_STRING},
[](EBorderSelectionType value){ return ToString(value); },
isBorderTypeInGrid,
borderTypesParamName,
borderTypes,
gridJsonValues,
modelJsonParams
);
FindAndExtractParam(
NanModeParamAliaces,
catBoostOptions.DataProcessingOptions->FloatFeaturesBinarization.Get().NanMode,
{NJson::EJsonValueType::JSON_STRING},
[](ENanMode value){ return ToString(value); },
isNanModeInGrid,
nanModesParamName,
nanModes,
gridJsonValues,
modelJsonParams
);
}
bool QuantizeDataIfNeeded(
bool allowWriteFiles,
const TString& tmpDir,
NCB::TFeaturesLayoutPtr featuresLayout,
NCB::TQuantizedFeaturesInfoPtr quantizedFeaturesInfo,
NCB::TDataProviderPtr data,
const TQuantizationParamsInfo& oldQuantizedParamsInfo,
const TQuantizationParamsInfo& newQuantizedParamsInfo,
TLabelConverter* labelConverter,
NPar::ILocalExecutor* localExecutor,
TRestorableFastRng64* rand,
NCatboostOptions::TCatBoostOptions* catBoostOptions,
NCB::TTrainingDataProviderPtr* result) {
if (oldQuantizedParamsInfo.BinsCount != newQuantizedParamsInfo.BinsCount ||
oldQuantizedParamsInfo.BorderType != newQuantizedParamsInfo.BorderType ||
oldQuantizedParamsInfo.NanMode != newQuantizedParamsInfo.NanMode)
{
NCatboostOptions::TBinarizationOptions commonFloatFeaturesBinarization(
newQuantizedParamsInfo.BorderType,
newQuantizedParamsInfo.BinsCount,
newQuantizedParamsInfo.NanMode
);
TVector<ui32> ignoredFeatureNums; // TODO(ilikepugs): MLTOOLS-3838
TMaybe<float> targetBorder = catBoostOptions->DataProcessingOptions->TargetBorder;
quantizedFeaturesInfo = MakeIntrusive<NCB::TQuantizedFeaturesInfo>(
*(featuresLayout.Get()),
MakeConstArrayRef(ignoredFeatureNums),
commonFloatFeaturesBinarization,
/*perFloatFeatureQuantization*/TMap<ui32, NCatboostOptions::TBinarizationOptions>(),
/*floatFeaturesAllowNansInTestOnly*/true
);
// Quantizing training data
*result = GetTrainingData(
data,
/*dataCanBeEmpty*/ false,
/*isLearnData*/ true,
/*datasetName*/ TStringBuf(),
/*bordersFile*/ Nothing(), // Already at quantizedFeaturesInfo
/*unloadCatFeaturePerfectHashFromRam*/ allowWriteFiles,
/*ensureConsecutiveLearnFeaturesDataForCpu*/ false, // data will be split afterwards anyway
tmpDir,
quantizedFeaturesInfo,
catBoostOptions,
labelConverter,
&targetBorder,
localExecutor,
rand
);
return true;
}
return false;
}
bool QuantizeAndSplitDataIfNeeded(
bool allowWriteFiles,
const TString& tmpDir,
const TTrainTestSplitParams& trainTestSplitParams,
ui64 cpuUsedRamLimit,
NCB::TFeaturesLayoutPtr featuresLayout,
NCB::TQuantizedFeaturesInfoPtr quantizedFeaturesInfo,
NCB::TDataProviderPtr data,
const TQuantizationParamsInfo& oldQuantizedParamsInfo,
const TQuantizationParamsInfo& newQuantizedParamsInfo,
TLabelConverter* labelConverter,
NPar::ILocalExecutor* localExecutor,
TRestorableFastRng64* rand,
NCatboostOptions::TCatBoostOptions* catBoostOptions,
NCB::TTrainingDataProviders* result) {
NCB::TTrainingDataProviderPtr quantizedData;
bool isNeedSplit = QuantizeDataIfNeeded(
allowWriteFiles,
tmpDir,
featuresLayout,
quantizedFeaturesInfo,
data,
oldQuantizedParamsInfo,
newQuantizedParamsInfo,
labelConverter,
localExecutor,
rand,
catBoostOptions,
&quantizedData
);
if (isNeedSplit) {
// Train-test split
*result = PrepareTrainTestSplit(
quantizedData,
trainTestSplitParams,
cpuUsedRamLimit,
localExecutor
);
return true;
}
return false;
}
void ParseGridParams(
const NCatboostOptions::TCatBoostOptions& catBoostOptions,
NJson::TJsonValue* jsonGrid,
NJson::TJsonValue* modelJsonParams,
TVector<TString>* paramNames,
TVector<TDeque<NJson::TJsonValue>>* paramPossibleValues,
TGeneralQuatizationParamsInfo* generalQuantizeParamsInfo) {
paramPossibleValues->resize(3);
FindAndExtractGridQuantizationParams(
catBoostOptions,
&(*paramPossibleValues)[0],
&generalQuantizeParamsInfo->IsBordersCountInGrid,
&generalQuantizeParamsInfo->BordersCountParamName,
&(*paramPossibleValues)[1],
&generalQuantizeParamsInfo->IsBorderTypeInGrid,
&generalQuantizeParamsInfo->BorderTypeParamName,
&(*paramPossibleValues)[2],
&generalQuantizeParamsInfo->IsNanModeInGrid,
&generalQuantizeParamsInfo->NanModeParamName,
jsonGrid,
modelJsonParams
);
for (const auto& set : jsonGrid->GetMap()) {
paramNames->push_back(set.first);
paramPossibleValues->resize(paramPossibleValues->size() + 1);
CB_ENSURE(set.second.GetArray().size() > 0, "Error: an empty set of values for parameter " + paramNames->back());
for (auto& value : set.second.GetArray()) {
(*paramPossibleValues)[paramPossibleValues->size() - 1].push_back(value);
}
}
}
void SetGridParamsToBestOptionValues(
const TGridParamsInfo & gridParams,
NCB::TBestOptionValuesWithCvResult* namedOptionsCollection) {
namedOptionsCollection->SetOptionsFromJson(gridParams.OthersParamsSet.GetMap(), gridParams.GridParamNames);
// Adding quantization params if needed
if (gridParams.QuantizationParamsSet.GeneralInfo.IsBordersCountInGrid) {
const TString& paramName = gridParams.QuantizationParamsSet.GeneralInfo.BordersCountParamName;
namedOptionsCollection->IntOptions[paramName] = gridParams.QuantizationParamsSet.BinsCount;
}
if (gridParams.QuantizationParamsSet.GeneralInfo.IsBorderTypeInGrid) {
const TString& paramName = gridParams.QuantizationParamsSet.GeneralInfo.BorderTypeParamName;
namedOptionsCollection->StringOptions[paramName] = ToString(gridParams.QuantizationParamsSet.BorderType);
}
if (gridParams.QuantizationParamsSet.GeneralInfo.IsNanModeInGrid) {
const TString& paramName = gridParams.QuantizationParamsSet.GeneralInfo.NanModeParamName;
namedOptionsCollection->StringOptions[paramName] = ToString(gridParams.QuantizationParamsSet.NanMode);
}
}
int GetSignForMetricMinimization(const THolder<IMetric>& metric) {
EMetricBestValue metricValueType;
metric->GetBestValue(&metricValueType, nullptr); // Choosing best params only by first metric
int metricSign;
if (metricValueType == EMetricBestValue::Min) {
metricSign = 1;
} else if (metricValueType == EMetricBestValue::Max) {
metricSign = -1;
} else {
CB_ENSURE(false, "Error: metric for grid search must be minimized or maximized");
}
return metricSign;
}
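// Worked example (not part of the original file): for RMSE (minimized) metricSign is +1 and
// smaller values win; for AUC (maximized) metricSign is -1, so the comparison
//     metricSign * metricValue < metricSign * bestValue
// in SetBestParamsAndUpdateMetricValueIfNeeded prefers larger AUC without a separate code path.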
bool SetBestParamsAndUpdateMetricValueIfNeeded(
double metricValue,
const TVector<THolder<IMetric>>& metrics,
const TQuantizationParamsInfo& quantizationParamsSet,
const NJson::TJsonValue& modelParamsToBeTried,
const TVector<TString>& paramNames,
NCB::TQuantizedFeaturesInfoPtr quantizedFeaturesInfo,
TGridParamsInfo* bestGridParams,
double* bestParamsSetMetricValue) {
int metricSign = GetSignForMetricMinimization(metrics[0]);
if (metricSign * metricValue < *bestParamsSetMetricValue * metricSign) {
*bestParamsSetMetricValue = metricValue;
bestGridParams->QuantizationParamsSet = quantizationParamsSet;
bestGridParams->OthersParamsSet = modelParamsToBeTried;
bestGridParams->QuantizedFeatureInfo = quantizedFeaturesInfo;
bestGridParams->GridParamNames = paramNames;
return true;
}
return false;
}
static TString GetNamesPrefix(ui32 foldIdx) {
return "fold_" + ToString(foldIdx) + "_";
}
static void InitializeFilesLoggers(
const TVector<THolder<IMetric>>& metrics,
const TOutputFiles& outputFiles,
const int iterationCount,
const ELaunchMode launchMode,
const int foldCountOrTestSize,
const TString& parametersToken,
TLogger* logger
) {
TVector<TString> learnSetNames;
TVector<TString> testSetNames;
switch (launchMode) {
case ELaunchMode::CV: {
for (auto foldIdx : xrange(foldCountOrTestSize)) {
learnSetNames.push_back("fold_" + ToString(foldIdx) + "_learn");
testSetNames.push_back("fold_" + ToString(foldIdx) + "_test");
}
break;
}
case ELaunchMode::Train: {
const auto& learnToken = GetTrainModelLearnToken();
const auto& testTokens = GetTrainModelTestTokens(foldCountOrTestSize);
learnSetNames = { outputFiles.NamesPrefix + learnToken };
for (int testIdx = 0; testIdx < testTokens.ysize(); ++testIdx) {
testSetNames.push_back({ outputFiles.NamesPrefix + testTokens[testIdx] });
}
break;
}
default: CB_ENSURE(false, "unexpected launchMode" << launchMode);
}
AddFileLoggers(
false,
outputFiles.LearnErrorLogFile,
outputFiles.TestErrorLogFile,
outputFiles.TimeLeftLogFile,
outputFiles.JsonLogFile,
outputFiles.ProfileLogFile,
outputFiles.TrainDir,
GetJsonMeta(
iterationCount,
outputFiles.ExperimentName,
GetConstPointers(metrics),
learnSetNames,
testSetNames,
parametersToken,
launchMode),
/*metric period*/ 1,
logger
);
}
static void LogTrainTest(
const TString& lossDescription,
TOneInterationLogger& oneIterLogger,
const TMaybe<double> bestLearnResult,
const double bestTestResult,
const TString& learnToken,
const TString& testToken,
bool isMainMetric) {
if (bestLearnResult.Defined()) {
oneIterLogger.OutputMetric(
learnToken,
TMetricEvalResult(
lossDescription,
*bestLearnResult,
isMainMetric
)
);
}
oneIterLogger.OutputMetric(
testToken,
TMetricEvalResult(
lossDescription,
bestTestResult,
isMainMetric
)
);
}
static void LogParameters(
const TVector<TString>& paramNames,
TConstArrayRef<NJson::TJsonValue> paramsSet,
const TString& parametersToken,
const TGeneralQuatizationParamsInfo& generalQuantizeParamsInfo,
TOneInterationLogger& oneIterLogger) {
NJson::TJsonValue jsonParams;
// paramsSet: {border_count, feature_border_type, nan_mode, [others]}
if (generalQuantizeParamsInfo.IsBordersCountInGrid) {
jsonParams.InsertValue(generalQuantizeParamsInfo.BordersCountParamName, paramsSet[0]);
}
if (generalQuantizeParamsInfo.IsBorderTypeInGrid) {
jsonParams.InsertValue(generalQuantizeParamsInfo.BorderTypeParamName, paramsSet[1]);
}
if (generalQuantizeParamsInfo.IsNanModeInGrid) {
jsonParams.InsertValue(generalQuantizeParamsInfo.NanModeParamName, paramsSet[2]);
}
for (size_t idx = IndexOfFirstTrainingParameter; idx < paramsSet.size(); ++idx) {
const auto key = paramNames[idx - IndexOfFirstTrainingParameter];
jsonParams.InsertValue(key, paramsSet[idx]);
}
oneIterLogger.OutputParameters(parametersToken, jsonParams);
}
bool ParseJsonParams(
const NCB::TDataMetaInfo& metaInfo,
const NJson::TJsonValue& modelParamsToBeTried,
NCatboostOptions::TCatBoostOptions *catBoostOptions,
NCatboostOptions::TOutputFilesOptions *outputFileOptions,
TString* paramsErrorMessage) {
try {
NJson::TJsonValue jsonParams;
NJson::TJsonValue outputJsonParams;
NCatboostOptions::PlainJsonToOptions(modelParamsToBeTried, &jsonParams, &outputJsonParams);
ConvertParamsToCanonicalFormat(metaInfo, &jsonParams);
*catBoostOptions = NCatboostOptions::LoadOptions(jsonParams);
outputFileOptions->Load(outputJsonParams);
return true;
} catch (const TCatBoostException& exception) {
*paramsErrorMessage = ToString(exception.what());
return false;
}
}
double TuneHyperparamsCV(
const TVector<TString>& paramNames,
const TMaybe<TCustomObjectiveDescriptor>& objectiveDescriptor,
const TMaybe<TCustomMetricDescriptor>& evalMetricDescriptor,
const TCrossValidationParams& cvParams,
const TGeneralQuatizationParamsInfo& generalQuantizeParamsInfo,
ui64 cpuUsedRamLimit,
NCB::TDataProviderPtr data,
TProductIteratorBase<TDeque<NJson::TJsonValue>, NJson::TJsonValue>* gridIterator,
NJson::TJsonValue* modelParamsToBeTried,
TGridParamsInfo* bestGridParams,
TVector<TCVResult>* bestCvResult,
NPar::ILocalExecutor* localExecutor,
int verbose,
const THashMap<TString, NCB::TCustomRandomDistributionGenerator>& randDistGenerators = {}) {
TRestorableFastRng64 rand(cvParams.PartitionRandSeed);
if (cvParams.Shuffle) {
auto objectsGroupingSubset = NCB::Shuffle(data->ObjectsGrouping, 1, &rand);
data = data->GetSubset(objectsGroupingSubset, cpuUsedRamLimit, localExecutor);
}
TSetLogging inThisScope(ELoggingLevel::Debug);
TLogger logger;
const auto parametersToken = GetParametersToken();
TString searchToken = "loss";
AddConsoleLogger(
searchToken,
{},
/*hasTrain=*/true,
verbose,
gridIterator->GetTotalElementsCount(),
&logger
);
double bestParamsSetMetricValue = 0;
// Other parameters
TQuantizationParamsInfo lastQuantizationParamsSet;
TLabelConverter labelConverter;
int iterationIdx = 0;
int bestIterationIdx = 0;
TProfileInfo profile(gridIterator->GetTotalElementsCount());
TConstArrayRef<NJson::TJsonValue> paramsSet;
TString paramsErrorString;
bool foundValidParams = false;
while (gridIterator->Next(¶msSet)) {
profile.StartIterationBlock();
// paramsSet: {border_count, feature_border_type, nan_mode, [others]}
TQuantizationParamsInfo quantizationParamsSet;
quantizationParamsSet.BinsCount = GetRandomValueIfNeeded(paramsSet[0], randDistGenerators).GetInteger();
quantizationParamsSet.BorderType = FromString<EBorderSelectionType>(paramsSet[1].GetString());
quantizationParamsSet.NanMode = FromString<ENanMode>(paramsSet[2].GetString());
AssignOptionsToJson(
TConstArrayRef<TString>(paramNames),
TConstArrayRef<NJson::TJsonValue>(
paramsSet.begin() + IndexOfFirstTrainingParameter,
paramsSet.end()
), // Ignoring quantization params
randDistGenerators,
modelParamsToBeTried
);
NCatboostOptions::TCatBoostOptions catBoostOptions(ETaskType::CPU);
NCatboostOptions::TOutputFilesOptions outputFileOptions;
bool areParamsValid = ParseJsonParams(
data.Get()->MetaInfo,
*modelParamsToBeTried,
&catBoostOptions,
&outputFileOptions,
¶msErrorString
);
if (!areParamsValid) {
continue;
}
foundValidParams = true;
TString tmpDir;
if (outputFileOptions.AllowWriteFiles()) {
NCB::NPrivate::CreateTrainDirWithTmpDirIfNotExist(outputFileOptions.GetTrainDir(), &tmpDir);
}
InitializeEvalMetricIfNotSet(catBoostOptions.MetricOptions->ObjectiveMetric, &catBoostOptions.MetricOptions->EvalMetric);
NCB::TFeaturesLayoutPtr featuresLayout = data->MetaInfo.FeaturesLayout;
NCB::TQuantizedFeaturesInfoPtr quantizedFeaturesInfo;
TMetricsAndTimeLeftHistory metricsAndTimeHistory;
TVector<TCVResult> cvResult;
{
TSetLogging inThisScope(catBoostOptions.LoggingLevel);
lastQuantizationParamsSet = quantizationParamsSet;
CrossValidate(
*modelParamsToBeTried,
quantizedFeaturesInfo,
objectiveDescriptor,
evalMetricDescriptor,
labelConverter,
data,
cvParams,
localExecutor,
&cvResult);
}
ui32 approxDimension = NCB::GetApproxDimension(catBoostOptions, labelConverter, data->RawTargetData.GetTargetDimension());
const TVector<THolder<IMetric>> metrics = CreateMetrics(
catBoostOptions.MetricOptions,
evalMetricDescriptor,
approxDimension,
data->MetaInfo.HasWeights
);
double bestMetricValue = cvResult[0].AverageTest.back(); //[testId][lossDescription]
if (iterationIdx == 0) {
// We guarantee to update the parameters on the first iteration
bestParamsSetMetricValue = cvResult[0].AverageTest.back() + GetSignForMetricMinimization(metrics[0]);
if (outputFileOptions.AllowWriteFiles()) {
// Initialize Files Loggers
TString namesPrefix = "fold_0_";
TOutputFiles outputFiles(outputFileOptions, namesPrefix);
InitializeFilesLoggers(
metrics,
outputFiles,
gridIterator->GetTotalElementsCount(),
ELaunchMode::CV,
cvParams.FoldCount,
parametersToken,
&logger
);
}
}
bool isUpdateBest = SetBestParamsAndUpdateMetricValueIfNeeded(
bestMetricValue,
metrics,
quantizationParamsSet,
*modelParamsToBeTried,
paramNames,
quantizedFeaturesInfo,
bestGridParams,
&bestParamsSetMetricValue);
if (isUpdateBest) {
bestIterationIdx = iterationIdx;
*bestCvResult = cvResult;
}
const TString& lossDescription = metrics[0]->GetDescription();
TOneInterationLogger oneIterLogger(logger);
oneIterLogger.OutputMetric(
searchToken,
TMetricEvalResult(
lossDescription,
bestMetricValue,
bestParamsSetMetricValue,
bestIterationIdx,
true
)
);
if (outputFileOptions.AllowWriteFiles()) {
//log metrics
const auto& skipMetricOnTrain = GetSkipMetricOnTrain(metrics);
for (auto foldIdx : xrange((size_t)cvParams.FoldCount)) {
for (auto metricIdx : xrange(metrics.size())) {
LogTrainTest(
metrics[metricIdx]->GetDescription(),
oneIterLogger,
skipMetricOnTrain[metricIdx] ? Nothing() :
MakeMaybe<double>(cvResult[metricIdx].LastTrainEvalMetric[foldIdx]),
cvResult[metricIdx].LastTestEvalMetric[foldIdx],
GetNamesPrefix(foldIdx) + "learn",
GetNamesPrefix(foldIdx) + "test",
metricIdx == 0
);
}
}
//log parameters
LogParameters(
paramNames,
paramsSet,
parametersToken,
generalQuantizeParamsInfo,
oneIterLogger
);
}
profile.FinishIterationBlock(1);
oneIterLogger.OutputProfile(profile.GetProfileResults());
iterationIdx++;
}
if (!foundValidParams) {
ythrow TCatBoostException() << "All params in grid were invalid, last error message: " << paramsErrorString;
}
return bestParamsSetMetricValue;
}
double TuneHyperparamsTrainTest(
const TVector<TString>& paramNames,
const TMaybe<TCustomObjectiveDescriptor>& objectiveDescriptor,
const TMaybe<TCustomMetricDescriptor>& evalMetricDescriptor,
const TTrainTestSplitParams& trainTestSplitParams,
const TGeneralQuatizationParamsInfo& generalQuantizeParamsInfo,
ui64 cpuUsedRamLimit,
NCB::TDataProviderPtr data,
TProductIteratorBase<TDeque<NJson::TJsonValue>, NJson::TJsonValue>* gridIterator,
NJson::TJsonValue* modelParamsToBeTried,
TGridParamsInfo * bestGridParams,
TMetricsAndTimeLeftHistory* trainTestResult,
NPar::ILocalExecutor* localExecutor,
int verbose,
const THashMap<TString, NCB::TCustomRandomDistributionGenerator>& randDistGenerators = {}) {
TRestorableFastRng64 rand(trainTestSplitParams.PartitionRandSeed);
if (trainTestSplitParams.Shuffle) {
auto objectsGroupingSubset = NCB::Shuffle(data->ObjectsGrouping, 1, &rand);
data = data->GetSubset(objectsGroupingSubset, cpuUsedRamLimit, localExecutor);
}
TSetLogging inThisScope(ELoggingLevel::Debug);
TLogger logger;
TString searchToken = "loss";
const auto parametersToken = GetParametersToken();
AddConsoleLogger(
searchToken,
{},
/*hasTrain=*/true,
verbose,
gridIterator->GetTotalElementsCount(),
&logger
);
double bestParamsSetMetricValue = 0;
// Other parameters
NCB::TTrainingDataProviders trainTestData;
TQuantizationParamsInfo lastQuantizationParamsSet;
TLabelConverter labelConverter;
int iterationIdx = 0;
int bestIterationIdx = 0;
TProfileInfo profile(gridIterator->GetTotalElementsCount());
TConstArrayRef<NJson::TJsonValue> paramsSet;
bool foundValidParams = false;
TString paramsErrorString;
while (gridIterator->Next(¶msSet)) {
profile.StartIterationBlock();
// paramsSet: {border_count, feature_border_type, nan_mode, [others]}
TQuantizationParamsInfo quantizationParamsSet;
quantizationParamsSet.BinsCount = GetRandomValueIfNeeded(paramsSet[0], randDistGenerators).GetInteger();
quantizationParamsSet.BorderType = FromString<EBorderSelectionType>(paramsSet[1].GetString());
quantizationParamsSet.NanMode = FromString<ENanMode>(paramsSet[2].GetString());
AssignOptionsToJson(
TConstArrayRef<TString>(paramNames),
TConstArrayRef<NJson::TJsonValue>(
paramsSet.begin() + IndexOfFirstTrainingParameter,
paramsSet.end()
), // Ignoring quantization params
randDistGenerators,
modelParamsToBeTried
);
NCatboostOptions::TCatBoostOptions catBoostOptions(ETaskType::CPU);
NCatboostOptions::TOutputFilesOptions outputFileOptions;
bool areParamsValid = ParseJsonParams(
data.Get()->MetaInfo,
*modelParamsToBeTried,
&catBoostOptions,
&outputFileOptions,
¶msErrorString
);
if (!areParamsValid) {
continue;
}
foundValidParams = true;
static const bool allowWriteFiles = outputFileOptions.AllowWriteFiles();
TString tmpDir;
if (allowWriteFiles) {
NCB::NPrivate::CreateTrainDirWithTmpDirIfNotExist(outputFileOptions.GetTrainDir(), &tmpDir);
}
InitializeEvalMetricIfNotSet(catBoostOptions.MetricOptions->ObjectiveMetric, &catBoostOptions.MetricOptions->EvalMetric);
UpdateSampleRateOption(data->GetObjectCount(), &catBoostOptions);
NCB::TFeaturesLayoutPtr featuresLayout = data->MetaInfo.FeaturesLayout;
NCB::TQuantizedFeaturesInfoPtr quantizedFeaturesInfo;
TMetricsAndTimeLeftHistory metricsAndTimeHistory;
{
TSetLogging inThisScope(catBoostOptions.LoggingLevel);
QuantizeAndSplitDataIfNeeded(
allowWriteFiles,
tmpDir,
trainTestSplitParams,
cpuUsedRamLimit,
featuresLayout,
quantizedFeaturesInfo,
data,
lastQuantizationParamsSet,
quantizationParamsSet,
&labelConverter,
localExecutor,
&rand,
&catBoostOptions,
&trainTestData
);
lastQuantizationParamsSet = quantizationParamsSet;
THolder<IModelTrainer> modelTrainerHolder = THolder<IModelTrainer>(TTrainerFactory::Construct(catBoostOptions.GetTaskType()));
TEvalResult evalRes;
TTrainModelInternalOptions internalOptions;
internalOptions.CalcMetricsOnly = true;
internalOptions.ForceCalcEvalMetricOnEveryIteration = false;
internalOptions.OffsetMetricPeriodByInitModelSize = true;
outputFileOptions.SetAllowWriteFiles(false);
const auto defaultTrainingCallbacks = MakeHolder<ITrainingCallbacks>();
const auto defaultCustomCallbacks = MakeHolder<TCustomCallbacks>(Nothing());
// Training model
modelTrainerHolder->TrainModel(
internalOptions,
catBoostOptions,
outputFileOptions,
objectiveDescriptor,
evalMetricDescriptor,
trainTestData,
/*precomputedSingleOnlineCtrDataForSingleFold*/ Nothing(),
labelConverter,
defaultTrainingCallbacks.Get(), // TODO(ilikepugs): MLTOOLS-3540
defaultCustomCallbacks.Get(),
/*initModel*/ Nothing(),
/*initLearnProgress*/ nullptr,
/*initModelApplyCompatiblePools*/ NCB::TDataProviders(),
localExecutor,
&rand,
/*dstModel*/ nullptr,
/*evalResultPtrs*/ {&evalRes},
&metricsAndTimeHistory,
/*dstLearnProgress*/nullptr
);
}
ui32 approxDimension = NCB::GetApproxDimension(catBoostOptions, labelConverter, data->RawTargetData.GetTargetDimension());
const TVector<THolder<IMetric>> metrics = CreateMetrics(
catBoostOptions.MetricOptions,
evalMetricDescriptor,
approxDimension,
data->MetaInfo.HasWeights
);
const TString& lossDescription = metrics[0]->GetDescription();
double bestMetricValue = metricsAndTimeHistory.TestBestError[0][lossDescription]; //[testId][lossDescription]
if (iterationIdx == 0) {
// We guarantee to update the parameters on the first iteration
bestParamsSetMetricValue = bestMetricValue + GetSignForMetricMinimization(metrics[0]);
outputFileOptions.SetAllowWriteFiles(allowWriteFiles);
if (allowWriteFiles) {
// Initialize Files Loggers
TOutputFiles outputFiles(outputFileOptions, "");
InitializeFilesLoggers(
metrics,
outputFiles,
gridIterator->GetTotalElementsCount(),
ELaunchMode::Train,
trainTestData.Test.ysize(),
parametersToken,
&logger
);
}
(*trainTestResult) = metricsAndTimeHistory;
}
bool isUpdateBest = SetBestParamsAndUpdateMetricValueIfNeeded(
bestMetricValue,
metrics,
quantizationParamsSet,
*modelParamsToBeTried,
paramNames,
quantizedFeaturesInfo,
bestGridParams,
&bestParamsSetMetricValue);
if (isUpdateBest) {
bestIterationIdx = iterationIdx;
(*trainTestResult) = metricsAndTimeHistory;
}
TOneInterationLogger oneIterLogger(logger);
oneIterLogger.OutputMetric(
searchToken,
TMetricEvalResult(
lossDescription,
bestMetricValue,
bestParamsSetMetricValue,
bestIterationIdx,
true
)
);
if (allowWriteFiles) {
//log metrics
const auto& skipMetricOnTrain = GetSkipMetricOnTrain(metrics);
auto& learnErrors = metricsAndTimeHistory.LearnBestError;
auto& testErrors = metricsAndTimeHistory.TestBestError[0];
for (auto metricIdx : xrange(metrics.size())) {
const auto& lossDescription = metrics[metricIdx]->GetDescription();
LogTrainTest(
lossDescription,
oneIterLogger,
skipMetricOnTrain[metricIdx] ? Nothing() :
MakeMaybe<double>(learnErrors.at(lossDescription)),
testErrors.at(lossDescription),
"learn",
"test",
metricIdx == 0
);
}
//log parameters
LogParameters(
paramNames,
paramsSet,
parametersToken,
generalQuantizeParamsInfo,
oneIterLogger
);
}
profile.FinishIterationBlock(1);
oneIterLogger.OutputProfile(profile.GetProfileResults());
iterationIdx++;
}
if (!foundValidParams) {
ythrow TCatBoostException() << "All params in grid were invalid, last error message: " << paramsErrorString;
}
return bestParamsSetMetricValue;
}
} // anonymous namespace
namespace NCB {
void TBestOptionValuesWithCvResult::SetOptionsFromJson(
const THashMap<TString, NJson::TJsonValue>& options,
const TVector<TString>& optionsNames) {
BoolOptions.clear();
IntOptions.clear();
UIntOptions.clear();
DoubleOptions.clear();
StringOptions.clear();
ListOfDoublesOptions.clear();
for (const auto& optionName : optionsNames) {
const auto& option = options.at(optionName);
NJson::EJsonValueType type = option.GetType();
switch(type) {
case NJson::EJsonValueType::JSON_BOOLEAN: {
BoolOptions[optionName] = option.GetBoolean();
break;
}
case NJson::EJsonValueType::JSON_INTEGER: {
IntOptions[optionName] = option.GetInteger();
break;
}
case NJson::EJsonValueType::JSON_UINTEGER: {
UIntOptions[optionName] = option.GetUInteger();
break;
}
case NJson::EJsonValueType::JSON_DOUBLE: {
DoubleOptions[optionName] = option.GetDouble();
break;
}
case NJson::EJsonValueType::JSON_STRING: {
StringOptions[optionName] = option.GetString();
break;
}
case NJson::EJsonValueType::JSON_ARRAY: {
for (const auto& listElement : option.GetArray()) {
ListOfDoublesOptions[optionName].push_back(listElement.GetDouble());
}
break;
}
default: {
CB_ENSURE(false, "Error: option value should be bool, int, ui32, double, string or list of doubles");
}
}
}
}
void GridSearch(
const NJson::TJsonValue& gridJsonValues,
const NJson::TJsonValue& modelJsonParams,
const TTrainTestSplitParams& trainTestSplitParams,
const TCrossValidationParams& cvParams,
const TMaybe<TCustomObjectiveDescriptor>& objectiveDescriptor,
const TMaybe<TCustomMetricDescriptor>& evalMetricDescriptor,
TDataProviderPtr data,
TBestOptionValuesWithCvResult* bestOptionValuesWithCvResult,
TMetricsAndTimeLeftHistory* trainTestResult,
bool isSearchUsingTrainTestSplit,
bool returnCvStat,
int verbose) {
// CatBoost options
NJson::TJsonValue jsonParams;
NJson::TJsonValue outputJsonParams;
NCatboostOptions::PlainJsonToOptions(modelJsonParams, &jsonParams, &outputJsonParams);
ConvertParamsToCanonicalFormat(data.Get()->MetaInfo, &jsonParams);
NCatboostOptions::TCatBoostOptions catBoostOptions(NCatboostOptions::LoadOptions(jsonParams));
NCatboostOptions::TOutputFilesOptions outputFileOptions;
outputFileOptions.Load(outputJsonParams);
CB_ENSURE(!outputJsonParams["save_snapshot"].GetBoolean(), "Snapshots are not yet supported for GridSearchCV");
InitializeEvalMetricIfNotSet(catBoostOptions.MetricOptions->ObjectiveMetric, &catBoostOptions.MetricOptions->EvalMetric);
NPar::TLocalExecutor localExecutor;
localExecutor.RunAdditionalThreads(catBoostOptions.SystemOptions->NumThreads.Get() - 1);
TGridParamsInfo bestGridParams;
TDeque<NJson::TJsonValue> paramGrids;
if (gridJsonValues.GetType() == NJson::EJsonValueType::JSON_MAP) {
paramGrids.push_back(gridJsonValues);
} else {
paramGrids = gridJsonValues.GetArray();
}
double bestParamsSetMetricValue = Max<double>();
TVector<TCVResult> bestCvResult;
for (auto gridEnumerator : xrange(paramGrids.size())) {
auto grid = paramGrids[gridEnumerator];
// Preparing parameters for cartesian product
TVector<TDeque<NJson::TJsonValue>> paramPossibleValues; // {border_count, feature_border_type, nan_mode, ...}
TGeneralQuatizationParamsInfo generalQuantizeParamsInfo;
TQuantizationParamsInfo quantizationParamsSet;
TVector<TString> paramNames;
NJson::TJsonValue modelParamsToBeTried(modelJsonParams);
TGridParamsInfo gridParams;
ParseGridParams(
catBoostOptions,
&grid,
&modelParamsToBeTried,
¶mNames,
¶mPossibleValues,
&generalQuantizeParamsInfo
);
TCartesianProductIterator<TDeque<NJson::TJsonValue>, NJson::TJsonValue> gridIterator(paramPossibleValues);
const ui64 cpuUsedRamLimit
= ParseMemorySizeDescription(catBoostOptions.SystemOptions->CpuUsedRamLimit.Get());
double metricValue;
if (verbose && paramGrids.size() > 1) {
TSetLogging inThisScope(ELoggingLevel::Verbose);
CATBOOST_NOTICE_LOG << "Grid #" << gridEnumerator << Endl;
}
if (isSearchUsingTrainTestSplit) {
metricValue = TuneHyperparamsTrainTest(
paramNames,
objectiveDescriptor,
evalMetricDescriptor,
trainTestSplitParams,
generalQuantizeParamsInfo,
cpuUsedRamLimit,
data,
&gridIterator,
&modelParamsToBeTried,
&gridParams,
trainTestResult,
&localExecutor,
verbose
);
} else {
metricValue = TuneHyperparamsCV(
paramNames,
objectiveDescriptor,
evalMetricDescriptor,
cvParams,
generalQuantizeParamsInfo,
cpuUsedRamLimit,
data,
&gridIterator,
&modelParamsToBeTried,
&gridParams,
&bestCvResult,
&localExecutor,
verbose
);
}
if (metricValue < bestParamsSetMetricValue) {
bestGridParams = gridParams;
bestGridParams.QuantizationParamsSet.GeneralInfo = generalQuantizeParamsInfo;
SetGridParamsToBestOptionValues(bestGridParams, bestOptionValuesWithCvResult);
}
}
if (returnCvStat || isSearchUsingTrainTestSplit) {
if (isSearchUsingTrainTestSplit) {
if (verbose) {
TSetLogging inThisScope(ELoggingLevel::Verbose);
CATBOOST_NOTICE_LOG << "Estimating final quality...\n";
}
CrossValidate(
bestGridParams.OthersParamsSet,
bestGridParams.QuantizedFeatureInfo,
objectiveDescriptor,
evalMetricDescriptor,
data,
cvParams,
&(bestOptionValuesWithCvResult->CvResult)
);
} else {
bestOptionValuesWithCvResult->CvResult = bestCvResult;
}
}
}
void RandomizedSearch(
ui32 numberOfTries,
const THashMap<TString, TCustomRandomDistributionGenerator>& randDistGenerators,
const NJson::TJsonValue& gridJsonValues,
const NJson::TJsonValue& modelJsonParams,
const TTrainTestSplitParams& trainTestSplitParams,
const TCrossValidationParams& cvParams,
const TMaybe<TCustomObjectiveDescriptor>& objectiveDescriptor,
const TMaybe<TCustomMetricDescriptor>& evalMetricDescriptor,
TDataProviderPtr data,
TBestOptionValuesWithCvResult* bestOptionValuesWithCvResult,
TMetricsAndTimeLeftHistory* trainTestResult,
bool isSearchUsingTrainTestSplit,
bool returnCvStat,
int verbose) {
// CatBoost options
NJson::TJsonValue jsonParams;
NJson::TJsonValue outputJsonParams;
NCatboostOptions::PlainJsonToOptions(modelJsonParams, &jsonParams, &outputJsonParams);
ConvertParamsToCanonicalFormat(data.Get()->MetaInfo, &jsonParams);
NCatboostOptions::TCatBoostOptions catBoostOptions(NCatboostOptions::LoadOptions(jsonParams));
NCatboostOptions::TOutputFilesOptions outputFileOptions;
outputFileOptions.Load(outputJsonParams);
CB_ENSURE(!outputJsonParams["save_snapshot"].GetBoolean(), "Snapshots are not yet supported for RandomizedSearchCV");
InitializeEvalMetricIfNotSet(catBoostOptions.MetricOptions->ObjectiveMetric, &catBoostOptions.MetricOptions->EvalMetric);
NPar::TLocalExecutor localExecutor;
localExecutor.RunAdditionalThreads(catBoostOptions.SystemOptions->NumThreads.Get() - 1);
NJson::TJsonValue paramGrid;
if (gridJsonValues.GetType() == NJson::EJsonValueType::JSON_MAP) {
paramGrid = gridJsonValues;
} else {
paramGrid = gridJsonValues.GetArray()[0];
}
// Preparing parameters for cartesian product
TVector<TDeque<NJson::TJsonValue>> paramPossibleValues; // {border_count, feature_border_type, nan_mode, ...}
TGeneralQuatizationParamsInfo generalQuantizeParamsInfo;
TQuantizationParamsInfo quantizationParamsSet;
TVector<TString> paramNames;
NJson::TJsonValue modelParamsToBeTried(modelJsonParams);
ParseGridParams(
catBoostOptions,
¶mGrid,
&modelParamsToBeTried,
¶mNames,
¶mPossibleValues,
&generalQuantizeParamsInfo
);
TRandomizedProductIterator<TDeque<NJson::TJsonValue>, NJson::TJsonValue> gridIterator(
paramPossibleValues,
numberOfTries,
randDistGenerators.size() > 0
);
const ui64 cpuUsedRamLimit
= ParseMemorySizeDescription(catBoostOptions.SystemOptions->CpuUsedRamLimit.Get());
TGridParamsInfo bestGridParams;
TVector<TCVResult> cvResult;
if (isSearchUsingTrainTestSplit) {
TuneHyperparamsTrainTest(
paramNames,
objectiveDescriptor,
evalMetricDescriptor,
trainTestSplitParams,
generalQuantizeParamsInfo,
cpuUsedRamLimit,
data,
&gridIterator,
&modelParamsToBeTried,
&bestGridParams,
trainTestResult,
&localExecutor,
verbose,
randDistGenerators
);
} else {
TuneHyperparamsCV(
paramNames,
objectiveDescriptor,
evalMetricDescriptor,
cvParams,
generalQuantizeParamsInfo,
cpuUsedRamLimit,
data,
&gridIterator,
&modelParamsToBeTried,
&bestGridParams,
&cvResult,
&localExecutor,
verbose,
randDistGenerators
);
}
bestGridParams.QuantizationParamsSet.GeneralInfo = generalQuantizeParamsInfo;
SetGridParamsToBestOptionValues(bestGridParams, bestOptionValuesWithCvResult);
if (returnCvStat || isSearchUsingTrainTestSplit) {
if (isSearchUsingTrainTestSplit) {
if (verbose) {
TSetLogging inThisScope(ELoggingLevel::Verbose);
CATBOOST_NOTICE_LOG << "Estimating final quality...\n";
}
CrossValidate(
bestGridParams.OthersParamsSet,
bestGridParams.QuantizedFeatureInfo,
objectiveDescriptor,
evalMetricDescriptor,
data,
cvParams,
&(bestOptionValuesWithCvResult->CvResult)
);
} else {
bestOptionValuesWithCvResult->CvResult = cvResult;
}
}
}
}
| 28,888 |
839 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cxf.jaxrs.impl;
import javax.ws.rs.core.CacheControl;
import javax.ws.rs.core.Cookie;
import javax.ws.rs.core.EntityTag;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.NewCookie;
import javax.ws.rs.core.Response.ResponseBuilder;
import javax.ws.rs.core.UriBuilder;
import javax.ws.rs.core.Variant.VariantListBuilder;
import javax.ws.rs.ext.RuntimeDelegate;
import org.junit.Test;
import static org.junit.Assert.assertSame;
public class RuntimeDelegateImplTest {
@Test
public void testRuntimeDelegate() throws Exception {
RuntimeDelegate rd = RuntimeDelegate.getInstance();
assertSame(rd.getClass(), RuntimeDelegateImpl.class);
}
@Test
public void testCreateInstance() throws Exception {
assertSame(ResponseBuilderImpl.class,
new RuntimeDelegateImpl().
createInstance(ResponseBuilder.class).getClass());
assertSame(UriBuilderImpl.class,
new RuntimeDelegateImpl().
createInstance(UriBuilder.class).getClass());
assertSame(VariantListBuilderImpl.class,
new RuntimeDelegateImpl().
createInstance(VariantListBuilder.class).getClass());
}
@Test
public void testCreateHeaderProvider() throws Exception {
assertSame(MediaTypeHeaderProvider.class,
new RuntimeDelegateImpl().
createHeaderDelegate(MediaType.class).getClass());
assertSame(EntityTagHeaderProvider.class,
new RuntimeDelegateImpl().
createHeaderDelegate(EntityTag.class).getClass());
assertSame(CacheControlHeaderProvider.class,
new RuntimeDelegateImpl().
createHeaderDelegate(CacheControl.class).getClass());
assertSame(CookieHeaderProvider.class,
new RuntimeDelegateImpl().
createHeaderDelegate(Cookie.class).getClass());
assertSame(NewCookieHeaderProvider.class,
new RuntimeDelegateImpl().
createHeaderDelegate(NewCookie.class).getClass());
}
} | 1,137 |
5,921 | <reponame>Zilv1128/test1
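# Descriptive summary (added; the original script is uncommented): reads dev-other.lst from the
# directory given as argv[1], then for five seed offsets (0, 2, 3, 4, 5 on top of 42) writes
# tts_shuffled_<n>.txt in which every input line is followed by a copy of its whitespace-separated
# fields from position 3 onward in random order; note that `data` itself is re-permuted
# cumulatively on each pass.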
import os
import sys
import numpy
numpy.random.seed(42)
with open(os.path.join(sys.argv[1], "dev-other.lst"), "r") as f:
data = [line.strip() for line in f]
for n, seed_val in enumerate([0, 2, 3, 4, 5]):
numpy.random.seed(42 + seed_val)
data = numpy.random.permutation(data)
with open("tts_shuffled_{}.txt".format(n), "w") as fout:
for line in data:
line_new = line.split(" ")
new_tr = numpy.random.permutation(line_new[3:])
fout.write(line + "\n")
fout.write("{}\n".format(" ".join(new_tr)))
| 286 |
346 | <gh_stars>100-1000
/**
* This defines the loading screen to use for specific sectors. If a sector's loading screen is not defined here, the game falls back to its built-in rules to determine the loading screen.
*
* Fields:
* - sector: The short name of the sector, e.g. A9, B10
* - sectorLevel: The Z-level if the sector is underground. Defaults to 0, meaning that the sector is on the surface
* - night: The internal name of the loading screen to use during night time (see loading-screens.json)
* - day: The internal name of the loading screen to use during day time
*/
[
{"sector": "A2", "night": "NIGHTCHITZENA", "day": "DAYCHITZENA"},
{"sector": "B2", "night": "NIGHTCHITZENA" , "day": "DAYCHITZENA"},
{"sector": "A9", "night": "NIGHTOMERTA", "day": "DAYOMERTA"},
{"sector": "A10", "night": "NIGHTOMERTA", "day": "DAYOMERTA"},
{"sector": "P3", "night": "NIGHTPALACE", "day": "DAYPALACE"},
// Military installations
{"sector": "H13", "night": "NIGHTMILITARY", "day": "DAYMILITARY"},
{"sector": "H14", "night": "NIGHTMILITARY", "day": "DAYMILITARY"},
{"sector": "I13", "night": "NIGHTMILITARY", "day": "DAYMILITARY"},
{"sector": "N7", "night": "NIGHTMILITARY", "day": "DAYMILITARY"},
{"sector": "K4", "night": "NIGHTLAB", "day": "DAYLAB"},
{"sector": "J9", "night": "NIGHTPRISON", "day": "DAYPRISON"},
{"sector": "F8", "night": "NIGHTHOSPITAL", "day": "DAYHOSPITAL"},
{"sector": "B13", "night": "NIGHTAIRPORT", "day": "DAYAIRPORT"},
{"sector": "N3", "night": "NIGHTAIRPORT", "day": "DAYAIRPORT"},
{"sector": "L11", "night": "NIGHTBALIME", "day": "DAYBALIME"},
{"sector": "L12", "night": "NIGHTBALIME", "day": "DAYBALIME"},
{"sector": "H3", "night": "NIGHTMINE", "day": "DAYMINE"},
{"sector": "H8", "night": "NIGHTMINE", "day": "DAYMINE"},
{"sector": "D4", "night": "NIGHTMINE", "day": "DAYMINE"},
{"sector": "A10", "sectorLevel": 1, "night": "BASEMENT", "day": "BASEMENT"}, // Miguel's basement
{"sector": "I13", "sectorLevel": 1, "night": "BASEMENT", "day": "BASEMENT"}, // Alma prison dungeon
{"sector": "J9", "sectorLevel": 1, "night": "BASEMENT", "day": "BASEMENT"}, // Tixa prison dungeon
{"sector": "K4", "sectorLevel": 1, "night": "BASEMENT", "day": "BASEMENT"}, // Orta weapons plant
{"sector": "O3", "sectorLevel": 1, "night": "BASEMENT", "day": "BASEMENT"}, // Meduna
{"sector": "P3", "sectorLevel": 1, "night": "BASEMENT", "day": "BASEMENT"} // Meduna
] | 1,060 |
1,863 | //
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2018 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_PHYSICS_NX_PRUNING_STRUCTURE
#define PX_PHYSICS_NX_PRUNING_STRUCTURE
/** \addtogroup physics
@{ */
#include "PxPhysXConfig.h"
#include "common/PxBase.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief A precomputed pruning structure to accelerate scene queries against newly added actors.
The pruning structure can be provided to #PxScene::addActors(), in which case it will get merged
directly into the scene query optimization AABB tree, thus leading to improved performance when
doing queries against the newly added actors. This applies to both static and dynamic actors.
\note PxPruningStructure objects can be added to a collection and get serialized.
\note Adding a PxPruningStructure object to a collection will also add the actors that were used to build the pruning structure.
\note PxPruningStructure must be released before its rigid actors.
\note PxRigidBody objects can be in one PxPruningStructure only.
\note Changing the bounds of PxRigidBody objects assigned to a pruning structure that has not been added to a scene yet will
invalidate the pruning structure. The same happens if a shape's scene query flags change or a shape gets removed from an actor.
@see PxScene::addActors PxCollection
*/
class PxPruningStructure : public PxBase
{
public:
/**
\brief Release this object.
*/
virtual void release() = 0;
/**
\brief Retrieve rigid actors in the pruning structure.
You can retrieve the number of rigid actor pointers by calling #getNbRigidActors()
\param[out] userBuffer The buffer to store the actor pointers.
\param[in] bufferSize Size of provided user buffer.
\param[in] startIndex Index of first actor pointer to be retrieved
\return Number of rigid actor pointers written to the buffer.
@see PxRigidActor
*/
virtual PxU32 getRigidActors(PxRigidActor** userBuffer, PxU32 bufferSize, PxU32 startIndex=0) const = 0;
/**
\brief Returns the number of rigid actors in the pruning structure.
You can use #getRigidActors() to retrieve the rigid actor pointers.
\return Number of rigid actors in the pruning structure.
@see PxRigidActor
*/
virtual PxU32 getNbRigidActors() const = 0;
virtual const char* getConcreteTypeName() const { return "PxPruningStructure"; }
protected:
PX_INLINE PxPruningStructure(PxType concreteType, PxBaseFlags baseFlags) : PxBase(concreteType, baseFlags) {}
PX_INLINE PxPruningStructure(PxBaseFlags baseFlags) : PxBase(baseFlags) {}
virtual ~PxPruningStructure() {}
virtual bool isKindOf(const char* name) const { return !::strcmp("PxPruningStructure", name) || PxBase::isKindOf(name); }
};
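// Illustrative usage sketch (not part of the original header); it assumes a PxPruningStructure* ps
// obtained from cooking and an existing PxScene* scene:
//     scene->addActors(*ps); // merges the precomputed AABB tree into the scene query structure
//     ps->release(); // per the notes above, release before releasing the covered actors
// The covered actors can be inspected beforehand with getNbRigidActors()/getRigidActors().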
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif // PX_PHYSICS_NX_PRUNING_STRUCTURE
| 1,362 |
488 | <reponame>maurizioabba/rose<gh_stars>100-1000
#include <mpi.h>
#include <stdlib.h>
int SOURCE_DETERMINISM = 1;
int TAG_DETERMINISM = 1;
int FUNCTION_DETERMINISM = 1;
int foo(void *, int, MPI_Datatype, int, int, MPI_Comm, MPI_Status *);
int main(int argc, char **argv) {
int a;
// Test calling functions that are not MPI routines, but have similar arguments in the same positions
foo(&a, 1, MPI_INT, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, NULL);
MPI_Recv(&a, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, NULL);
return 0;
}
| 229 |
369 | <reponame>Shiva-D/rtos-course<gh_stars>100-1000
/**
* \file
*
* \brief Component description for WDT
*
* Copyright (c) 2013 Atmel Corporation. All rights reserved.
*
* \asf_license_start
*
* \page License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. The name of Atmel may not be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* 4. This software may only be redistributed and used in connection with an
* Atmel microcontroller product.
*
* THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
* EXPRESSLY AND SPECIFICALLY DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* \asf_license_stop
*
*/
#ifndef _SAMD20_WDT_COMPONENT_
#define _SAMD20_WDT_COMPONENT_
/* ========================================================================== */
/** SOFTWARE API DEFINITION FOR WDT */
/* ========================================================================== */
/** \addtogroup SAMD20_WDT Watchdog Timer */
/*@{*/
#define REV_WDT 0x200
/* -------- WDT_CTRL : (WDT Offset: 0x0) (R/W 8) Control Register -------- */
#if !(defined(__ASSEMBLY__) || defined(__IAR_SYSTEMS_ASM__))
typedef union {
struct {
uint8_t :1; /*!< bit: 0 Reserved */
uint8_t ENABLE:1; /*!< bit: 1 Enable */
uint8_t WEN:1; /*!< bit: 2 Watchdog Timer Window Mode Enable */
uint8_t :4; /*!< bit: 3.. 6 Reserved */
uint8_t ALWAYSON:1; /*!< bit: 7 Watchdog Timer Always-On Enable */
} bit; /*!< Structure used for bit access */
uint8_t reg; /*!< Type used for register access */
} WDT_CTRL_Type;
#endif /* !(defined(__ASSEMBLY__) || defined(__IAR_SYSTEMS_ASM__)) */
#define WDT_CTRL_OFFSET 0x0 /**< \brief (WDT_CTRL offset) Control Register */
#define WDT_CTRL_RESETVALUE 0x00 /**< \brief (WDT_CTRL reset_value) Control Register */
#define WDT_CTRL_ENABLE_Pos 1 /**< \brief (WDT_CTRL) Enable */
#define WDT_CTRL_ENABLE (0x1u << WDT_CTRL_ENABLE_Pos)
#define WDT_CTRL_WEN_Pos 2 /**< \brief (WDT_CTRL) Watchdog Timer Window Mode Enable */
#define WDT_CTRL_WEN (0x1u << WDT_CTRL_WEN_Pos)
#define WDT_CTRL_ALWAYSON_Pos 7 /**< \brief (WDT_CTRL) Watchdog Timer Always-On Enable */
#define WDT_CTRL_ALWAYSON (0x1u << WDT_CTRL_ALWAYSON_Pos)
#define WDT_CTRL_MASK 0x86u /**< \brief (WDT_CTRL) MASK Register */
/* -------- WDT_CONFIG : (WDT Offset: 0x1) (R/W 8) Configuration Register -------- */
#if !(defined(__ASSEMBLY__) || defined(__IAR_SYSTEMS_ASM__))
typedef union {
struct {
uint8_t PER:4; /*!< bit: 0.. 3 Timeout Period */
    uint8_t WINDOW:4; /*!< bit: 4.. 7 Watchdog Timer Window Timeout Period */
} bit; /*!< Structure used for bit access */
uint8_t reg; /*!< Type used for register access */
} WDT_CONFIG_Type;
#endif /* !(defined(__ASSEMBLY__) || defined(__IAR_SYSTEMS_ASM__)) */
#define WDT_CONFIG_OFFSET 0x1 /**< \brief (WDT_CONFIG offset) Configuration Register */
#define WDT_CONFIG_RESETVALUE 0xBB /**< \brief (WDT_CONFIG reset_value) Configuration Register */
#define WDT_CONFIG_PER_Pos 0 /**< \brief (WDT_CONFIG) Timeout Period */
#define WDT_CONFIG_PER_Msk (0xFu << WDT_CONFIG_PER_Pos)
#define WDT_CONFIG_PER(value) ((WDT_CONFIG_PER_Msk & ((value) << WDT_CONFIG_PER_Pos)))
#define WDT_CONFIG_WINDOW_Pos 4 /**< \brief (WDT_CONFIG) Watchdog Timer Window Timeout Period */
#define WDT_CONFIG_WINDOW_Msk (0xFu << WDT_CONFIG_WINDOW_Pos)
#define WDT_CONFIG_WINDOW(value) ((WDT_CONFIG_WINDOW_Msk & ((value) << WDT_CONFIG_WINDOW_Pos)))
#define WDT_CONFIG_MASK 0xFFu /**< \brief (WDT_CONFIG) MASK Register */
/* -------- WDT_EWCTRL : (WDT Offset: 0x2) (R/W 8) Early Warning Control Register -------- */
#if !(defined(__ASSEMBLY__) || defined(__IAR_SYSTEMS_ASM__))
typedef union {
struct {
uint8_t EWOFFSET:4; /*!< bit: 0.. 3 Early Warning Interrupt Time Offset */
uint8_t :4; /*!< bit: 4.. 7 Reserved */
} bit; /*!< Structure used for bit access */
uint8_t reg; /*!< Type used for register access */
} WDT_EWCTRL_Type;
#endif /* !(defined(__ASSEMBLY__) || defined(__IAR_SYSTEMS_ASM__)) */
#define WDT_EWCTRL_OFFSET 0x2 /**< \brief (WDT_EWCTRL offset) Early Warning Control Register */
#define WDT_EWCTRL_RESETVALUE 0x0B /**< \brief (WDT_EWCTRL reset_value) Early Warning Control Register */
#define WDT_EWCTRL_EWOFFSET_Pos 0 /**< \brief (WDT_EWCTRL) Early Warning Interrupt Time Offset */
#define WDT_EWCTRL_EWOFFSET_Msk (0xFu << WDT_EWCTRL_EWOFFSET_Pos)
#define WDT_EWCTRL_EWOFFSET(value) ((WDT_EWCTRL_EWOFFSET_Msk & ((value) << WDT_EWCTRL_EWOFFSET_Pos)))
#define WDT_EWCTRL_MASK 0x0Fu /**< \brief (WDT_EWCTRL) MASK Register */
/* -------- WDT_INTENCLR : (WDT Offset: 0x4) (R/W 8) Interrupt Enable Clear Register -------- */
#if !(defined(__ASSEMBLY__) || defined(__IAR_SYSTEMS_ASM__))
typedef union {
struct {
uint8_t EW:1; /*!< bit: 0 Early Warning Interrupt Disable */
uint8_t :7; /*!< bit: 1.. 7 Reserved */
} bit; /*!< Structure used for bit access */
uint8_t reg; /*!< Type used for register access */
} WDT_INTENCLR_Type;
#endif /* !(defined(__ASSEMBLY__) || defined(__IAR_SYSTEMS_ASM__)) */
#define WDT_INTENCLR_OFFSET 0x4 /**< \brief (WDT_INTENCLR offset) Interrupt Enable Clear Register */
#define WDT_INTENCLR_RESETVALUE 0x00 /**< \brief (WDT_INTENCLR reset_value) Interrupt Enable Clear Register */
#define WDT_INTENCLR_EW_Pos 0 /**< \brief (WDT_INTENCLR) Early Warning Interrupt Disable */
#define WDT_INTENCLR_EW (0x1u << WDT_INTENCLR_EW_Pos)
#define WDT_INTENCLR_MASK 0x01u /**< \brief (WDT_INTENCLR) MASK Register */
/* -------- WDT_INTENSET : (WDT Offset: 0x5) (R/W 8) Interrupt Enable Set Register -------- */
#if !(defined(__ASSEMBLY__) || defined(__IAR_SYSTEMS_ASM__))
typedef union {
struct {
uint8_t EW:1; /*!< bit: 0 Early Warning Interrupt Enable */
uint8_t :7; /*!< bit: 1.. 7 Reserved */
} bit; /*!< Structure used for bit access */
uint8_t reg; /*!< Type used for register access */
} WDT_INTENSET_Type;
#endif /* !(defined(__ASSEMBLY__) || defined(__IAR_SYSTEMS_ASM__)) */
#define WDT_INTENSET_OFFSET 0x5 /**< \brief (WDT_INTENSET offset) Interrupt Enable Set Register */
#define WDT_INTENSET_RESETVALUE 0x00 /**< \brief (WDT_INTENSET reset_value) Interrupt Enable Set Register */
#define WDT_INTENSET_EW_Pos 0 /**< \brief (WDT_INTENSET) Early Warning Interrupt Enable */
#define WDT_INTENSET_EW (0x1u << WDT_INTENSET_EW_Pos)
#define WDT_INTENSET_MASK 0x01u /**< \brief (WDT_INTENSET) MASK Register */
/* -------- WDT_INTFLAG : (WDT Offset: 0x6) (R/W 8) Interrupt Flag Status and Clear Register -------- */
#if !(defined(__ASSEMBLY__) || defined(__IAR_SYSTEMS_ASM__))
typedef union {
struct {
uint8_t EW:1; /*!< bit: 0 Early Warning Interrupt Flag */
uint8_t :7; /*!< bit: 1.. 7 Reserved */
} bit; /*!< Structure used for bit access */
uint8_t reg; /*!< Type used for register access */
} WDT_INTFLAG_Type;
#endif /* !(defined(__ASSEMBLY__) || defined(__IAR_SYSTEMS_ASM__)) */
#define WDT_INTFLAG_OFFSET 0x6 /**< \brief (WDT_INTFLAG offset) Interrupt Flag Status and Clear Register */
#define WDT_INTFLAG_RESETVALUE 0x00 /**< \brief (WDT_INTFLAG reset_value) Interrupt Flag Status and Clear Register */
#define WDT_INTFLAG_EW_Pos 0 /**< \brief (WDT_INTFLAG) Early Warning Interrupt Flag */
#define WDT_INTFLAG_EW (0x1u << WDT_INTFLAG_EW_Pos)
#define WDT_INTFLAG_MASK 0x01u /**< \brief (WDT_INTFLAG) MASK Register */
/* -------- WDT_STATUS : (WDT Offset: 0x7) (R/ 8) Status Register -------- */
#if !(defined(__ASSEMBLY__) || defined(__IAR_SYSTEMS_ASM__))
typedef union {
struct {
uint8_t :7; /*!< bit: 0.. 6 Reserved */
uint8_t SYNCBUSY:1; /*!< bit: 7 Synchronization Busy */
} bit; /*!< Structure used for bit access */
uint8_t reg; /*!< Type used for register access */
} WDT_STATUS_Type;
#endif /* !(defined(__ASSEMBLY__) || defined(__IAR_SYSTEMS_ASM__)) */
#define WDT_STATUS_OFFSET 0x7 /**< \brief (WDT_STATUS offset) Status Register */
#define WDT_STATUS_RESETVALUE 0x00 /**< \brief (WDT_STATUS reset_value) Status Register */
#define WDT_STATUS_SYNCBUSY_Pos 7 /**< \brief (WDT_STATUS) Synchronization Busy */
#define WDT_STATUS_SYNCBUSY (0x1u << WDT_STATUS_SYNCBUSY_Pos)
#define WDT_STATUS_MASK 0x80u /**< \brief (WDT_STATUS) MASK Register */
/* -------- WDT_CLEAR : (WDT Offset: 0x8) ( /W 8) Clear Register -------- */
#if !(defined(__ASSEMBLY__) || defined(__IAR_SYSTEMS_ASM__))
typedef union {
struct {
uint8_t CLEAR:8; /*!< bit: 0.. 7 Watchdog Timer Clears Command Register */
} bit; /*!< Structure used for bit access */
uint8_t reg; /*!< Type used for register access */
} WDT_CLEAR_Type;
#endif /* !(defined(__ASSEMBLY__) || defined(__IAR_SYSTEMS_ASM__)) */
#define WDT_CLEAR_OFFSET 0x8 /**< \brief (WDT_CLEAR offset) Clear Register */
#define WDT_CLEAR_RESETVALUE 0x00 /**< \brief (WDT_CLEAR reset_value) Clear Register */
#define WDT_CLEAR_CLEAR_Pos 0 /**< \brief (WDT_CLEAR) Watchdog Timer Clears Command Register */
#define WDT_CLEAR_CLEAR_Msk (0xFFu << WDT_CLEAR_CLEAR_Pos)
#define WDT_CLEAR_CLEAR(value) ((WDT_CLEAR_CLEAR_Msk & ((value) << WDT_CLEAR_CLEAR_Pos)))
#define WDT_CLEAR_CLEAR_KEY (0xA5u << 0) /**< \brief (WDT_CLEAR) Clear Key */
#define WDT_CLEAR_MASK 0xFFu /**< \brief (WDT_CLEAR) MASK Register */
/** \brief WDT hardware registers */
#if !(defined(__ASSEMBLY__) || defined(__IAR_SYSTEMS_ASM__))
typedef struct {
__IO WDT_CTRL_Type CTRL; /**< \brief Offset: 0x0 (R/W 8) Control Register */
__IO WDT_CONFIG_Type CONFIG; /**< \brief Offset: 0x1 (R/W 8) Configuration Register */
__IO WDT_EWCTRL_Type EWCTRL; /**< \brief Offset: 0x2 (R/W 8) Early Warning Control Register */
RoReg8 Reserved1[0x1];
__IO WDT_INTENCLR_Type INTENCLR; /**< \brief Offset: 0x4 (R/W 8) Interrupt Enable Clear Register */
__IO WDT_INTENSET_Type INTENSET; /**< \brief Offset: 0x5 (R/W 8) Interrupt Enable Set Register */
__IO WDT_INTFLAG_Type INTFLAG; /**< \brief Offset: 0x6 (R/W 8) Interrupt Flag Status and Clear Register */
__I WDT_STATUS_Type STATUS; /**< \brief Offset: 0x7 (R/ 8) Status Register */
__O WDT_CLEAR_Type CLEAR; /**< \brief Offset: 0x8 ( /W 8) Clear Register */
} Wdt;
#endif /* !(defined(__ASSEMBLY__) || defined(__IAR_SYSTEMS_ASM__)) */
/*@}*/
#endif /* _SAMD20_WDT_COMPONENT_ */
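/*
 * Hedged usage sketch (not part of the original Atmel header): one way the macros above
 * could be combined to start and service the watchdog. `WDT` is assumed to be the
 * peripheral instance pointer (a `Wdt *`) defined by the device header; clock setup and
 * further write-synchronization details are omitted.
 *
 *   WDT->CONFIG.reg = WDT_CONFIG_PER(0xA);          // select a timeout period
 *   WDT->CTRL.reg  |= WDT_CTRL_ENABLE;              // enable the watchdog
 *   while (WDT->STATUS.reg & WDT_STATUS_SYNCBUSY) { // wait for the write to synchronize
 *   }
 *   ...
 *   WDT->CLEAR.reg = WDT_CLEAR_CLEAR_KEY;           // feed the watchdog with the 0xA5 clear key
 */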
| 6,391 |
3,084 | /*++
Copyright (c) 2005 Microsoft Corporation
All rights reserved.
THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF
ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A
PARTICULAR PURPOSE.
File Name:
cmintentsschema.cpp
Abstract:
PageICMRenderingIntent PrintSchema implementation. This implements the features,
options and enumerations that describe the PrintSchema PageICMRenderingIntent feature.
--*/
#include "precomp.h"
#include "cmintentsschema.h"
LPCWSTR XDPrintSchema::PageICMRenderingIntent::ICMINTENT_FEATURE = L"PageICMRenderingIntent";
LPCWSTR XDPrintSchema::PageICMRenderingIntent::ICMINTENT_OPTIONS[] = {
L"AbsoluteColorimetric",
L"RelativeColorimetric",
L"Photographs",
L"BusinessGraphics"
};
| 320 |
839 | <reponame>kimjand/cxf
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cxf.rs.security.saml.sso;
import java.io.InputStream;
import java.security.KeyStore;
import java.security.PrivateKey;
import java.security.cert.X509Certificate;
import java.util.Collections;
import java.util.List;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.apache.cxf.helpers.DOMUtils;
import org.apache.wss4j.common.crypto.Crypto;
import org.apache.wss4j.common.crypto.CryptoType;
import org.apache.wss4j.common.crypto.Merlin;
import org.apache.wss4j.common.ext.WSSecurityException;
import org.apache.wss4j.common.saml.OpenSAMLUtil;
import org.apache.wss4j.common.saml.SAMLCallback;
import org.apache.wss4j.common.saml.SAMLUtil;
import org.apache.wss4j.common.saml.SamlAssertionWrapper;
import org.apache.wss4j.common.saml.bean.AudienceRestrictionBean;
import org.apache.wss4j.common.saml.bean.ConditionsBean;
import org.apache.wss4j.common.saml.bean.SubjectConfirmationDataBean;
import org.apache.wss4j.common.saml.builder.SAML2Constants;
import org.apache.wss4j.common.util.Loader;
import org.apache.wss4j.dom.engine.WSSConfig;
import org.joda.time.DateTime;
import org.opensaml.saml.common.SAMLVersion;
import org.opensaml.saml.common.SignableSAMLObject;
import org.opensaml.saml.common.xml.SAMLConstants;
import org.opensaml.saml.saml2.core.Response;
import org.opensaml.saml.saml2.core.Status;
import org.opensaml.security.x509.BasicX509Credential;
import org.opensaml.xmlsec.keyinfo.impl.X509KeyInfoGeneratorFactory;
import org.opensaml.xmlsec.signature.KeyInfo;
import org.opensaml.xmlsec.signature.Signature;
import org.opensaml.xmlsec.signature.support.SignatureConstants;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
/**
* Some unit tests for the SAMLProtocolResponseValidator.
*/
public class SAMLResponseValidatorTest {
static {
WSSConfig.init();
OpenSAMLUtil.initSamlEngine();
}
@org.junit.Test
public void testCreateAndValidateResponse() throws Exception {
Document doc = DOMUtils.createDocument();
Status status =
SAML2PResponseComponentBuilder.createStatus(
SAMLProtocolResponseValidator.SAML2_STATUSCODE_SUCCESS, null
);
Response response =
SAML2PResponseComponentBuilder.createSAMLResponse(
"http://cxf.apache.org/saml", "http://cxf.apache.org/issuer", status
);
// Create an AuthenticationAssertion
SAML2CallbackHandler callbackHandler = new SAML2CallbackHandler();
callbackHandler.setStatement(SAML2CallbackHandler.Statement.AUTHN);
callbackHandler.setIssuer("http://cxf.apache.org/issuer");
callbackHandler.setConfirmationMethod(SAML2Constants.CONF_SENDER_VOUCHES);
SAMLCallback samlCallback = new SAMLCallback();
SAMLUtil.doSAMLCallback(callbackHandler, samlCallback);
SamlAssertionWrapper assertion = new SamlAssertionWrapper(samlCallback);
response.getAssertions().add(assertion.getSaml2());
Element policyElement = OpenSAMLUtil.toDom(response, doc);
doc.appendChild(policyElement);
assertNotNull(policyElement);
Response marshalledResponse = (Response)OpenSAMLUtil.fromDom(policyElement);
// Validate the Response
SAMLProtocolResponseValidator validator = new SAMLProtocolResponseValidator();
validator.validateSamlResponse(marshalledResponse, null, null);
}
@org.junit.Test
public void testInvalidStatusCode() throws Exception {
Document doc = DOMUtils.createDocument();
Status status =
SAML2PResponseComponentBuilder.createStatus(
SAMLProtocolResponseValidator.SAML1_STATUSCODE_SUCCESS, null
);
Response response =
SAML2PResponseComponentBuilder.createSAMLResponse(
"http://cxf.apache.org/saml", "http://cxf.apache.org/issuer", status
);
// Create an AuthenticationAssertion
SAML2CallbackHandler callbackHandler = new SAML2CallbackHandler();
callbackHandler.setStatement(SAML2CallbackHandler.Statement.AUTHN);
callbackHandler.setIssuer("http://cxf.apache.org/issuer");
callbackHandler.setConfirmationMethod(SAML2Constants.CONF_SENDER_VOUCHES);
SAMLCallback samlCallback = new SAMLCallback();
SAMLUtil.doSAMLCallback(callbackHandler, samlCallback);
SamlAssertionWrapper assertion = new SamlAssertionWrapper(samlCallback);
response.getAssertions().add(assertion.getSaml2());
Element policyElement = OpenSAMLUtil.toDom(response, doc);
doc.appendChild(policyElement);
assertNotNull(policyElement);
Response marshalledResponse = (Response)OpenSAMLUtil.fromDom(policyElement);
// Validate the Response
SAMLProtocolResponseValidator validator = new SAMLProtocolResponseValidator();
try {
validator.validateSamlResponse(marshalledResponse, null, null);
fail("Expected failure on an invalid SAML code");
} catch (WSSecurityException ex) {
// expected
}
}
@org.junit.Test
public void testRequestDeniedStatusCode() throws Exception {
Document doc = DOMUtils.createDocument();
Status status =
SAML2PResponseComponentBuilder.createStatus(
"urn:oasis:names:tc:SAML:2.0:status:RequestDenied", null
);
Response response =
SAML2PResponseComponentBuilder.createSAMLResponse(
"http://cxf.apache.org/saml", "http://cxf.apache.org/issuer", status
);
// Create an AuthenticationAssertion
SAML2CallbackHandler callbackHandler = new SAML2CallbackHandler();
callbackHandler.setStatement(SAML2CallbackHandler.Statement.AUTHN);
callbackHandler.setIssuer("http://cxf.apache.org/issuer");
callbackHandler.setConfirmationMethod(SAML2Constants.CONF_SENDER_VOUCHES);
SAMLCallback samlCallback = new SAMLCallback();
SAMLUtil.doSAMLCallback(callbackHandler, samlCallback);
SamlAssertionWrapper assertion = new SamlAssertionWrapper(samlCallback);
response.getAssertions().add(assertion.getSaml2());
Element policyElement = OpenSAMLUtil.toDom(response, doc);
doc.appendChild(policyElement);
assertNotNull(policyElement);
Response marshalledResponse = (Response)OpenSAMLUtil.fromDom(policyElement);
// Validate the Response
SAMLProtocolResponseValidator validator = new SAMLProtocolResponseValidator();
try {
validator.validateSamlResponse(marshalledResponse, null, null);
fail("Expected failure on an invalid SAML code");
} catch (WSSecurityException ex) {
// expected
}
}
@org.junit.Test
public void testResponseSignedAssertion() throws Exception {
Document doc = DOMUtils.createDocument();
Status status =
SAML2PResponseComponentBuilder.createStatus(
SAMLProtocolResponseValidator.SAML2_STATUSCODE_SUCCESS, null
);
Response response =
SAML2PResponseComponentBuilder.createSAMLResponse(
"http://cxf.apache.org/saml", "http://cxf.apache.org/issuer", status
);
// Create an AuthenticationAssertion
SAML2CallbackHandler callbackHandler = new SAML2CallbackHandler();
callbackHandler.setStatement(SAML2CallbackHandler.Statement.AUTHN);
callbackHandler.setIssuer("http://cxf.apache.org/issuer");
callbackHandler.setConfirmationMethod(SAML2Constants.CONF_SENDER_VOUCHES);
SAMLCallback samlCallback = new SAMLCallback();
SAMLUtil.doSAMLCallback(callbackHandler, samlCallback);
SamlAssertionWrapper assertion = new SamlAssertionWrapper(samlCallback);
Crypto issuerCrypto = new Merlin();
KeyStore keyStore = KeyStore.getInstance(KeyStore.getDefaultType());
ClassLoader loader = Loader.getClassLoader(SAMLResponseValidatorTest.class);
InputStream input = Merlin.loadInputStream(loader, "alice.jks");
keyStore.load(input, "password".toCharArray());
((Merlin)issuerCrypto).setKeyStore(keyStore);
assertion.signAssertion("alice", "password", issuerCrypto, false);
response.getAssertions().add(assertion.getSaml2());
Element policyElement = OpenSAMLUtil.toDom(response, doc);
doc.appendChild(policyElement);
assertNotNull(policyElement);
Response marshalledResponse = (Response)OpenSAMLUtil.fromDom(policyElement);
// Validate the Response
SAMLProtocolResponseValidator validator = new SAMLProtocolResponseValidator();
try {
validator.validateSamlResponse(marshalledResponse, null, new KeystorePasswordCallback());
fail("Expected failure on no Signature Crypto");
} catch (WSSecurityException ex) {
// expected
}
// Validate the Response
validator.validateSamlResponse(
marshalledResponse, issuerCrypto, new KeystorePasswordCallback()
);
}
@org.junit.Test
public void testResponseModifiedSignedAssertion() throws Exception {
Document doc = DOMUtils.createDocument();
Status status =
SAML2PResponseComponentBuilder.createStatus(
SAMLProtocolResponseValidator.SAML2_STATUSCODE_SUCCESS, null
);
Response response =
SAML2PResponseComponentBuilder.createSAMLResponse(
"http://cxf.apache.org/saml", "http://cxf.apache.org/issuer", status
);
// Create an AuthenticationAssertion
SAML2CallbackHandler callbackHandler = new SAML2CallbackHandler();
callbackHandler.setStatement(SAML2CallbackHandler.Statement.AUTHN);
callbackHandler.setIssuer("http://cxf.apache.org/issuer");
callbackHandler.setConfirmationMethod(SAML2Constants.CONF_SENDER_VOUCHES);
SAMLCallback samlCallback = new SAMLCallback();
SAMLUtil.doSAMLCallback(callbackHandler, samlCallback);
SamlAssertionWrapper assertion = new SamlAssertionWrapper(samlCallback);
Crypto issuerCrypto = new Merlin();
KeyStore keyStore = KeyStore.getInstance(KeyStore.getDefaultType());
ClassLoader loader = Loader.getClassLoader(SAMLResponseValidatorTest.class);
InputStream input = Merlin.loadInputStream(loader, "alice.jks");
keyStore.load(input, "password".toCharArray());
((Merlin)issuerCrypto).setKeyStore(keyStore);
assertion.signAssertion("alice", "password", issuerCrypto, false);
response.getAssertions().add(assertion.getSaml2());
Element policyElement = OpenSAMLUtil.toDom(response, doc);
doc.appendChild(policyElement);
assertNotNull(policyElement);
List<Element> assertions =
DOMUtils.findAllElementsByTagNameNS(policyElement, SAMLConstants.SAML20_NS, "Assertion");
assertNotNull(assertions);
assertTrue(assertions.size() == 1);
assertions.get(0).setAttributeNS(null, "newattr", "http://apache.org");
Response marshalledResponse = (Response)OpenSAMLUtil.fromDom(policyElement);
// Validate the Response
SAMLProtocolResponseValidator validator = new SAMLProtocolResponseValidator();
try {
// Validate the Response
validator.validateSamlResponse(
marshalledResponse, issuerCrypto, new KeystorePasswordCallback()
);
fail("Expected failure on a bad signature");
} catch (WSSecurityException ex) {
// expected
}
}
@org.junit.Test
public void testSignedResponse() throws Exception {
Document doc = DOMUtils.createDocument();
Status status =
SAML2PResponseComponentBuilder.createStatus(
SAMLProtocolResponseValidator.SAML2_STATUSCODE_SUCCESS, null
);
Response response =
SAML2PResponseComponentBuilder.createSAMLResponse(
"http://cxf.apache.org/saml", "http://cxf.apache.org/issuer", status
);
// Create an AuthenticationAssertion
SAML2CallbackHandler callbackHandler = new SAML2CallbackHandler();
callbackHandler.setStatement(SAML2CallbackHandler.Statement.AUTHN);
callbackHandler.setIssuer("http://cxf.apache.org/issuer");
callbackHandler.setConfirmationMethod(SAML2Constants.CONF_SENDER_VOUCHES);
SAMLCallback samlCallback = new SAMLCallback();
SAMLUtil.doSAMLCallback(callbackHandler, samlCallback);
SamlAssertionWrapper assertion = new SamlAssertionWrapper(samlCallback);
Crypto issuerCrypto = new Merlin();
KeyStore keyStore = KeyStore.getInstance(KeyStore.getDefaultType());
ClassLoader loader = Loader.getClassLoader(SAMLResponseValidatorTest.class);
InputStream input = Merlin.loadInputStream(loader, "alice.jks");
keyStore.load(input, "password".toCharArray());
((Merlin)issuerCrypto).setKeyStore(keyStore);
response.getAssertions().add(assertion.getSaml2());
signResponse(response, "alice", "password", issuerCrypto, true);
Element policyElement = OpenSAMLUtil.toDom(response, doc);
doc.appendChild(policyElement);
assertNotNull(policyElement);
Response marshalledResponse = (Response)OpenSAMLUtil.fromDom(policyElement);
// Validate the Response
SAMLProtocolResponseValidator validator = new SAMLProtocolResponseValidator();
try {
validator.validateSamlResponse(marshalledResponse, null, new KeystorePasswordCallback());
fail("Expected failure on no Signature Crypto");
} catch (WSSecurityException ex) {
// expected
}
// Validate the Response
validator.validateSamlResponse(
marshalledResponse, issuerCrypto, new KeystorePasswordCallback()
);
}
@org.junit.Test
public void testModifiedSignedResponse() throws Exception {
Document doc = DOMUtils.createDocument();
Status status =
SAML2PResponseComponentBuilder.createStatus(
SAMLProtocolResponseValidator.SAML2_STATUSCODE_SUCCESS, null
);
Response response =
SAML2PResponseComponentBuilder.createSAMLResponse(
"http://cxf.apache.org/saml", "http://cxf.apache.org/issuer", status
);
// Create an AuthenticationAssertion
SAML2CallbackHandler callbackHandler = new SAML2CallbackHandler();
callbackHandler.setStatement(SAML2CallbackHandler.Statement.AUTHN);
callbackHandler.setIssuer("http://cxf.apache.org/issuer");
callbackHandler.setConfirmationMethod(SAML2Constants.CONF_SENDER_VOUCHES);
SAMLCallback samlCallback = new SAMLCallback();
SAMLUtil.doSAMLCallback(callbackHandler, samlCallback);
SamlAssertionWrapper assertion = new SamlAssertionWrapper(samlCallback);
Crypto issuerCrypto = new Merlin();
KeyStore keyStore = KeyStore.getInstance(KeyStore.getDefaultType());
ClassLoader loader = Loader.getClassLoader(SAMLResponseValidatorTest.class);
InputStream input = Merlin.loadInputStream(loader, "alice.jks");
keyStore.load(input, "password".toCharArray());
((Merlin)issuerCrypto).setKeyStore(keyStore);
response.getAssertions().add(assertion.getSaml2());
signResponse(response, "alice", "password", issuerCrypto, true);
Element policyElement = OpenSAMLUtil.toDom(response, doc);
doc.appendChild(policyElement);
assertNotNull(policyElement);
policyElement.setAttributeNS(null, "newattr", "http://apache.org");
Response marshalledResponse = (Response)OpenSAMLUtil.fromDom(policyElement);
// Validate the Response
SAMLProtocolResponseValidator validator = new SAMLProtocolResponseValidator();
try {
// Validate the Response
validator.validateSamlResponse(
marshalledResponse, issuerCrypto, new KeystorePasswordCallback()
);
fail("Expected failure on a bad signature");
} catch (WSSecurityException ex) {
// expected
}
}
@org.junit.Test
public void testSignedResponseNoKeyInfo() throws Exception {
Document doc = DOMUtils.createDocument();
Status status =
SAML2PResponseComponentBuilder.createStatus(
SAMLProtocolResponseValidator.SAML2_STATUSCODE_SUCCESS, null
);
Response response =
SAML2PResponseComponentBuilder.createSAMLResponse(
"http://cxf.apache.org/saml", "http://cxf.apache.org/issuer", status
);
// Create an AuthenticationAssertion
SAML2CallbackHandler callbackHandler = new SAML2CallbackHandler();
callbackHandler.setStatement(SAML2CallbackHandler.Statement.AUTHN);
callbackHandler.setIssuer("http://cxf.apache.org/issuer");
callbackHandler.setConfirmationMethod(SAML2Constants.CONF_SENDER_VOUCHES);
SAMLCallback samlCallback = new SAMLCallback();
SAMLUtil.doSAMLCallback(callbackHandler, samlCallback);
SamlAssertionWrapper assertion = new SamlAssertionWrapper(samlCallback);
Crypto issuerCrypto = new Merlin();
KeyStore keyStore = KeyStore.getInstance(KeyStore.getDefaultType());
ClassLoader loader = Loader.getClassLoader(SAMLResponseValidatorTest.class);
InputStream input = Merlin.loadInputStream(loader, "alice.jks");
keyStore.load(input, "password".toCharArray());
((Merlin)issuerCrypto).setKeyStore(keyStore);
issuerCrypto.setDefaultX509Identifier("alice");
response.getAssertions().add(assertion.getSaml2());
signResponse(response, "alice", "password", issuerCrypto, false);
Element policyElement = OpenSAMLUtil.toDom(response, doc);
doc.appendChild(policyElement);
assertNotNull(policyElement);
Response marshalledResponse = (Response)OpenSAMLUtil.fromDom(policyElement);
// Validate the Response
SAMLProtocolResponseValidator validator = new SAMLProtocolResponseValidator();
validator.setKeyInfoMustBeAvailable(false);
try {
validator.validateSamlResponse(marshalledResponse, null, new KeystorePasswordCallback());
fail("Expected failure on no Signature Crypto");
} catch (WSSecurityException ex) {
// expected
}
// Validate the Response
validator.validateSamlResponse(
marshalledResponse, issuerCrypto, new KeystorePasswordCallback()
);
}
@org.junit.Test
public void testResponseInvalidVersion() throws Exception {
SubjectConfirmationDataBean subjectConfirmationData = new SubjectConfirmationDataBean();
subjectConfirmationData.setAddress("http://apache.org");
subjectConfirmationData.setInResponseTo("12345");
subjectConfirmationData.setNotAfter(new DateTime().plusMinutes(5));
subjectConfirmationData.setRecipient("http://recipient.apache.org");
        // Create an AuthenticationAssertion
SAML2CallbackHandler callbackHandler = new SAML2CallbackHandler();
callbackHandler.setStatement(SAML2CallbackHandler.Statement.AUTHN);
callbackHandler.setIssuer("http://cxf.apache.org/issuer");
callbackHandler.setConfirmationMethod(SAML2Constants.CONF_SENDER_VOUCHES);
callbackHandler.setSubjectConfirmationData(subjectConfirmationData);
ConditionsBean conditions = new ConditionsBean();
conditions.setNotBefore(new DateTime());
conditions.setNotAfter(new DateTime().plusMinutes(5));
AudienceRestrictionBean audienceRestriction = new AudienceRestrictionBean();
audienceRestriction.setAudienceURIs(Collections.singletonList("http://service.apache.org"));
conditions.setAudienceRestrictions(Collections.singletonList(audienceRestriction));
callbackHandler.setConditions(conditions);
Response response = createResponse(subjectConfirmationData, callbackHandler);
response.setVersion(SAMLVersion.VERSION_10);
// Validate the Response
SAMLProtocolResponseValidator protocolValidator = new SAMLProtocolResponseValidator();
try {
protocolValidator.validateSamlResponse(response, null, null);
fail("Expected failure on bad response");
} catch (WSSecurityException ex) {
// expected
}
}
@org.junit.Test
public void testAssertionBadSubjectConfirmationMethod() throws Exception {
SubjectConfirmationDataBean subjectConfirmationData = new SubjectConfirmationDataBean();
subjectConfirmationData.setAddress("http://apache.org");
subjectConfirmationData.setInResponseTo("12345");
subjectConfirmationData.setNotAfter(new DateTime().plusMinutes(5));
subjectConfirmationData.setRecipient("http://recipient.apache.org");
        // Create an AuthenticationAssertion
SAML2CallbackHandler callbackHandler = new SAML2CallbackHandler();
callbackHandler.setStatement(SAML2CallbackHandler.Statement.AUTHN);
callbackHandler.setIssuer("http://cxf.apache.org/issuer");
callbackHandler.setConfirmationMethod("xyz");
callbackHandler.setSubjectConfirmationData(subjectConfirmationData);
ConditionsBean conditions = new ConditionsBean();
conditions.setNotBefore(new DateTime());
conditions.setNotAfter(new DateTime().plusMinutes(5));
AudienceRestrictionBean audienceRestriction = new AudienceRestrictionBean();
audienceRestriction.setAudienceURIs(Collections.singletonList("http://service.apache.org"));
conditions.setAudienceRestrictions(Collections.singletonList(audienceRestriction));
callbackHandler.setConditions(conditions);
Response response = createResponse(subjectConfirmationData, callbackHandler);
// Validate the Response
SAMLProtocolResponseValidator protocolValidator = new SAMLProtocolResponseValidator();
try {
protocolValidator.validateSamlResponse(response, null, null);
fail("Expected failure on bad response");
} catch (WSSecurityException ex) {
// expected
}
}
@org.junit.Test
public void testResponseIssueInstant() throws Exception {
Document doc = DOMUtils.createDocument();
Status status =
SAML2PResponseComponentBuilder.createStatus(
SAMLProtocolResponseValidator.SAML2_STATUSCODE_SUCCESS, null
);
Response response =
SAML2PResponseComponentBuilder.createSAMLResponse(
"http://cxf.apache.org/saml", "http://cxf.apache.org/issuer", status
);
response.setIssueInstant(new DateTime().plusMinutes(5));
// Create an AuthenticationAssertion
SAML2CallbackHandler callbackHandler = new SAML2CallbackHandler();
callbackHandler.setStatement(SAML2CallbackHandler.Statement.AUTHN);
callbackHandler.setIssuer("http://cxf.apache.org/issuer");
callbackHandler.setConfirmationMethod(SAML2Constants.CONF_SENDER_VOUCHES);
SAMLCallback samlCallback = new SAMLCallback();
SAMLUtil.doSAMLCallback(callbackHandler, samlCallback);
SamlAssertionWrapper assertion = new SamlAssertionWrapper(samlCallback);
response.getAssertions().add(assertion.getSaml2());
Element policyElement = OpenSAMLUtil.toDom(response, doc);
doc.appendChild(policyElement);
assertNotNull(policyElement);
Response marshalledResponse = (Response)OpenSAMLUtil.fromDom(policyElement);
// Validate the Response
SAMLProtocolResponseValidator validator = new SAMLProtocolResponseValidator();
try {
validator.validateSamlResponse(marshalledResponse, null, null);
fail("Expected failure on an invalid Response IssueInstant");
} catch (WSSecurityException ex) {
// expected
}
}
@org.junit.Test
public void testAssertionIssueInstant() throws Exception {
Document doc = DOMUtils.createDocument();
Status status =
SAML2PResponseComponentBuilder.createStatus(
SAMLProtocolResponseValidator.SAML2_STATUSCODE_SUCCESS, null
);
Response response =
SAML2PResponseComponentBuilder.createSAMLResponse(
"http://cxf.apache.org/saml", "http://cxf.apache.org/issuer", status
);
// Create an AuthenticationAssertion
SAML2CallbackHandler callbackHandler = new SAML2CallbackHandler();
callbackHandler.setStatement(SAML2CallbackHandler.Statement.AUTHN);
callbackHandler.setIssuer("http://cxf.apache.org/issuer");
callbackHandler.setConfirmationMethod(SAML2Constants.CONF_SENDER_VOUCHES);
SAMLCallback samlCallback = new SAMLCallback();
SAMLUtil.doSAMLCallback(callbackHandler, samlCallback);
SamlAssertionWrapper assertion = new SamlAssertionWrapper(samlCallback);
assertion.getSaml2().setIssueInstant(new DateTime().plusMinutes(5));
response.getAssertions().add(assertion.getSaml2());
Element policyElement = OpenSAMLUtil.toDom(response, doc);
doc.appendChild(policyElement);
assertNotNull(policyElement);
Response marshalledResponse = (Response)OpenSAMLUtil.fromDom(policyElement);
// Validate the Response
SAMLProtocolResponseValidator validator = new SAMLProtocolResponseValidator();
try {
validator.validateSamlResponse(marshalledResponse, null, null);
fail("Expected failure on an invalid Assertion IssueInstant");
} catch (WSSecurityException ex) {
// expected
}
}
@org.junit.Test
public void testFutureAuthnInstant() throws Exception {
Document doc = DOMUtils.createDocument();
Status status =
SAML2PResponseComponentBuilder.createStatus(
SAMLProtocolResponseValidator.SAML2_STATUSCODE_SUCCESS, null
);
Response response =
SAML2PResponseComponentBuilder.createSAMLResponse(
"http://cxf.apache.org/saml", "http://cxf.apache.org/issuer", status
);
// Create an AuthenticationAssertion
SAML2CallbackHandler callbackHandler = new SAML2CallbackHandler();
callbackHandler.setStatement(SAML2CallbackHandler.Statement.AUTHN);
callbackHandler.setIssuer("http://cxf.apache.org/issuer");
callbackHandler.setConfirmationMethod(SAML2Constants.CONF_SENDER_VOUCHES);
callbackHandler.setAuthnInstant(new DateTime().plusDays(1));
SAMLCallback samlCallback = new SAMLCallback();
SAMLUtil.doSAMLCallback(callbackHandler, samlCallback);
SamlAssertionWrapper assertion = new SamlAssertionWrapper(samlCallback);
response.getAssertions().add(assertion.getSaml2());
Element policyElement = OpenSAMLUtil.toDom(response, doc);
doc.appendChild(policyElement);
assertNotNull(policyElement);
Response marshalledResponse = (Response)OpenSAMLUtil.fromDom(policyElement);
// Validate the Response
SAMLProtocolResponseValidator validator = new SAMLProtocolResponseValidator();
try {
validator.validateSamlResponse(marshalledResponse, null, null);
fail("Expected failure on an invalid Assertion AuthnInstant");
} catch (WSSecurityException ex) {
// expected
}
}
@org.junit.Test
public void testStaleSessionNotOnOrAfter() throws Exception {
Document doc = DOMUtils.createDocument();
Status status =
SAML2PResponseComponentBuilder.createStatus(
SAMLProtocolResponseValidator.SAML2_STATUSCODE_SUCCESS, null
);
Response response =
SAML2PResponseComponentBuilder.createSAMLResponse(
"http://cxf.apache.org/saml", "http://cxf.apache.org/issuer", status
);
// Create an AuthenticationAssertion
SAML2CallbackHandler callbackHandler = new SAML2CallbackHandler();
callbackHandler.setStatement(SAML2CallbackHandler.Statement.AUTHN);
callbackHandler.setIssuer("http://cxf.apache.org/issuer");
callbackHandler.setConfirmationMethod(SAML2Constants.CONF_SENDER_VOUCHES);
callbackHandler.setSessionNotOnOrAfter(new DateTime().minusDays(1));
SAMLCallback samlCallback = new SAMLCallback();
SAMLUtil.doSAMLCallback(callbackHandler, samlCallback);
SamlAssertionWrapper assertion = new SamlAssertionWrapper(samlCallback);
response.getAssertions().add(assertion.getSaml2());
Element policyElement = OpenSAMLUtil.toDom(response, doc);
doc.appendChild(policyElement);
assertNotNull(policyElement);
Response marshalledResponse = (Response)OpenSAMLUtil.fromDom(policyElement);
// Validate the Response
SAMLProtocolResponseValidator validator = new SAMLProtocolResponseValidator();
try {
validator.validateSamlResponse(marshalledResponse, null, null);
fail("Expected failure on an invalid SessionNotOnOrAfter");
} catch (WSSecurityException ex) {
// expected
}
}
@org.junit.Test
public void testInvalidSubjectLocality() throws Exception {
Document doc = DOMUtils.createDocument();
Status status =
SAML2PResponseComponentBuilder.createStatus(
SAMLProtocolResponseValidator.SAML2_STATUSCODE_SUCCESS, null
);
Response response =
SAML2PResponseComponentBuilder.createSAMLResponse(
"http://cxf.apache.org/saml", "http://cxf.apache.org/issuer", status
);
// Create an AuthenticationAssertion
SAML2CallbackHandler callbackHandler = new SAML2CallbackHandler();
callbackHandler.setStatement(SAML2CallbackHandler.Statement.AUTHN);
callbackHandler.setIssuer("http://cxf.apache.org/issuer");
callbackHandler.setConfirmationMethod(SAML2Constants.CONF_SENDER_VOUCHES);
callbackHandler.setSubjectLocality("xyz.123", null);
SAMLCallback samlCallback = new SAMLCallback();
SAMLUtil.doSAMLCallback(callbackHandler, samlCallback);
SamlAssertionWrapper assertion = new SamlAssertionWrapper(samlCallback);
response.getAssertions().add(assertion.getSaml2());
Element policyElement = OpenSAMLUtil.toDom(response, doc);
doc.appendChild(policyElement);
assertNotNull(policyElement);
Response marshalledResponse = (Response)OpenSAMLUtil.fromDom(policyElement);
// Validate the Response
SAMLProtocolResponseValidator validator = new SAMLProtocolResponseValidator();
try {
validator.validateSamlResponse(marshalledResponse, null, null);
fail("Expected failure on an invalid SessionNotOnOrAfter");
} catch (WSSecurityException ex) {
// expected
}
}
/**
* Sign a SAML Response
* @throws Exception
*/
private void signResponse(
Response response,
String issuerKeyName,
String issuerKeyPassword,
Crypto issuerCrypto,
boolean useKeyInfo
) throws Exception {
//
// Create the signature
//
Signature signature = OpenSAMLUtil.buildSignature();
signature.setCanonicalizationAlgorithm(SignatureConstants.ALGO_ID_C14N_EXCL_OMIT_COMMENTS);
// prepare to sign the SAML token
CryptoType cryptoType = new CryptoType(CryptoType.TYPE.ALIAS);
cryptoType.setAlias(issuerKeyName);
X509Certificate[] issuerCerts = issuerCrypto.getX509Certificates(cryptoType);
if (issuerCerts == null) {
throw new Exception(
"No issuer certs were found to sign the SAML Assertion using issuer name: "
+ issuerKeyName);
}
String sigAlgo = SignatureConstants.ALGO_ID_SIGNATURE_RSA_SHA1;
String pubKeyAlgo = issuerCerts[0].getPublicKey().getAlgorithm();
if ("DSA".equalsIgnoreCase(pubKeyAlgo)) {
sigAlgo = SignatureConstants.ALGO_ID_SIGNATURE_DSA;
}
PrivateKey privateKey = issuerCrypto.getPrivateKey(issuerKeyName, issuerKeyPassword);
signature.setSignatureAlgorithm(sigAlgo);
BasicX509Credential signingCredential =
new BasicX509Credential(issuerCerts[0], privateKey);
signature.setSigningCredential(signingCredential);
if (useKeyInfo) {
X509KeyInfoGeneratorFactory kiFactory = new X509KeyInfoGeneratorFactory();
kiFactory.setEmitEntityCertificate(true);
try {
KeyInfo keyInfo = kiFactory.newInstance().generate(signingCredential);
signature.setKeyInfo(keyInfo);
} catch (org.opensaml.security.SecurityException ex) {
throw new Exception(
"Error generating KeyInfo from signing credential", ex);
}
}
        // add the signature to the response
SignableSAMLObject signableObject = response;
signableObject.setSignature(signature);
signableObject.releaseDOM();
signableObject.releaseChildrenDOM(true);
}
private Response createResponse(
SubjectConfirmationDataBean subjectConfirmationData,
SAML2CallbackHandler callbackHandler
) throws Exception {
Document doc = DOMUtils.createDocument();
Status status =
SAML2PResponseComponentBuilder.createStatus(
SAMLProtocolResponseValidator.SAML2_STATUSCODE_SUCCESS, null
);
Response response =
SAML2PResponseComponentBuilder.createSAMLResponse(
"http://cxf.apache.org/saml", "http://cxf.apache.org/issuer", status
);
// Create an AuthenticationAssertion
SAMLCallback samlCallback = new SAMLCallback();
SAMLUtil.doSAMLCallback(callbackHandler, samlCallback);
SamlAssertionWrapper assertion = new SamlAssertionWrapper(samlCallback);
response.getAssertions().add(assertion.getSaml2());
Element policyElement = OpenSAMLUtil.toDom(response, doc);
doc.appendChild(policyElement);
assertNotNull(policyElement);
return (Response)OpenSAMLUtil.fromDom(policyElement);
}
}
| 13,986 |
10,608 | <filename>datasets/hate_speech_pl/dataset_infos.json
{"default": {"description": "HateSpeech corpus in the current version contains over 2000 posts crawled from public Polish web. They represent various types and degrees of offensive language, expressed toward minorities (eg. ethnical, racial). The data were annotated manually.\n", "citation": "@article{troszynski2017czy,\n title={Czy komputer rozpozna hejtera? Wykorzystanie uczenia maszynowego (ML) w jako{'s}ciowej analizie danych},\n author={Troszy{'n}ski, <NAME>},\n journal={Przegl{\\k{a}}d Socjologii Jako{'s}ciowej},\n volume={13},\n number={2},\n pages={62--80},\n year={2017},\n publisher={Uniwersytet {\\L}{'o}dzki, Wydzia{\\l} Ekonomiczno-Socjologiczny, Katedra Socjologii~\u2026}\n}\n", "homepage": "", "license": "CC BY-NC-SA", "features": {"id": {"dtype": "uint16", "id": null, "_type": "Value"}, "text_id": {"dtype": "uint32", "id": null, "_type": "Value"}, "annotator_id": {"dtype": "uint8", "id": null, "_type": "Value"}, "minority_id": {"dtype": "uint8", "id": null, "_type": "Value"}, "negative_emotions": {"dtype": "bool", "id": null, "_type": "Value"}, "call_to_action": {"dtype": "bool", "id": null, "_type": "Value"}, "source_of_knowledge": {"dtype": "uint8", "id": null, "_type": "Value"}, "irony_sarcasm": {"dtype": "bool", "id": null, "_type": "Value"}, "topic": {"dtype": "uint8", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}, "rating": {"dtype": "uint8", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "hate_speech_pl", "config_name": "default", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3436190, "num_examples": 13887, "dataset_name": "hate_speech_pl"}}, "download_checksums": {"https://raw.githubusercontent.com/aiembassy/hatespeech-corpus-pl/master/data/fragment_anotatora_2011_ZK.csv": {"num_bytes": 417638, "checksum": "bf5c336c02cf87c9c7f1087ec982e57be2cfde924943916e5de1828a03b25a29"}, "https://raw.githubusercontent.com/aiembassy/hatespeech-corpus-pl/master/data/fragment_anotatora_2011b.csv": {"num_bytes": 2556002, "checksum": "ae2f293eb8cab3c44521eace7dc95e04c033cfb77003babd051ca6a7a8491adb"}, "https://raw.githubusercontent.com/aiembassy/hatespeech-corpus-pl/master/data/fragment_anotatora_2012_luty.csv": {"num_bytes": 904314, "checksum": "1c3f3ce27422ef9cf8e0eae10675f55d0c7463ce31e8e524d9ac29b25cad2f81"}}, "download_size": 3877954, "post_processing_size": null, "dataset_size": 3436190, "size_in_bytes": 7314144}} | 1,030 |
3,239 | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "llhttp.h"
#define CALLBACK_MAYBE(PARSER, NAME) \
do { \
const llhttp_settings_t* settings; \
settings = (const llhttp_settings_t*) (PARSER)->settings; \
if (settings == NULL || settings->NAME == NULL) { \
err = 0; \
break; \
} \
err = settings->NAME((PARSER)); \
} while (0)
#define SPAN_CALLBACK_MAYBE(PARSER, NAME, START, LEN) \
do { \
const llhttp_settings_t* settings; \
settings = (const llhttp_settings_t*) (PARSER)->settings; \
if (settings == NULL || settings->NAME == NULL) { \
err = 0; \
break; \
} \
err = settings->NAME((PARSER), (START), (LEN)); \
if (err == -1) { \
err = HPE_USER; \
llhttp_set_error_reason((PARSER), "Span callback error in " #NAME); \
} \
} while (0)
void llhttp_init(llhttp_t* parser, llhttp_type_t type,
const llhttp_settings_t* settings) {
llhttp__internal_init(parser);
parser->type = type;
parser->settings = (void*) settings;
}
#if defined(__wasm__)
extern int wasm_on_message_begin(llhttp_t * p);
extern int wasm_on_url(llhttp_t* p, const char* at, size_t length);
extern int wasm_on_status(llhttp_t* p, const char* at, size_t length);
extern int wasm_on_header_field(llhttp_t* p, const char* at, size_t length);
extern int wasm_on_header_value(llhttp_t* p, const char* at, size_t length);
extern int wasm_on_headers_complete(llhttp_t * p);
extern int wasm_on_body(llhttp_t* p, const char* at, size_t length);
extern int wasm_on_message_complete(llhttp_t * p);
const llhttp_settings_t wasm_settings = {
wasm_on_message_begin,
wasm_on_url,
wasm_on_status,
wasm_on_header_field,
wasm_on_header_value,
wasm_on_headers_complete,
wasm_on_body,
wasm_on_message_complete,
NULL,
NULL,
};
llhttp_t* llhttp_alloc(llhttp_type_t type) {
llhttp_t* parser = malloc(sizeof(llhttp_t));
llhttp_init(parser, type, &wasm_settings);
return parser;
}
void llhttp_free(llhttp_t* parser) {
free(parser);
}
/* Getters required by the WebAssembly build to read parser state */
uint8_t llhttp_get_type(llhttp_t* parser) {
return parser->type;
}
uint8_t llhttp_get_http_major(llhttp_t* parser) {
return parser->http_major;
}
uint8_t llhttp_get_http_minor(llhttp_t* parser) {
return parser->http_minor;
}
uint8_t llhttp_get_method(llhttp_t* parser) {
return parser->method;
}
int llhttp_get_status_code(llhttp_t* parser) {
return parser->status_code;
}
uint8_t llhttp_get_upgrade(llhttp_t* parser) {
return parser->upgrade;
}
#endif // defined(__wasm__)
void llhttp_reset(llhttp_t* parser) {
llhttp_type_t type = parser->type;
const llhttp_settings_t* settings = parser->settings;
void* data = parser->data;
uint8_t lenient_flags = parser->lenient_flags;
llhttp__internal_init(parser);
parser->type = type;
parser->settings = (void*) settings;
parser->data = data;
parser->lenient_flags = lenient_flags;
}
llhttp_errno_t llhttp_execute(llhttp_t* parser, const char* data, size_t len) {
return llhttp__internal_execute(parser, data, data + len);
}
void llhttp_settings_init(llhttp_settings_t* settings) {
memset(settings, 0, sizeof(*settings));
}
llhttp_errno_t llhttp_finish(llhttp_t* parser) {
int err;
/* We're in an error state. Don't bother doing anything. */
if (parser->error != 0) {
return 0;
}
switch (parser->finish) {
case HTTP_FINISH_SAFE_WITH_CB:
CALLBACK_MAYBE(parser, on_message_complete);
if (err != HPE_OK) return err;
/* FALLTHROUGH */
case HTTP_FINISH_SAFE:
return HPE_OK;
case HTTP_FINISH_UNSAFE:
parser->reason = "Invalid EOF state";
return HPE_INVALID_EOF_STATE;
default:
abort();
}
}
void llhttp_pause(llhttp_t* parser) {
if (parser->error != HPE_OK) {
return;
}
parser->error = HPE_PAUSED;
parser->reason = "Paused";
}
void llhttp_resume(llhttp_t* parser) {
if (parser->error != HPE_PAUSED) {
return;
}
parser->error = 0;
}
void llhttp_resume_after_upgrade(llhttp_t* parser) {
if (parser->error != HPE_PAUSED_UPGRADE) {
return;
}
parser->error = 0;
}
llhttp_errno_t llhttp_get_errno(const llhttp_t* parser) {
return parser->error;
}
const char* llhttp_get_error_reason(const llhttp_t* parser) {
return parser->reason;
}
void llhttp_set_error_reason(llhttp_t* parser, const char* reason) {
parser->reason = reason;
}
const char* llhttp_get_error_pos(const llhttp_t* parser) {
return parser->error_pos;
}
const char* llhttp_errno_name(llhttp_errno_t err) {
#define HTTP_ERRNO_GEN(CODE, NAME, _) case HPE_##NAME: return "HPE_" #NAME;
switch (err) {
HTTP_ERRNO_MAP(HTTP_ERRNO_GEN)
default: abort();
}
#undef HTTP_ERRNO_GEN
}
const char* llhttp_method_name(llhttp_method_t method) {
#define HTTP_METHOD_GEN(NUM, NAME, STRING) case HTTP_##NAME: return #STRING;
switch (method) {
HTTP_ALL_METHOD_MAP(HTTP_METHOD_GEN)
default: abort();
}
#undef HTTP_METHOD_GEN
}
void llhttp_set_lenient_headers(llhttp_t* parser, int enabled) {
if (enabled) {
parser->lenient_flags |= LENIENT_HEADERS;
} else {
parser->lenient_flags &= ~LENIENT_HEADERS;
}
}
void llhttp_set_lenient_chunked_length(llhttp_t* parser, int enabled) {
if (enabled) {
parser->lenient_flags |= LENIENT_CHUNKED_LENGTH;
} else {
parser->lenient_flags &= ~LENIENT_CHUNKED_LENGTH;
}
}
void llhttp_set_lenient_keep_alive(llhttp_t* parser, int enabled) {
if (enabled) {
parser->lenient_flags |= LENIENT_KEEP_ALIVE;
} else {
parser->lenient_flags &= ~LENIENT_KEEP_ALIVE;
}
}
/* Callbacks */
int llhttp__on_message_begin(llhttp_t* s, const char* p, const char* endp) {
int err;
CALLBACK_MAYBE(s, on_message_begin);
return err;
}
int llhttp__on_url(llhttp_t* s, const char* p, const char* endp) {
int err;
SPAN_CALLBACK_MAYBE(s, on_url, p, endp - p);
return err;
}
int llhttp__on_url_complete(llhttp_t* s, const char* p, const char* endp) {
int err;
CALLBACK_MAYBE(s, on_url_complete);
return err;
}
int llhttp__on_status(llhttp_t* s, const char* p, const char* endp) {
int err;
SPAN_CALLBACK_MAYBE(s, on_status, p, endp - p);
return err;
}
int llhttp__on_status_complete(llhttp_t* s, const char* p, const char* endp) {
int err;
CALLBACK_MAYBE(s, on_status_complete);
return err;
}
int llhttp__on_header_field(llhttp_t* s, const char* p, const char* endp) {
int err;
SPAN_CALLBACK_MAYBE(s, on_header_field, p, endp - p);
return err;
}
int llhttp__on_header_field_complete(llhttp_t* s, const char* p, const char* endp) {
int err;
CALLBACK_MAYBE(s, on_header_field_complete);
return err;
}
int llhttp__on_header_value(llhttp_t* s, const char* p, const char* endp) {
int err;
SPAN_CALLBACK_MAYBE(s, on_header_value, p, endp - p);
return err;
}
int llhttp__on_header_value_complete(llhttp_t* s, const char* p, const char* endp) {
int err;
CALLBACK_MAYBE(s, on_header_value_complete);
return err;
}
int llhttp__on_headers_complete(llhttp_t* s, const char* p, const char* endp) {
int err;
CALLBACK_MAYBE(s, on_headers_complete);
return err;
}
int llhttp__on_message_complete(llhttp_t* s, const char* p, const char* endp) {
int err;
CALLBACK_MAYBE(s, on_message_complete);
return err;
}
int llhttp__on_body(llhttp_t* s, const char* p, const char* endp) {
int err;
SPAN_CALLBACK_MAYBE(s, on_body, p, endp - p);
return err;
}
int llhttp__on_chunk_header(llhttp_t* s, const char* p, const char* endp) {
int err;
CALLBACK_MAYBE(s, on_chunk_header);
return err;
}
int llhttp__on_chunk_complete(llhttp_t* s, const char* p, const char* endp) {
int err;
CALLBACK_MAYBE(s, on_chunk_complete);
return err;
}
/* Private */
void llhttp__debug(llhttp_t* s, const char* p, const char* endp,
const char* msg) {
if (p == endp) {
fprintf(stderr, "p=%p type=%d flags=%02x next=null debug=%s\n", s, s->type,
s->flags, msg);
} else {
fprintf(stderr, "p=%p type=%d flags=%02x next=%02x debug=%s\n", s,
s->type, s->flags, *p, msg);
}
}
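/*
 * Hedged usage sketch (not part of the upstream source): a minimal caller of the API
 * implemented above. Names follow llhttp.h; the request buffer and the callback body are
 * illustrative only.
 *
 *   static int on_message_complete(llhttp_t* parser) {
 *     fprintf(stderr, "message complete, method=%u\n", parser->method);
 *     return 0;
 *   }
 *
 *   static void parse_example(void) {
 *     llhttp_t parser;
 *     llhttp_settings_t settings;
 *     llhttp_settings_init(&settings);             // zero all callbacks
 *     settings.on_message_complete = on_message_complete;
 *     llhttp_init(&parser, HTTP_REQUEST, &settings);
 *     const char* req = "GET / HTTP/1.1\r\nHost: example.org\r\n\r\n";
 *     llhttp_errno_t err = llhttp_execute(&parser, req, strlen(req));
 *     if (err != HPE_OK) {
 *       fprintf(stderr, "parse error: %s %s\n",
 *               llhttp_errno_name(err), llhttp_get_error_reason(&parser));
 *     }
 *   }
 */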
| 4,436 |
14,668 | <gh_stars>1000+
{"rss_subscription_name":{"message":"Rozszerzenie Subskrypcje RSS (od Google)"},"rss_subscription_description":{"message":"Dodaje do paska narz\u0119dzi mo\u017cliwo\u015b\u0107 subskrybowania jednym klikni\u0119ciem."},"rss_subscription_default_title":{"message":"Subskrybuj ten kana\u0142"},"rss_subscription_action_title":{"message":"Kliknij, aby zasubskrybowa\u0107..."},"rss_subscription_unknown_feed_name":{"message":"Nieznana nazwa kana\u0142u"},"rss_subscription_manage_label":{"message":"Zarz\u0105dzaj..."},"rss_subscription_feed_for":{"message":"Kana\u0142 dla $1","placeholders":{"1":{"content":"$1"}}},"rss_subscription_error_fetching":{"message":"B\u0142\u0105d podczas pobierania kana\u0142u."},"rss_subscription_not_valid_feed":{"message":"To nie jest prawid\u0142owy kana\u0142."},"rss_subscription_no_entries":{"message":"Ten kana\u0142 nie zawiera \u017cadnych wpis\u00f3w."},"rss_subscription_one_entry_required":{"message":"Ta lista musi zawiera\u0107 co najmniej jedn\u0105 pozycj\u0119"},"rss_subscription_remove_confirmation":{"message":"Czy na pewno chcesz usun\u0105\u0107 $1?","placeholders":{"1":{"content":"$1"}}},"rss_subscription_always_use":{"message":"Zawsze u\u017cywaj tego czytnika do subskrybowania kana\u0142\u00f3w."},"rss_subscription_always_use_default":{"message":"Zawsze u\u017cywaj mojego domy\u015blnego czytnika przy subskrybowaniu kana\u0142\u00f3w."},"rss_subscription_subscribe_using":{"message":"Zasubskrybuj ten kana\u0142 przy u\u017cyciu:"},"rss_subscription_subscribe_button":{"message":"Subskrybuj teraz"},"rss_subscription_feed_preview":{"message":"Podgl\u0105d kana\u0142u"},"rss_subscription_feed_link":{"message":"Kana\u0142"},"rss_subscription_options":{"message":"Opcje Subskrypcji RSS"},"rss_subscription_default":{"message":"(domy\u015blny)"},"rss_subscription_add_reader":{"message":"Dodaj..."},"rss_subscription_edit_reader":{"message":"Edytuj..."},"rss_subscription_remove_reader":{"message":"Usu\u0144..."},"rss_subscription_make_default_reader":{"message":"Ustaw jako domy\u015blny"},"rss_subscription_reset_list":{"message":"Zresetuj list\u0119..."},"rss_subscription_no_localstorage":{"message":"Zmiana opcji wymaga w\u0142\u0105czenia pami\u0119ci lokalnej."},"rss_subscription_reset_list_confirm":{"message":"Czy na pewno chcesz cofn\u0105\u0107 wszelkie zmiany wprowadzone na tej li\u015bcie?"},"rss_subscription_remove_confirm":{"message":"Czy na pewno chcesz usun\u0105\u0107 \u201e$1\u201d?","placeholders":{"1":{"content":"$1"}}},"rss_subscription_edit_dialog_title":{"message":"Edytuj czytnik kana\u0142\u00f3w"},"rss_subscription_feed_description":{"message":"Opis:"},"rss_subscription_feed_url":{"message":"Adres URL:"},"rss_subscription_feed_url_assist":{"message":"Wstaw %s w adresie URL, gdzie powinien znajdowa\u0107 si\u0119 adres URL kana\u0142u."},"rss_subscription_save_button":{"message":"Zapisz"},"rss_subscription_close_button":{"message":"Zamknij"},"rss_subscription_feed_preview_count":{"message":"Liczba kana\u0142\u00f3w do wy\u015bwietlania na stronie podgl\u0105du:"}}
| 1,158 |
5,169 | <gh_stars>1000+
{
"name": "LDOptionButton",
"version": "0.1.0",
"summary": "Clean and beautiful menu option button.",
"description": "Clean and beautiful menu option button written in Swift.",
"homepage": "https://github.com/lajosdeme/LDOptionButton",
"screenshots": [
"https://user-images.githubusercontent.com/44027725/113474138-521edd80-946e-11eb-90dc-86a3dd105b4e.gif",
"https://user-images.githubusercontent.com/44027725/113474170-8c887a80-946e-11eb-9201-36040a7e246b.gif"
],
"license": {
"type": "MIT",
"file": "LICENSE"
},
"authors": {
"lajosdeme": "<EMAIL>"
},
"platforms": {
"ios": "14.2"
},
"source": {
"git": "https://github.com/lajosdeme/LDOptionButton.git",
"tag": "0.1.0"
},
"source_files": "LDOptionButton/**/*.{h,m}"
}
| 343 |
988 | {
".nuclide-refactorizer-rename-container": {
"enter": "core:confirm"
},
"atom-text-editor:not(.mini).enable-nuclide-refactorizer": {
"cmd-alt-r": "nuclide-refactorizer:rename"
},
".platform-darwin atom-text-editor:not(.mini).enable-nuclide-refactorizer": {
"cmd-ctrl-r": "nuclide-refactorizer:refactorize",
"cmd-alt-r": "nuclide-refactorizer:rename"
},
".platform-win32 atom-text-editor:not(.mini).enable-nuclide-refactorizer, .platform-linux atom-text-editor:not(.mini).enable-nuclide-refactorizer": {
"ctrl-alt-r": "nuclide-refactorizer:refactorize",
"cmd-alt-r": "nuclide-refactorizer:rename"
}
}
| 271 |
438 | <filename>tests/url_fixtures.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
INVALID_URLS = [
'http://',
'http://.',
'http://..',
'http://../',
'http://?',
'http://??',
'http://??/',
'http://#',
'http://##',
'http://##/',
'http://foo.bar?q=Spaces should be encoded',
'//',
'//a',
'///a',
'///',
'http:///a',
'foo.com',
'rdar://1234',
'h://test',
'http:// shouldfail.com',
':// should fail',
'http://foo.bar/foo(bar)baz quux',
'htto://foo.bar/',
'http://-error-.invalid/',
'http://-a.b.co',
'http://a.b-.co',
'http://.www.foo.bar/',
]
VALID_URLS = [
    'http://foo.com/blah_blah',
    'http://foo.com/blah_blah/',
'http://foo.com/blah_blah_(wikipedia)',
'http://foo.com/blah_blah_(wikipedia)_(again)',
'http://www.example.com/wpstyle/?p=364',
'https://www.example.com/foo/?bar=baz&inga=42&quux',
'http://172.16.31.10/',
'http://172.16.31.10:8080/',
'http://foo.com/blah_(wikipedia)#cite-132',
'http://foo.com/blah_(wikipedia)_blah#cite-1',
u'http://foo.com/unicode_(✪)_in_parens',
'http://foo.com/(something)?after=parens',
'http://sub.damowmow.com/',
'http://code.google.com/events/#&product=browser',
    'http://j.mp',
    'ftp://foo.bar/baz',
'http://foo.bar/?q=Test%20URL-encoded%20stuff',
'http://1337.net',
'http://a.b-c.de',
'http://172.16.58.3',
'http://a.b--c.de/',
]
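# --- Hedged usage sketch (not part of the original fixtures) ----------------
# A minimal illustration of how these lists might drive a parametrized test.
# `is_valid_url` is a hypothetical predicate from the project under test; it
# is not defined in this module.
#
# import pytest
# from myproject.validation import is_valid_url  # hypothetical import
#
# @pytest.mark.parametrize('url', VALID_URLS)
# def test_accepts_valid_urls(url):
#     assert is_valid_url(url)
#
# @pytest.mark.parametrize('url', INVALID_URLS)
# def test_rejects_invalid_urls(url):
#     assert not is_valid_url(url)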
| 733 |
9,267 | <filename>tests/functional/scripts/pkgutil_get_data.py
#-----------------------------------------------------------------------------
# Copyright (c) 2013-2021, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
import pkgutil
import pkg3 # noqa: F401
expected_data = 'This is data text for testing the packaging module data.'.encode('ascii')
data = pkgutil.get_data('pkg3', 'sample-data.txt')
if not data:
raise SystemExit('Error: Could not read data with pkgutil.get_data().')
if data.strip() != expected_data:
raise SystemExit('Error: Read data is wrong: %r' % data)
print('Okay: Resource data read.')
| 264 |
1,535 | <reponame>react-native-community/react-native-share<gh_stars>1000+
package cl.json.social;
import android.content.ActivityNotFoundException;
import com.facebook.react.bridge.ReactApplicationContext;
import com.facebook.react.bridge.ReadableMap;
/**
* Created by Jacob on 06-06-2022.
*/
public class ViberShare extends SingleShareIntent {
private static final String PACKAGE = "com.viber.voip";
private static final String PLAY_STORE_LINK = "market://details?id=com.viber.voip";
public ViberShare(ReactApplicationContext reactContext) {
super(reactContext);
}
@Override
public void open(ReadableMap options) throws ActivityNotFoundException {
super.open(options);
// extra params here
this.openIntentChooser();
}
@Override
protected String getPackage() {
return PACKAGE;
}
@Override
protected String getDefaultWebLink() {
return null;
}
@Override
protected String getPlayStoreLink() {
return PLAY_STORE_LINK;
}
}
| 372 |
1,179 | <reponame>fengjixuchui/hypervisor-2
/// @copyright
/// Copyright (C) 2020 Assured Information Security, Inc.
///
/// @copyright
/// Permission is hereby granted, free of charge, to any person obtaining a copy
/// of this software and associated documentation files (the "Software"), to deal
/// in the Software without restriction, including without limitation the rights
/// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
/// copies of the Software, and to permit persons to whom the Software is
/// furnished to do so, subject to the following conditions:
///
/// @copyright
/// The above copyright notice and this permission notice shall be included in
/// all copies or substantial portions of the Software.
///
/// @copyright
/// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
/// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
/// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
/// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
/// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
/// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
/// SOFTWARE.
#ifndef MOCKS_VS_POOL_T_HPP
#define MOCKS_VS_POOL_T_HPP
#include <allocated_status_t.hpp>
#include <bf_syscall_t.hpp>
#include <gs_t.hpp>
#include <intrinsic_t.hpp>
#include <tls_t.hpp>
#include <tuple>
#include <bsl/discard.hpp>
#include <bsl/safe_integral.hpp>
#include <bsl/unordered_map.hpp>
namespace example
{
/// <!-- description -->
/// @brief Defines the extension's VS pool
///
class vs_pool_t final
{
/// @brief stores the id that will be returned on allocate
bsl::safe_u16 m_id{};
/// @brief stores the return value of allocate()
bsl::unordered_map<std::tuple<bsl::safe_u16, bsl::safe_u16>, bool> m_allocate_fails{};
/// @brief stores the return value of is_allocated()
bsl::unordered_map<bsl::safe_u16, allocated_status_t> m_allocated{};
/// @brief stores the return value of assigned_vp()
bsl::unordered_map<bsl::safe_u16, bsl::safe_u16> m_assigned_vpid{};
/// @brief stores the return value of assigned_pp()
bsl::unordered_map<bsl::safe_u16, bsl::safe_u16> m_assigned_ppid{};
public:
/// <!-- description -->
/// @brief Initializes this vs_pool_t
///
/// <!-- inputs/outputs -->
/// @param gs the gs_t to use
/// @param tls the tls_t to use
/// @param sys the bf_syscall_t to use
/// @param intrinsic the intrinsic_t to use
///
static constexpr void
initialize(
gs_t const &gs,
tls_t const &tls,
syscall::bf_syscall_t const &sys,
intrinsic_t const &intrinsic) noexcept
{
bsl::discard(gs);
bsl::discard(tls);
bsl::discard(sys);
bsl::discard(intrinsic);
}
/// <!-- description -->
        /// @brief Allocates a VS and returns its ID
///
/// <!-- inputs/outputs -->
/// @param gs the gs_t to use
/// @param tls the tls_t to use
/// @param sys the bf_syscall_t to use
/// @param intrinsic the intrinsic_t to use
        /// @param vpid the ID of the VP to assign the newly created VS to
/// @param ppid the ID of the PP to assign the newly created VS to
/// @return Returns ID of the newly allocated vs_t. Returns
/// bsl::safe_u16::failure() on failure.
///
[[nodiscard]] constexpr auto
allocate(
gs_t const &gs,
tls_t const &tls,
syscall::bf_syscall_t const &sys,
intrinsic_t const &intrinsic,
bsl::safe_u16 const &vpid,
bsl::safe_u16 const &ppid) noexcept -> bsl::safe_u16
{
bsl::discard(gs);
bsl::discard(tls);
bsl::discard(sys);
bsl::discard(intrinsic);
if (m_allocate_fails.at({vpid, ppid})) {
return bsl::safe_u16::failure();
}
auto const id{m_id};
m_id = (m_id + bsl::safe_u16::magic_1()).checked();
m_assigned_vpid.at(id) = vpid;
m_assigned_ppid.at(id) = ppid;
m_allocated.at(id) = allocated_status_t::allocated;
return id;
}
/// <!-- description -->
/// @brief Tells the allocate() function to return a failure
/// for the provided vpid/ppid combo.
///
/// <!-- inputs/outputs -->
        /// @param vpid the ID of the VP to assign the newly created VS to
/// @param ppid the ID of the PP to assign the newly created VS to
///
constexpr void
set_allocate_fails(bsl::safe_u16 const &vpid, bsl::safe_u16 const &ppid) noexcept
{
m_allocate_fails.at({vpid, ppid}) = true;
}
/// <!-- description -->
/// @brief Deallocates the requested vs_t
///
/// <!-- inputs/outputs -->
/// @param gs the gs_t to use
/// @param tls the tls_t to use
/// @param sys the bf_syscall_t to use
/// @param intrinsic the intrinsic_t to use
/// @param vsid the ID of the vs_t to deallocate
///
constexpr void
deallocate(
gs_t const &gs,
tls_t const &tls,
syscall::bf_syscall_t const &sys,
intrinsic_t const &intrinsic,
bsl::safe_u16 const &vsid) noexcept
{
bsl::discard(gs);
bsl::discard(tls);
bsl::discard(sys);
bsl::discard(intrinsic);
bsl::discard(m_assigned_vpid.erase(vsid));
bsl::discard(m_assigned_ppid.erase(vsid));
bsl::discard(m_allocated.erase(vsid));
}
/// <!-- description -->
/// @brief Returns true if the requested vs_t is allocated,
/// false otherwise
///
/// <!-- inputs/outputs -->
/// @param vsid the ID of the vs_t to query
/// @return Returns true if the requested vs_t is allocated,
/// false otherwise
///
[[nodiscard]] constexpr auto
is_allocated(bsl::safe_u16 const &vsid) const noexcept -> bool
{
return m_allocated.at(vsid) == allocated_status_t::allocated;
}
/// <!-- description -->
/// @brief Returns true if the requested vs_t is deallocated,
/// false otherwise
///
/// <!-- inputs/outputs -->
/// @param vsid the ID of the vs_t to query
/// @return Returns true if the requested vs_t is deallocated,
/// false otherwise
///
[[nodiscard]] constexpr auto
is_deallocated(bsl::safe_u16 const &vsid) const noexcept -> bool
{
return m_allocated.at(vsid) == allocated_status_t::deallocated;
}
/// <!-- description -->
        /// @brief Returns the ID of the VP the requested vs_t is assigned
/// to. If the vs_t is not assigned, syscall::BF_INVALID_ID is
/// returned.
///
/// <!-- inputs/outputs -->
/// @param vsid the ID of the vs_t to query
        /// @return Returns the ID of the VP the requested vs_t is assigned
/// to. If the vs_t is not assigned, syscall::BF_INVALID_ID is
/// returned.
///
[[nodiscard]] constexpr auto
assigned_vp(bsl::safe_u16 const &vsid) const noexcept -> bsl::safe_u16
{
if (!m_assigned_vpid.contains(vsid)) {
return syscall::BF_INVALID_ID;
}
return m_assigned_vpid.at(vsid);
}
/// <!-- description -->
/// @brief Returns the ID of the PP the requested vs_t is assigned
/// to. If the vs_t is not assigned, syscall::BF_INVALID_ID is
/// returned.
///
/// <!-- inputs/outputs -->
/// @param vsid the ID of the vs_t to query
/// @return Returns the ID of the PP the requested vs_t is assigned
/// to. If the vs_t is not assigned, syscall::BF_INVALID_ID is
/// returned.
///
[[nodiscard]] constexpr auto
assigned_pp(bsl::safe_u16 const &vsid) const noexcept -> bsl::safe_u16
{
if (!m_assigned_ppid.contains(vsid)) {
return syscall::BF_INVALID_ID;
}
return m_assigned_ppid.at(vsid);
}
};
}
#endif
| 4,016 |
764 | <gh_stars>100-1000
{
"symbol": "FK",
"address": "0xF6Fe745e5647298639072D942E0eb4f2E3930ECB",
"overview":{
"en": "FK is the Taiwan dollar of the FKEx exchange, with a total issue of 900 million pieces, and is guaranteed never to issue more.",
"zh": "FK是FKEx交易所的平台币,发行总量为9亿枚,且保证永不增发。"
},
"email": "<EMAIL>",
"website": "https://fkex.co/",
"whitepaper": "https://fkex.co/files/whitepaper/FKEX.pdf",
"state": "NORMAL",
"published_on": "2019-12-31",
"links": {
"telegram": "https://t.me/FKExOfficial",
"twitter": "https://twitter.com/fkex9"
}
} | 335 |
841 | <reponame>brunolmfg/resteasy
package org.jboss.resteasy.test.form.resource;
import org.jboss.resteasy.annotations.Form;
import org.junit.Assert;
import jakarta.ws.rs.Consumes;
import jakarta.ws.rs.POST;
import jakarta.ws.rs.Path;
import jakarta.ws.rs.core.MediaType;
@Path("/")
public class CollectionsFormResource {
private static final String ERROR_MESSAGE = "Wrong form parameter";
@Path("/person")
@POST
@Consumes(MediaType.APPLICATION_FORM_URLENCODED)
public void post(@Form CollectionsFormPerson p) {
Assert.assertEquals(ERROR_MESSAGE, 2, p.telephoneNumbers.size());
Assert.assertEquals(ERROR_MESSAGE, 2, p.adresses.size());
Assert.assertEquals(ERROR_MESSAGE, "31", p.telephoneNumbers.get(0).countryCode);
Assert.assertEquals(ERROR_MESSAGE, "91", p.telephoneNumbers.get(1).countryCode);
Assert.assertEquals(ERROR_MESSAGE, "Main Street", p.adresses.get("INVOICE").street);
Assert.assertEquals(ERROR_MESSAGE, "Square One", p.adresses.get("SHIPPING").street);
}
}
| 404 |
817 | #####################################################
# Copyright (c) <NAME> [GitHub D-X-Y], 2021.03 #
#####################################################
# pytest ./tests/test_super_rearrange.py -s #
#####################################################
import unittest
import torch
from xautodl import xlayers
class TestSuperReArrange(unittest.TestCase):
"""Test the super re-arrange layer."""
def test_super_re_arrange(self):
layer = xlayers.SuperReArrange(
"b c (h p1) (w p2) -> b (h w) (c p1 p2)", p1=4, p2=4
)
tensor = torch.rand((8, 4, 32, 32))
print("The tensor shape: {:}".format(tensor.shape))
print(layer)
outs = layer(tensor)
print("The output tensor shape: {:}".format(outs.shape))
assert tuple(outs.shape) == (8, 32 * 32 // 16, 4 * 4 * 4)
| 337 |
1,244 | <filename>src/ic/arm/access-compiler-arm.cc
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#if V8_TARGET_ARCH_ARM
#include "src/ic/access-compiler.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm)
void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
Handle<Code> code) {
__ Jump(code, RelocInfo::CODE_TARGET);
}
Register* PropertyAccessCompiler::load_calling_convention() {
// receiver, name, scratch1, scratch2, scratch3.
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
static Register registers[] = {receiver, name, r3, r0, r4};
return registers;
}
Register* PropertyAccessCompiler::store_calling_convention() {
// receiver, name, scratch1, scratch2.
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
static Register registers[] = {receiver, name, r3, r4};
return registers;
}
#undef __
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM
| 417 |
4,200 | package com.dtolabs.rundeck.core.storage;
/**
* Listener for changes to StorageManager resources
*/
public interface StorageManagerListener {
void resourceCreated(String path);
void resourceDeleted(String path);
void resourceModified(String path);
}
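/*
 * Illustrative sketch, not part of the original interface: a minimal listener
 * that simply logs each change. How the listener gets registered with the
 * StorageManager is an assumption and depends on the surrounding Rundeck APIs.
 *
 *   StorageManagerListener logging = new StorageManagerListener() {
 *       public void resourceCreated(String path)  { System.out.println("created "  + path); }
 *       public void resourceDeleted(String path)  { System.out.println("deleted "  + path); }
 *       public void resourceModified(String path) { System.out.println("modified " + path); }
 *   };
 */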
| 76 |
1,016 | package com.thinkbiganalytics.nifi.v2.core.feedinit;
/*-
* #%L
* thinkbig-nifi-core-service
* %%
* Copyright (C) 2017 ThinkBig Analytics
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
import com.thinkbiganalytics.jms.JmsConstants;
import com.thinkbiganalytics.metadata.event.jms.MetadataTopics;
import com.thinkbiganalytics.metadata.rest.model.event.FeedInitializationChangeEvent;
import com.thinkbiganalytics.nifi.core.api.metadata.MetadataRecorder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.jms.annotation.JmsListener;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
/**
* Consumes the precondition events in JMS
*/
public class FeedInitializationChangeEventConsumer {
private static final Logger LOG = LoggerFactory.getLogger(FeedInitializationChangeEventConsumer.class);
private Queue<MetadataRecorder> metadataRecorders = new ConcurrentLinkedQueue<>();
/**
* default constructor
*/
public FeedInitializationChangeEventConsumer() {
super();
LOG.debug("New FeedInitializationChangeEventConsumer {}", this);
}
public FeedInitializationChangeEventConsumer(MetadataRecorder recorder) {
this.metadataRecorders.add(recorder);
}
@JmsListener(destination = MetadataTopics.FEED_INIT_STATUS_CHANGE, containerFactory = JmsConstants.TOPIC_LISTENER_CONTAINER_FACTORY)
public void receiveEvent(FeedInitializationChangeEvent event) {
LOG.debug("{} Received JMS message - topic: {}, message: {}", this, MetadataTopics.FEED_INIT_STATUS_CHANGE, event);
LOG.info("{} Received feed initialization status change event: {}", this, event);
if (this.metadataRecorders.isEmpty()) {
LOG.debug("No metadata recorder registerd yet - ingoring event: {}", event);
} else {
this.metadataRecorders.forEach(r -> r.initializationStatusChanged(event.getFeedId(), event.getStatus()));
}
}
public boolean addMetadataRecorder(MetadataRecorder recorder) {
return this.metadataRecorders.add(recorder);
}
public boolean removeMetadataRecorder(MetadataRecorder recorder) {
return this.metadataRecorders.remove(recorder);
}
}
| 915 |
358 | /*
* ColorBlendingFilter.cpp
*
* Created on: Sep 14, 2017
* Author: <NAME>
* Institute: ETH Zurich, ANYbotics
*/
#include "grid_map_filters/ColorBlendingFilter.hpp"
#include <math.h>
#include <Eigen/Dense>
#include <grid_map_core/grid_map_core.hpp>
using namespace filters;
namespace grid_map {
ColorBlendingFilter::ColorBlendingFilter() : opacity_(1.0), blendMode_(BlendModes::Normal) {}
ColorBlendingFilter::~ColorBlendingFilter() = default;
bool ColorBlendingFilter::configure() {
if (!FilterBase::getParam(std::string("background_layer"), backgroundLayer_)) {
ROS_ERROR("Color blending filter did not find parameter `background_layer`.");
return false;
}
ROS_DEBUG("Color blending filter background layer is = %s.", backgroundLayer_.c_str());
if (!FilterBase::getParam(std::string("foreground_layer"), foregroundLayer_)) {
ROS_ERROR("Color blending filter did not find parameter `foreground_layer`.");
return false;
}
ROS_DEBUG("Color blending filter foreground layer is = %s.", foregroundLayer_.c_str());
std::string blendMode;
if (!FilterBase::getParam(std::string("blend_mode"), blendMode)) {
blendMode = "normal";
}
ROS_DEBUG("Color blending filter blend mode is = %s.", blendMode.c_str());
if (blendMode == "normal") {
blendMode_ = BlendModes::Normal;
} else if (blendMode == "hard_light") {
blendMode_ = BlendModes::HardLight;
} else if (blendMode == "soft_light") {
blendMode_ = BlendModes::SoftLight;
} else {
ROS_ERROR("Color blending filter blend mode `%s` does not exist.", blendMode.c_str());
return false;
}
if (!FilterBase::getParam(std::string("opacity"), opacity_)) {
ROS_ERROR("Color blending filter did not find parameter `opacity`.");
return false;
}
ROS_DEBUG("Color blending filter opacity is = %f.", opacity_);
if (!FilterBase::getParam(std::string("output_layer"), outputLayer_)) {
ROS_ERROR("Color blending filter did not find parameter `output_layer`.");
return false;
}
ROS_DEBUG("Color blending filter output_layer = %s.", outputLayer_.c_str());
return true;
}
bool ColorBlendingFilter::update(const GridMap& mapIn, GridMap& mapOut) {
const auto& background = mapIn[backgroundLayer_];
const auto& foreground = mapIn[foregroundLayer_];
mapOut = mapIn;
mapOut.add(outputLayer_);
auto& output = mapOut[outputLayer_];
// For each cell in map.
for (Eigen::Index i = 0; i < output.size(); ++i) {
if (std::isnan(background(i))) {
output(i) = foreground(i);
} else if (std::isnan(foreground(i))) {
output(i) = background(i);
} else {
Eigen::Array3f backgroundColor;
Eigen::Array3f foregroundColor;
Eigen::Array3f outputColor;
Eigen::Vector3f color;
colorValueToVector(background(i), color);
backgroundColor = color.array();
colorValueToVector(foreground(i), color);
foregroundColor = color.array();
switch (blendMode_) {
case BlendModes::Normal:
outputColor = (1.0 - opacity_) * backgroundColor + opacity_ * foregroundColor;
break;
case BlendModes::HardLight: {
Eigen::Array3f blendedColor;
if (foregroundColor.mean() < 0.5) {
blendedColor = 2.0 * backgroundColor * foregroundColor;
} else {
blendedColor = 1.0 - 2.0 * (1.0 - backgroundColor) * (1.0 - foregroundColor);
}
if (opacity_ == 1.0) {
outputColor = blendedColor;
} else {
outputColor = (1.0 - opacity_) * backgroundColor + opacity_ * blendedColor;
}
break;
}
case BlendModes::SoftLight: {
Eigen::Array3f blendedColor;
// Photoshop.
// if (foregroundColor.mean() < 0.5) {
// blendedColor = 2.0 * backgroundColor * foregroundColor + backgroundColor.square() * (1.0 - 2.0 * foregroundColor);
// } else {
// blendedColor = 2.0 * backgroundColor * (1.0 - foregroundColor) + backgroundColor.sqrt() * (2.0 * foregroundColor
// - 1.0);
// }
// Pegtop.
blendedColor = ((1.0 - 2.0 * foregroundColor) * backgroundColor.square() + 2.0 * backgroundColor * foregroundColor);
if (opacity_ == 1.0) {
outputColor = blendedColor;
} else {
outputColor = (1.0 - opacity_) * backgroundColor + opacity_ * blendedColor;
}
break;
}
}
colorVectorToValue(Eigen::Vector3f(outputColor), output(i));
}
}
return true;
}
} // namespace grid_map
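// Illustrative configuration sketch, not part of the original source: a filter
// chain entry wiring up the parameters read in configure() above. The plugin
// type string and all layer names are assumptions.
//
//   - name: color_blending
//     type: gridMapFilters/ColorBlendingFilter
//     params:
//       background_layer: color
//       foreground_layer: overlay
//       blend_mode: soft_light   # one of: normal, hard_light, soft_light
//       opacity: 0.5
//       output_layer: blended_color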
| 1,828 |
460 | <filename>trunk/win/Source/BT_ThemeManager.h
// Copyright 2012 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BT_THEMEMANAGER
#define BT_THEMEMANAGER
#include "BT_Singleton.h"
#include "BT_ColorVal.h"
// -----------------------------------------------------------------------------
class ThemeEventHandler
{
public:
virtual void onThemeChanged() = 0;
};
// -----------------------------------------------------------------------------
class ThemeManager
{
Q_DECLARE_TR_FUNCTIONS(ThemeManager)
Json::Value _root;
Json::Reader _reader;
bool _loaded;
typedef QHash<QString, pair<unsigned int, Json::Value> > CacheContainer;
CacheContainer _cache;
unsigned int _maxCacheLimit;
// events
set<ThemeEventHandler *> _handlers;
// Singleton
friend class Singleton<ThemeManager>;
ThemeManager();
private:
bool validateThemeAgainstSchema(const Json::Value& root);
bool validateValuesRecursive(const Json::Value& root, const Json::Value& schemaRoot, QString pathToRoot);
bool mergeValuesRecursive(Json::Value& to, const Json::Value& from, QDir toRootPath, QDir fromRootPath);
bool mergeValues(QFileInfo themeDescPath, QDir defaultThemePath, QDir backupThemePath);
bool overrideValuesRecursive(const Json::Value& from, Json::Value& to);
void cacheValue(QString keyPath, const Json::Value& value);
void notifyOnChange();
public:
~ThemeManager();
// operations
bool reloadDefaultTheme(bool notifyAllHandlers=true);
void registerThemeEventHandler(ThemeEventHandler * handler);
void unregisterThemeEventHandler(ThemeEventHandler * handler);
// accessors
Json::Value getValue(QString keyPath);
// used to resolve arrays/compound properties as bt objects
ColorVal getValueAsColor(QString keyPath, ColorVal defaultVal);
QString getValueAsQString(QString keyPath, QString defaultVal);
QString getValueAsFontFamilyName(QString keyPath, QString defaultVal);
bool getValueAsBool(QString keyPath, bool defaultVal);
int getValueAsInt(QString keyPath, int defaultVal);
double getValueAsDouble(QString keyPath, double defaultVal);
const char * getValueAsCString(QString keyPath, const char * defaultVal);
uint getValueAsUInt(QString keyPath, uint defaultVal);
};
// -----------------------------------------------------------------------------
#define themeManager Singleton<ThemeManager>::getSharedInstance()
// -----------------------------------------------------------------------------
#endif // BT_THEMEMANAGER | 920 |
1,444 |
package mage.game.permanent.token;
import mage.MageInt;
import mage.constants.CardType;
import mage.constants.SubType;
/**
*
* @author Styxo
*/
public final class GremlinToken extends TokenImpl {
public GremlinToken() {
super("Gremlin", "2/2 red Gremlin creature token");
cardType.add(CardType.CREATURE);
this.setOriginalExpansionSetCode("AER");
subtype.add(SubType.GREMLIN);
color.setRed(true);
power = new MageInt(2);
toughness = new MageInt(2);
}
public GremlinToken(final GremlinToken token) {
super(token);
}
public GremlinToken copy() {
return new GremlinToken(this);
}
}
| 277 |
436 | """Module containing cache implementations."""
from collections import OrderedDict
from threading import RLock
from typing import Any
class LRUCache:
"""A simple LRU cache implementation
:param capacity: Maximum number of entries to keep.
"""
_cache: OrderedDict
def __init__(self, capacity: int) -> None:
self._lock = RLock()
self._cache = OrderedDict()
self.capacity = capacity
def get(self, key: Any) -> Any:
"""
Get the cached value for a key. Mark as most recently used.
:param key: Key to query.
:returns: Cached value or None.
"""
with self._lock:
try:
self._cache.move_to_end(key)
return self._cache[key]
except KeyError:
return None
def put(self, key: Any, value: Any) -> None:
"""
Set the cached value for a key. Mark as most recently used.
:param key: Key to use. Must be hashable.
:param value: Value to cache.
"""
with self._lock:
self._cache[key] = value
self._cache.move_to_end(key)
if len(self._cache) > self.capacity:
self._cache.popitem(last=False)
def clear(self) -> None:
"""
Clears the cache.
"""
with self._lock:
self._cache.clear()
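# Illustrative usage sketch, not part of the original module: exercises the
# LRUCache defined above; a capacity of 2 forces eviction of the least recently
# used key once a third entry is added.
if __name__ == "__main__":
    cache = LRUCache(capacity=2)
    cache.put("a", 1)
    cache.put("b", 2)
    cache.get("a")          # "a" becomes the most recently used entry
    cache.put("c", 3)       # evicts "b", the least recently used entry
    assert cache.get("b") is None
    assert cache.get("a") == 1
    assert cache.get("c") == 3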
| 622 |
676 | package com.alorma.github.ui.actions;
import android.content.Context;
import android.support.annotation.NonNull;
import android.support.annotation.Nullable;
import android.support.v4.util.Pair;
import com.afollestad.materialdialogs.MaterialDialog;
import com.afollestad.materialdialogs.Theme;
import com.alorma.github.R;
import com.alorma.github.sdk.bean.info.IssueInfo;
import com.alorma.github.sdk.services.repo.GetRepoCollaboratorsClient;
import com.alorma.github.ui.utils.DialogUtils;
import core.User;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import rx.Observable;
import rx.Subscriber;
import rx.android.schedulers.AndroidSchedulers;
import rx.schedulers.Schedulers;
public class CollaboratorsPickerAction extends Action<Pair<List<User>, List<User>>> {
private Context context;
private List<User> currentAssignees;
private IssueInfo issueInfo;
private MaterialDialog dialog;
public CollaboratorsPickerAction(Context context, List<User> currentAssignees, IssueInfo issueInfo) {
this.context = context;
this.currentAssignees = currentAssignees;
this.issueInfo = issueInfo;
}
@Override
public Action<Pair<List<User>, List<User>>> execute() {
dialog = new DialogUtils().builder(context).content(R.string.loading_collaborators).progress(true, 0).theme(Theme.DARK).show();
GetRepoCollaboratorsClient contributorsClient = new GetRepoCollaboratorsClient(issueInfo.repoInfo);
contributorsClient.observable()
.subscribeOn(Schedulers.io())
.observeOn(AndroidSchedulers.mainThread())
.subscribe(new Subscriber<List<User>>() {
@Override
public void onCompleted() {
}
@Override
public void onError(Throwable e) {
}
@Override
public void onNext(List<User> users) {
showDialogSelect(users);
}
});
return this;
}
private void showDialogSelect(final List<User> users) {
if (dialog != null) {
dialog.dismiss();
}
if (users != null) {
Collections.reverse(users);
List<String> names = getNames(users);
Integer[] selectedIndices = getSelectedUsers(users);
MaterialDialog.Builder builder = new DialogUtils().builder(context);
builder.items(names);
builder.itemsCallbackMultiChoice(selectedIndices, (dialog1, which, text) -> {
Map<Integer, User> mapUser = new HashMap<>(users.size());
for (int i = 0; i < users.size(); i++) {
mapUser.put(i, users.get(i));
}
List<User> selectedUsers = new ArrayList<>();
for (Integer integer : which) {
selectedUsers.add(mapUser.get(integer));
mapUser.remove(integer);
}
Observable<List<User>> added = Observable.just(selectedUsers);
Observable<List<User>> removed = Observable.just(mapUser.values()).map(ArrayList::new);
Observable.zip(added, removed, Pair::new).subscribe(CollaboratorsPickerAction.this);
return true;
});
builder.positiveText(R.string.ok);
builder.negativeText(R.string.no_assignee);
builder.onNegative((dialog1, which) -> Observable.<Pair<List<User>, List<User>>>just(null).subscribe(CollaboratorsPickerAction.this));
builder.show();
}
}
@NonNull
private List<String> getNames(List<User> users) {
List<String> names = new ArrayList<>(users.size());
for (User user : users) {
names.add(user.getLogin());
}
return names;
}
@Nullable
private Integer[] getSelectedUsers(List<User> users) {
Integer[] selectedIndices = null;
if (currentAssignees != null) {
selectedIndices = new Integer[currentAssignees.size()];
Map<Integer, Integer> mapIds = new HashMap<>();
for (int i = 0; i < users.size(); i++) {
mapIds.put(users.get(i).getId(), i);
}
for (int i = 0; i < currentAssignees.size(); i++) {
selectedIndices[i] = mapIds.get(currentAssignees.get(i).getId());
}
}
return selectedIndices;
}
@Override
public void onNext(Pair<List<User>, List<User>> users) {
if (getCallback() != null) {
getCallback().onResult(users);
}
}
}
| 1,651 |
460 | <filename>trunk/win/Source/Includes/QtIncludes/src/3rdparty/webkit/JavaScriptCore/parser/Lexer.h<gh_stars>100-1000
/*
* Copyright (C) 1999-2000 <NAME> (<EMAIL>)
* Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public License
* along with this library; see the file COPYING.LIB. If not, write to
* the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301, USA.
*
*/
#ifndef Lexer_h
#define Lexer_h
#include "Lookup.h"
#include "ParserArena.h"
#include "SourceCode.h"
#include <wtf/ASCIICType.h>
#include <wtf/SegmentedVector.h>
#include <wtf/Vector.h>
#include <wtf/unicode/Unicode.h>
namespace JSC {
class RegExp;
class Lexer : public Noncopyable {
public:
// Character manipulation functions.
static bool isWhiteSpace(int character);
static bool isLineTerminator(int character);
static unsigned char convertHex(int c1, int c2);
static UChar convertUnicode(int c1, int c2, int c3, int c4);
// Functions to set up parsing.
void setCode(const SourceCode&, ParserArena&);
void setIsReparsing() { m_isReparsing = true; }
// Functions for the parser itself.
int lex(void* lvalp, void* llocp);
int lineNumber() const { return m_lineNumber; }
bool prevTerminator() const { return m_terminator; }
SourceCode sourceCode(int openBrace, int closeBrace, int firstLine);
bool scanRegExp(const Identifier*& pattern, const Identifier*& flags, UChar patternPrefix = 0);
bool skipRegExp();
// Functions for use after parsing.
bool sawError() const { return m_error; }
void clear();
private:
friend class JSGlobalData;
Lexer(JSGlobalData*);
~Lexer();
void shift1();
void shift2();
void shift3();
void shift4();
void shiftLineTerminator();
void record8(int);
void record16(int);
void record16(UChar);
void copyCodeWithoutBOMs();
int currentOffset() const;
const UChar* currentCharacter() const;
const Identifier* makeIdentifier(const UChar* characters, size_t length);
bool lastTokenWasRestrKeyword() const;
static const size_t initialReadBufferCapacity = 32;
int m_lineNumber;
Vector<char> m_buffer8;
Vector<UChar> m_buffer16;
bool m_terminator;
bool m_delimited; // encountered delimiter like "'" and "}" on last run
int m_lastToken;
const SourceCode* m_source;
const UChar* m_code;
const UChar* m_codeStart;
const UChar* m_codeEnd;
bool m_isReparsing;
bool m_atLineStart;
bool m_error;
// current and following unicode characters (int to allow for -1 for end-of-file marker)
int m_current;
int m_next1;
int m_next2;
int m_next3;
IdentifierArena* m_arena;
JSGlobalData* m_globalData;
const HashTable m_keywordTable;
Vector<UChar> m_codeWithoutBOMs;
};
inline bool Lexer::isWhiteSpace(int ch)
{
return isASCII(ch) ? (ch == ' ' || ch == '\t' || ch == 0xB || ch == 0xC) : WTF::Unicode::isSeparatorSpace(ch);
}
inline bool Lexer::isLineTerminator(int ch)
{
return ch == '\r' || ch == '\n' || (ch & ~1) == 0x2028;
}
inline unsigned char Lexer::convertHex(int c1, int c2)
{
return (toASCIIHexValue(c1) << 4) | toASCIIHexValue(c2);
}
inline UChar Lexer::convertUnicode(int c1, int c2, int c3, int c4)
{
return (convertHex(c1, c2) << 8) | convertHex(c3, c4);
}
// A bridge for yacc from the C world to the C++ world.
inline int jscyylex(void* lvalp, void* llocp, void* globalData)
{
return static_cast<JSGlobalData*>(globalData)->lexer->lex(lvalp, llocp);
}
} // namespace JSC
#endif // Lexer_h
| 2,027 |
969 | // Copyright (c) 2010-2021, Lawrence Livermore National Security, LLC. Produced
// at the Lawrence Livermore National Laboratory. All Rights reserved. See files
// LICENSE and NOTICE for details. LLNL-CODE-806117.
//
// This file is part of the MFEM library. For more information and source code
// availability visit https://mfem.org.
//
// MFEM is free software; you can redistribute it and/or modify it under the
// terms of the BSD-3 license. We welcome feedback and contributions, see file
// CONTRIBUTING.md for details.
#ifndef MFEM_SLEPC
#define MFEM_SLEPC
#include "../config/config.hpp"
#ifdef MFEM_USE_SLEPC
#ifdef MFEM_USE_MPI
#include "petsc.hpp"
// Forward declaration of SLEPc's internal struct _p_EPS:
struct _p_EPS;
namespace mfem
{
// Declare an alias of SLEPc's EPS type, mfem::slepc::EPS:
namespace slepc { typedef struct ::_p_EPS *EPS; }
void MFEMInitializeSlepc();
void MFEMInitializeSlepc(int*,char***);
void MFEMInitializeSlepc(int*,char***,const char[],const char[]);
void MFEMFinalizeSlepc();
class SlepcEigenSolver
{
private:
/// Boolean to handle SetFromOptions calls
mutable bool clcustom;
/// SLEPc linear eigensolver object
slepc::EPS eps;
/// Real and imaginary part of eigenvector
mutable PetscParVector *VR, *VC;
public:
/// Constructors
SlepcEigenSolver(MPI_Comm comm, const std::string &prefix = std::string());
virtual ~SlepcEigenSolver();
/// Set solver tolerance
void SetTol(double tol);
/// Set maximum number of iterations
void SetMaxIter(int max_iter);
/// Set the number of required eigenmodes
void SetNumModes(int num_eigs);
/// Set operator for standard eigenvalue problem
void SetOperator(const PetscParMatrix &op);
/// Set operator for generalized eigenvalue problem
void SetOperators(const PetscParMatrix &op, const PetscParMatrix &opB);
/// Customize object with options set
void Customize(bool customize = true) const;
/// Solve the eigenvalue problem for the specified number of eigenvalues
void Solve();
/// Get the number of converged eigenvalues
int GetNumConverged();
/// Get the corresponding eigenvalue
void GetEigenvalue(unsigned int i, double & lr) const;
void GetEigenvalue(unsigned int i, double & lr, double & lc) const;
/// Get the corresponding eigenvector
void GetEigenvector(unsigned int i, Vector & vr) const;
void GetEigenvector(unsigned int i, Vector & vr, Vector & vc) const;
/// Target spectrum for the eigensolver. Target imaginary is not supported
/// without complex support in SLEPc, and intervals are not implemented.
enum Which
{
LARGEST_MAGNITUDE,
SMALLEST_MAGNITUDE,
LARGEST_REAL,
SMALLEST_REAL,
LARGEST_IMAGINARY,
SMALLEST_IMAGINARY,
TARGET_MAGNITUDE,
TARGET_REAL
};
enum SpectralTransformation
{
SHIFT,
SHIFT_INVERT
};
void SetWhichEigenpairs(Which which);
void SetTarget(double target);
void SetSpectralTransformation(SpectralTransformation transformation);
/// Conversion function to SLEPc's EPS type.
operator slepc::EPS() const { return eps; }
/// Conversion function to PetscObject
operator PetscObject() const {return (PetscObject)eps; }
};
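// Illustrative usage sketch, not part of the original header: solving for a few
// eigenvalues of an existing PetscParMatrix A, based only on the interface
// declared above; the matrix and the communicator are assumptions.
//
//   MFEMInitializeSlepc(&argc, &argv);
//   {
//      SlepcEigenSolver eig(MPI_COMM_WORLD);
//      eig.SetOperator(A);
//      eig.SetNumModes(4);
//      eig.SetWhichEigenpairs(SlepcEigenSolver::SMALLEST_MAGNITUDE);
//      eig.Solve();
//      double lambda;
//      eig.GetEigenvalue(0, lambda);
//   }
//   MFEMFinalizeSlepc();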
}
#endif // MFEM_USE_MPI
#endif // MFEM_USE_SLEPC
#endif // MFEM_SLEPC
| 1,146 |
2,338 | //===- TypeToLLVM.cpp - type translation from MLIR to LLVM IR -===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "mlir/Target/LLVMIR/TypeToLLVM.h"
#include "mlir/Dialect/LLVMIR/LLVMTypes.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/MLIRContext.h"
#include "llvm/ADT/TypeSwitch.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"
using namespace mlir;
namespace mlir {
namespace LLVM {
namespace detail {
/// Support for translating MLIR LLVM dialect types to LLVM IR.
class TypeToLLVMIRTranslatorImpl {
public:
/// Constructs a class creating types in the given LLVM context.
TypeToLLVMIRTranslatorImpl(llvm::LLVMContext &context) : context(context) {}
/// Translates a single type.
llvm::Type *translateType(Type type) {
// If the conversion is already known, just return it.
if (knownTranslations.count(type))
return knownTranslations.lookup(type);
// Dispatch to an appropriate function.
llvm::Type *translated =
llvm::TypeSwitch<Type, llvm::Type *>(type)
.Case([this](LLVM::LLVMVoidType) {
return llvm::Type::getVoidTy(context);
})
.Case(
[this](Float16Type) { return llvm::Type::getHalfTy(context); })
.Case([this](BFloat16Type) {
return llvm::Type::getBFloatTy(context);
})
.Case(
[this](Float32Type) { return llvm::Type::getFloatTy(context); })
.Case([this](Float64Type) {
return llvm::Type::getDoubleTy(context);
})
.Case([this](Float80Type) {
return llvm::Type::getX86_FP80Ty(context);
})
.Case([this](Float128Type) {
return llvm::Type::getFP128Ty(context);
})
.Case([this](LLVM::LLVMPPCFP128Type) {
return llvm::Type::getPPC_FP128Ty(context);
})
.Case([this](LLVM::LLVMX86MMXType) {
return llvm::Type::getX86_MMXTy(context);
})
.Case([this](LLVM::LLVMTokenType) {
return llvm::Type::getTokenTy(context);
})
.Case([this](LLVM::LLVMLabelType) {
return llvm::Type::getLabelTy(context);
})
.Case([this](LLVM::LLVMMetadataType) {
return llvm::Type::getMetadataTy(context);
})
.Case<LLVM::LLVMArrayType, IntegerType, LLVM::LLVMFunctionType,
LLVM::LLVMPointerType, LLVM::LLVMStructType,
LLVM::LLVMFixedVectorType, LLVM::LLVMScalableVectorType,
VectorType>(
[this](auto type) { return this->translate(type); })
.Default([](Type t) -> llvm::Type * {
llvm_unreachable("unknown LLVM dialect type");
});
// Cache the result of the conversion and return.
knownTranslations.try_emplace(type, translated);
return translated;
}
private:
/// Translates the given array type.
llvm::Type *translate(LLVM::LLVMArrayType type) {
return llvm::ArrayType::get(translateType(type.getElementType()),
type.getNumElements());
}
/// Translates the given function type.
llvm::Type *translate(LLVM::LLVMFunctionType type) {
SmallVector<llvm::Type *, 8> paramTypes;
translateTypes(type.getParams(), paramTypes);
return llvm::FunctionType::get(translateType(type.getReturnType()),
paramTypes, type.isVarArg());
}
/// Translates the given integer type.
llvm::Type *translate(IntegerType type) {
return llvm::IntegerType::get(context, type.getWidth());
}
/// Translates the given pointer type.
llvm::Type *translate(LLVM::LLVMPointerType type) {
return llvm::PointerType::get(translateType(type.getElementType()),
type.getAddressSpace());
}
/// Translates the given structure type, supports both identified and literal
/// structs. This will _create_ a new identified structure every time, use
/// `convertType` if a structure with the same name must be looked up instead.
llvm::Type *translate(LLVM::LLVMStructType type) {
SmallVector<llvm::Type *, 8> subtypes;
if (!type.isIdentified()) {
translateTypes(type.getBody(), subtypes);
return llvm::StructType::get(context, subtypes, type.isPacked());
}
llvm::StructType *structType =
llvm::StructType::create(context, type.getName());
// Mark the type we just created as known so that recursive calls can pick
// it up and use directly.
knownTranslations.try_emplace(type, structType);
if (type.isOpaque())
return structType;
translateTypes(type.getBody(), subtypes);
structType->setBody(subtypes, type.isPacked());
return structType;
}
/// Translates the given built-in vector type compatible with LLVM.
llvm::Type *translate(VectorType type) {
assert(LLVM::isCompatibleVectorType(type) &&
"expected compatible with LLVM vector type");
return llvm::FixedVectorType::get(translateType(type.getElementType()),
type.getNumElements());
}
/// Translates the given fixed-vector type.
llvm::Type *translate(LLVM::LLVMFixedVectorType type) {
return llvm::FixedVectorType::get(translateType(type.getElementType()),
type.getNumElements());
}
/// Translates the given scalable-vector type.
llvm::Type *translate(LLVM::LLVMScalableVectorType type) {
return llvm::ScalableVectorType::get(translateType(type.getElementType()),
type.getMinNumElements());
}
/// Translates a list of types.
void translateTypes(ArrayRef<Type> types,
SmallVectorImpl<llvm::Type *> &result) {
result.reserve(result.size() + types.size());
for (auto type : types)
result.push_back(translateType(type));
}
/// Reference to the context in which the LLVM IR types are created.
llvm::LLVMContext &context;
/// Map of known translation. This serves a double purpose: caches translation
/// results to avoid repeated recursive calls and makes sure identified
/// structs with the same name (that is, equal) are resolved to an existing
/// type instead of creating a new type.
llvm::DenseMap<Type, llvm::Type *> knownTranslations;
};
} // end namespace detail
} // end namespace LLVM
} // end namespace mlir
LLVM::TypeToLLVMIRTranslator::TypeToLLVMIRTranslator(llvm::LLVMContext &context)
: impl(new detail::TypeToLLVMIRTranslatorImpl(context)) {}
LLVM::TypeToLLVMIRTranslator::~TypeToLLVMIRTranslator() {}
llvm::Type *LLVM::TypeToLLVMIRTranslator::translateType(Type type) {
return impl->translateType(type);
}
unsigned LLVM::TypeToLLVMIRTranslator::getPreferredAlignment(
Type type, const llvm::DataLayout &layout) {
return layout.getPrefTypeAlignment(translateType(type));
}
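// Illustrative usage sketch, not part of the original source: translating a
// single MLIR builtin integer type through the public wrapper defined above.
// The MLIRContext instance (mlirCtx) is an assumption.
//
//   llvm::LLVMContext llvmCtx;
//   mlir::LLVM::TypeToLLVMIRTranslator translator(llvmCtx);
//   llvm::Type *i32 =
//       translator.translateType(mlir::IntegerType::get(&mlirCtx, 32));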
| 2,948 |
432 | /*
* Copyright 2020 <NAME>, <EMAIL>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "api_core.h"
#include "stm32l4_systick.h"
namespace debugger {
STM32L4_SysTick::STM32L4_SysTick(const char *name) : IService(name),
STK_CTRL(this, "STK_CTRL", 0x00),
STK_LOAD(this, "STK_LOAD", 0x04),
STK_VAL(this, "STK_VAL", 0x08),
STK_CALIB(this, "STK_CALIB", 0x0C) {
registerAttribute("CPU", &cpu_);
registerAttribute("IrqLine", &irqLine_);
lastReloadTime_ = 0;
}
void STM32L4_SysTick::postinitService() {
uint64_t baseaddr = 0xE000E010;
STK_CTRL.setBaseAddress(baseaddr + 0x00);
STK_LOAD.setBaseAddress(baseaddr + 0x04);
STK_VAL.setBaseAddress(baseaddr + 0x08);
STK_CALIB.setBaseAddress(baseaddr + 0x0C);
iclk_ = static_cast<IClock *>(
RISCV_get_service_iface(cpu_.to_string(), IFACE_CLOCK));
if (!iclk_) {
RISCV_error("Can't find IClock interface %s", cpu_.to_string());
return;
}
icpu_ = static_cast<ICpuGeneric *>(
RISCV_get_service_iface(cpu_.to_string(), IFACE_CPU_GENERIC));
if (!icpu_) {
RISCV_error("Can't find ICpuRiscV interface %s", cpu_.to_string());
return;
}
}
void STM32L4_SysTick::stepCallback(uint64_t t) {
STK_CTRL_TYPE::value_type ctrl = STK_CTRL.getTyped();
uint64_t dt = getReloadSteps();
lastReloadTime_ = t;
ctrl.b.COUNTFLAG = 1;
STK_CTRL.setValue(ctrl.v);
if (ctrl.b.TICKINT) {
icpu_->raiseSignal(irqLine_.to_int());
}
if (ctrl.b.ENABLE) {
iclk_->moveStepCallback(static_cast<IClockListener *>(this), t + dt);
}
}
uint64_t STM32L4_SysTick::getReloadSteps() {
STK_CTRL_TYPE::value_type ctrl = STK_CTRL.getTyped();
uint64_t ret = (STK_LOAD.getValue().val & 0x00FFFFFF) + 1;
if (ctrl.b.CLKSOURCE == 0) {
ret *= 8;
}
return ret;
}
void STM32L4_SysTick::enableCounter() {
uint64_t dt = getReloadSteps();
lastReloadTime_ = iclk_->getStepCounter();
iclk_->moveStepCallback(static_cast<IClockListener *>(this),
lastReloadTime_ + dt);
RISCV_info("Enable Counter %.1f ms",
static_cast<double>(dt) / (0.001*iclk_->getFreqHz()));
}
void STM32L4_SysTick::disableCounter() {
RISCV_info("%s", "Disable Counter");
}
void STM32L4_SysTick::enableInterrupt() {
RISCV_info("Enable Interrupt %d", irqLine_.to_int());
}
void STM32L4_SysTick::disableInterrupt() {
RISCV_info("Disable Interrupt %d", irqLine_.to_int());
}
uint32_t STM32L4_SysTick::STK_CTRL_TYPE::aboutToRead(uint32_t cur_val) {
value_type t = getTyped();
uint32_t ret = t.v;
t.b.COUNTFLAG = 0;
setValue(t.v);
return ret;
}
uint32_t STM32L4_SysTick::STK_CTRL_TYPE::aboutToWrite(uint32_t cur_val) {
value_type next;
value_type old = getTyped();
STM32L4_SysTick *p = static_cast<STM32L4_SysTick *>(parent_);
next.v = cur_val;
setValue(cur_val);
if (!old.b.ENABLE && next.b.ENABLE) {
p->enableCounter();
} else if (old.b.ENABLE && !next.b.ENABLE) {
p->disableCounter();
}
if (!old.b.TICKINT && next.b.TICKINT) {
p->enableInterrupt();
} else if (old.b.TICKINT && !next.b.TICKINT) {
p->disableInterrupt();
}
return cur_val;
}
} // namespace debugger
| 1,701 |
496 | <gh_stars>100-1000
/*
* Copyright (C) 2018 <NAME>
*
* Author: <NAME> <<EMAIL>>
*/
#ifndef LEXBOR_HTML_FIELD_SET_ELEMENT_H
#define LEXBOR_HTML_FIELD_SET_ELEMENT_H
#ifdef __cplusplus
extern "C" {
#endif
#include "lexbor/html/interface.h"
#include "lexbor/html/interfaces/element.h"
struct lxb_html_field_set_element {
lxb_html_element_t element;
};
LXB_API lxb_html_field_set_element_t *
lxb_html_field_set_element_interface_create(lxb_html_document_t *document);
LXB_API lxb_html_field_set_element_t *
lxb_html_field_set_element_interface_destroy(lxb_html_field_set_element_t *field_set_element);
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif /* LEXBOR_HTML_FIELD_SET_ELEMENT_H */
| 301 |
1,461 | <filename>source/UnitTests.cpp
#define CATCH_CONFIG_RUNNER // This tells Catch to provide a main() - only do this in one cpp file
#define CATCH_CONFIG_NO_POSIX_SIGNALS
#include "../external/Catch2/single_include/catch2/catch.hpp"
#include "UnitTests.h"
#include "imgui_support.h"
static Catch::Session s_session; // There must be exactly one instance
void UnitTestsSetArgs(int argc, const char * argv[])
{
s_session.applyCommandLine(argc, argv);
}
int RunUnitTests()
{
s_session.config().showDurations();
int result = s_session.run();
return result;
}
static bool _window_open_unittests = true;
void onGuiUnitTests()
{
ImGui::SetNextWindowSize(ImVec2(500, 440), ImGuiCond_FirstUseEver);
ImGuiWindowFlags window_flags = 0; //ImGuiWindowFlags_MenuBar;
// ImGui::SetNextWindowPos(ImVec2(0,0)); //, ImGuiCond_Always);
// ImGui::SetNextWindowContentSize(ImVec2(800,600)); //, ImGuiCond_Always);
if (!ImGui::Begin("Unit Tests", &_window_open_unittests, window_flags))
{
ImGui::End();
return;
}
if (ImGui::Button("Run"))
{
RunUnitTests();
}
ImGui::End();
}
| 500 |
7,089 | from setuptools import setup
setup(
name="pep518_twin_forkbombs_second",
version="238",
py_modules=["pep518_twin_forkbombs_second"],
)
| 62 |
1,041 | package org.tests.model.basic.cache;
import io.ebean.annotation.Cache;
import org.tests.model.basic.BasicDomain;
import javax.persistence.DiscriminatorColumn;
import javax.persistence.Entity;
import javax.persistence.Inheritance;
@Cache(enableQueryCache = true)
@Entity
@Inheritance
@DiscriminatorColumn(length = 3)
public abstract class CInhRoot extends BasicDomain {
private static final long serialVersionUID = -4673953370819311120L;
private String licenseNumber;
public String getLicenseNumber() {
return licenseNumber;
}
public void setLicenseNumber(String licenseNumber) {
this.licenseNumber = licenseNumber;
}
}
| 196 |
1,755 | /*
* Copyright(C) 1999-2020 National Technology & Engineering Solutions
* of Sandia, LLC (NTESS). Under the terms of Contract DE-NA0003525 with
* NTESS, the U.S. Government retains certain rights in this software.
*
* See packages/seacas/LICENSE for details
*/
#include "exodusII.h" // for ex_get_partial_var, etc
#include "exodusII_int.h"
/*!
\ingroup ResultsData
* \deprecated Use ex_get_partial_var()(exoid, time_step, EX_ELEM_BLOCK, elem_var_index,
elem_blk_id, start_elem_num, num_elem, elem_var_vals) instead
*/
int ex_get_n_elem_var(int exoid, int time_step, int elem_var_index, ex_entity_id elem_blk_id,
int64_t num_elem_this_blk, int64_t start_elem_num, int64_t num_elem,
void *elem_var_vals)
{
EX_UNUSED(num_elem_this_blk);
return ex_get_partial_var(exoid, time_step, EX_ELEM_BLOCK, elem_var_index, elem_blk_id,
start_elem_num, num_elem, elem_var_vals);
}
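/*
 * Illustrative migration sketch, not part of the original source: since the
 * deprecated wrapper above simply forwards its arguments, callers can invoke
 * the replacement directly with the same values.
 *
 *   ex_get_partial_var(exoid, time_step, EX_ELEM_BLOCK, elem_var_index,
 *                      elem_blk_id, start_elem_num, num_elem, elem_var_vals);
 */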
| 429 |
501 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.sshd.client.session;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;
import java.rmi.RemoteException;
import java.rmi.ServerException;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.sshd.client.SshClient;
import org.apache.sshd.client.future.AuthFuture;
import org.apache.sshd.common.AttributeRepository;
import org.apache.sshd.common.AttributeRepository.AttributeKey;
import org.apache.sshd.common.session.Session;
import org.apache.sshd.common.session.SessionListener;
import org.apache.sshd.core.CoreModuleProperties;
import org.apache.sshd.server.SshServer;
import org.apache.sshd.server.auth.keyboard.KeyboardInteractiveAuthenticator;
import org.apache.sshd.server.auth.pubkey.AcceptAllPublickeyAuthenticator;
import org.apache.sshd.util.test.BaseTestSupport;
import org.apache.sshd.util.test.BogusPasswordAuthenticator;
import org.apache.sshd.util.test.CommandExecutionHelper;
import org.apache.sshd.util.test.CoreTestSupportUtils;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.FixMethodOrder;
import org.junit.Test;
import org.junit.runners.MethodSorters;
/**
* @author <a href="mailto:<EMAIL>">Apache MINA SSHD Project</a>
*/
@FixMethodOrder(MethodSorters.NAME_ASCENDING)
public class ClientSessionTest extends BaseTestSupport {
private static SshServer sshd;
private static SshClient client;
private static int port;
public ClientSessionTest() {
super();
}
@BeforeClass
public static void setupClientAndServer() throws Exception {
sshd = CoreTestSupportUtils.setupTestServer(ClientSessionTest.class);
sshd.start();
port = sshd.getPort();
client = CoreTestSupportUtils.setupTestClient(ClientSessionTest.class);
client.start();
}
@AfterClass
public static void tearDownClientAndServer() throws Exception {
if (sshd != null) {
try {
sshd.stop(true);
} finally {
sshd = null;
}
}
if (client != null) {
try {
client.stop();
} finally {
client = null;
}
}
}
@Before
public void setUp() {
sshd.setPasswordAuthenticator(BogusPasswordAuthenticator.INSTANCE);
sshd.setPublickeyAuthenticator(AcceptAllPublickeyAuthenticator.INSTANCE);
sshd.setKeyboardInteractiveAuthenticator(KeyboardInteractiveAuthenticator.NONE);
}
@Test
public void testDefaultExecuteCommandMethod() throws Exception {
String expectedCommand = getCurrentTestName() + "-CMD";
String expectedResponse = getCurrentTestName() + "-RSP";
sshd.setCommandFactory((session, command) -> new CommandExecutionHelper(command) {
private boolean cmdProcessed;
@Override
protected boolean handleCommandLine(String command) throws Exception {
assertEquals("Mismatched incoming command", expectedCommand, command);
assertFalse("Duplicated command call", cmdProcessed);
OutputStream stdout = getOutputStream();
stdout.write(expectedResponse.getBytes(StandardCharsets.US_ASCII));
stdout.flush();
cmdProcessed = true;
return false;
}
});
try (ClientSession session = client.connect(getCurrentTestName(), TEST_LOCALHOST, port)
.verify(CONNECT_TIMEOUT)
.getSession()) {
session.addPasswordIdentity(getCurrentTestName());
session.auth().verify(AUTH_TIMEOUT);
// NOTE !!! The LF is only because we are using a buffered reader on the server end to read the command
String actualResponse = session.executeRemoteCommand(expectedCommand + "\n");
assertEquals("Mismatched command response", expectedResponse, actualResponse);
}
}
@Test
public void testExceptionThrownIfRemoteStderrWrittenTo() throws Exception {
String expectedCommand = getCurrentTestName() + "-CMD";
String expectedErrorMessage = getCurrentTestName() + "-ERR";
sshd.setCommandFactory((session, command) -> new CommandExecutionHelper(command) {
private boolean cmdProcessed;
@Override
protected boolean handleCommandLine(String command) throws Exception {
assertEquals("Mismatched incoming command", expectedCommand, command);
assertFalse("Duplicated command call", cmdProcessed);
OutputStream stderr = getErrorStream();
stderr.write(expectedErrorMessage.getBytes(StandardCharsets.US_ASCII));
stderr.flush();
cmdProcessed = true;
return false;
}
});
String actualErrorMessage = null;
try (ClientSession session = client.connect(getCurrentTestName(), TEST_LOCALHOST, port)
.verify(CONNECT_TIMEOUT)
.getSession()) {
session.addPasswordIdentity(getCurrentTestName());
session.auth().verify(AUTH_TIMEOUT);
// NOTE !!! The LF is only because we are using a buffered reader on the server end to read the command
String response = session.executeRemoteCommand(expectedCommand + "\n");
fail("Unexpected successful response: " + response);
} catch (Exception e) {
if (!(e instanceof RemoteException)) {
throw e;
}
Throwable cause = e.getCause();
if (!(cause instanceof ServerException)) {
throw e;
}
actualErrorMessage = cause.getMessage();
}
assertEquals("Mismatched captured error message", expectedErrorMessage, actualErrorMessage);
}
@Test
public void testExceptionThrownIfNonZeroExitStatus() throws Exception {
String expectedCommand = getCurrentTestName() + "-CMD";
int expectedErrorCode = 7365;
sshd.setCommandFactory((session, command) -> new CommandExecutionHelper(command) {
private boolean cmdProcessed;
@Override
protected void onExit(int exitValue, String exitMessage) {
super.onExit((exitValue == 0) ? expectedErrorCode : exitValue, exitMessage);
}
@Override
protected boolean handleCommandLine(String command) throws Exception {
assertEquals("Mismatched incoming command", expectedCommand, command);
assertFalse("Duplicated command call", cmdProcessed);
OutputStream stdout = getOutputStream();
stdout.write(command.getBytes(StandardCharsets.US_ASCII));
stdout.flush();
cmdProcessed = true;
return false;
}
});
String actualErrorMessage = null;
try (ClientSession session = client.connect(getCurrentTestName(), TEST_LOCALHOST, port)
.verify(CONNECT_TIMEOUT)
.getSession()) {
session.addPasswordIdentity(getCurrentTestName());
session.auth().verify(AUTH_TIMEOUT);
// NOTE !!! The LF is only because we are using a buffered reader on the server end to read the command
String response = session.executeRemoteCommand(expectedCommand + "\n");
fail("Unexpected successful response: " + response);
} catch (Exception e) {
if (!(e instanceof RemoteException)) {
throw e;
}
Throwable cause = e.getCause();
if (!(cause instanceof ServerException)) {
throw e;
}
actualErrorMessage = cause.getMessage();
}
assertEquals("Mismatched captured error code", Integer.toString(expectedErrorCode), actualErrorMessage);
}
@Test // see SSHD-859
public void testConnectionContextPropagation() throws Exception {
AttributeRepository expected = AttributeRepository.ofKeyValuePair(
new AttributeKey<String>(), getCurrentTestName());
AtomicInteger creationCount = new AtomicInteger(0);
SessionListener listener = new SessionListener() {
@Override
public void sessionCreated(Session session) {
AttributeRepository actual = ((ClientSession) session).getConnectionContext();
assertSame("Mismatched connection context", expected, actual);
creationCount.incrementAndGet();
}
};
try {
client.addSessionListener(listener);
try (ClientSession session = client.connect(getCurrentTestName(), TEST_LOCALHOST, port, expected)
.verify(CONNECT_TIMEOUT)
.getSession()) {
session.addPasswordIdentity(<PASSWORD>());
session.auth().verify(AUTH_TIMEOUT);
assertEquals("Session listener invocation count mismatch", 1, creationCount.getAndSet(0));
}
} finally {
client.removeSessionListener(listener);
}
}
@Test // SSHD-1050
public void testAuthGetsNotifiedIfErrorBeforeFirstAuth() throws Exception {
testEarlyErrorAuthAttempts(1);
}
@Test // SSHD-1050
public void testSecondAuthNotifiedAfterEarlyError() throws Exception {
testEarlyErrorAuthAttempts(3);
}
private void testEarlyErrorAuthAttempts(int maxAttempts) throws Exception {
int limit = CoreModuleProperties.MAX_IDENTIFICATION_SIZE.getRequired(sshd);
String line = getClass().getCanonicalName() + "#" + getCurrentTestName();
StringBuilder sb = new StringBuilder(limit + line.length());
while (sb.length() <= limit) {
if (sb.length() > 0) {
sb.append(CoreModuleProperties.SERVER_EXTRA_IDENT_LINES_SEPARATOR);
}
sb.append(line);
}
CoreModuleProperties.SERVER_EXTRA_IDENTIFICATION_LINES.set(sshd, sb.toString());
try (ClientSession session = client.connect(getCurrentTestName(), TEST_LOCALHOST, port)
.verify(CONNECT_TIMEOUT)
.getSession()) {
session.addPasswordIdentity(getCurrentTestName());
// Give time to the client to signal the overflow in server identification
Thread.sleep(AUTH_TIMEOUT.toMillis() / 2L);
for (int index = 1; index <= maxAttempts; index++) {
String authId = "Auth " + index + "/" + maxAttempts;
outputDebugMessage("%s(%s)", getCurrentTestName(), authId);
AuthFuture future = session.auth();
assertTrue(authId + " not completed on time", future.await(AUTH_TIMEOUT));
assertTrue(authId + " has no result", future.isDone());
assertFalse(authId + " unexpected success", future.isSuccess());
assertTrue(authId + " not marked as failed", future.isFailure());
Throwable exception = future.getException();
String message = exception.getMessage();
assertTrue(authId + " invalid exception message: " + message, message.contains("too many header lines"));
}
} finally {
CoreModuleProperties.SERVER_EXTRA_IDENTIFICATION_LINES.set(sshd, null);
}
}
}
| 4,904 |
585 | <reponame>madrob/solr<gh_stars>100-1000
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.handler;
import java.io.IOException;
import java.time.Instant;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Date;
import java.util.LinkedList;
import java.util.List;
import java.util.Locale;
import java.util.stream.Collectors;
import org.apache.commons.lang3.StringUtils;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.client.solrj.io.Tuple;
import org.apache.solr.client.solrj.io.stream.ExceptionStream;
import org.apache.solr.client.solrj.io.stream.SolrStream;
import org.apache.solr.client.solrj.io.stream.TupleStream;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.cloud.SolrCloudTestCase;
import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.common.params.CommonParams;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.params.SolrParams;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
@Slow
@SolrTestCaseJ4.SuppressSSL
@LuceneTestCase.SuppressCodecs({"Lucene3x", "Lucene40", "Lucene41", "Lucene42", "Lucene45"})
public class TestSQLHandler extends SolrCloudTestCase {
private static final String COLLECTIONORALIAS = "collection1";
private static final int TIMEOUT = DEFAULT_TIMEOUT;
private static final String id = "id";
private static boolean useAlias;
@BeforeClass
public static void setupCluster() throws Exception {
configureCluster(4)
.addConfig("conf", configset("sql"))
.configure();
String collection;
useAlias = random().nextBoolean();
if (useAlias) {
collection = COLLECTIONORALIAS + "_collection";
} else {
collection = COLLECTIONORALIAS;
}
CollectionAdminRequest.createCollection(collection, "conf", 2, 1)
.setPerReplicaState(SolrCloudTestCase.USE_PER_REPLICA_STATE)
.process(cluster.getSolrClient());
cluster.waitForActiveCollection(collection, 2, 2);
if (useAlias) {
CollectionAdminRequest.createAlias(COLLECTIONORALIAS, collection).process(cluster.getSolrClient());
}
}
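// Builds a ModifiableSolrParams from alternating key/value strings,
// e.g. mapParams(CommonParams.QT, "/sql", "stmt", "select ...").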
public static SolrParams mapParams(String... vals) {
ModifiableSolrParams params = new ModifiableSolrParams();
assertEquals("Parameters passed in here must be in pairs!", 0, (vals.length % 2));
for (int idx = 0; idx < vals.length; idx += 2) {
params.add(vals[idx], vals[idx + 1]);
}
return params;
}
@Before
public void cleanIndex() throws Exception {
new UpdateRequest()
.deleteByQuery("*:*")
.commit(cluster.getSolrClient(), COLLECTIONORALIAS);
}
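// Verifies basic SELECT behavior over /sql: projection, ORDER BY, LIMIT, field aliases,
// multi-value id filters, and the SOLR-8845 "where 1 = 0" case.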
@Test
public void testBasicSelect() throws Exception {
new UpdateRequest()
.add("id", "1", "text_t", "XXXX XXXX", "str_s", "a", "field_i", "7", "field_f", "7.5", "field_d", "7.5", "field_l", "7")
.add("id", "2", "text_t", "XXXX XXXX", "str_s", "b", "field_i", "8", "field_f", "8.5", "field_d", "8.5", "field_l", "8")
.add("id", "3", "text_t", "XXXX XXXX", "str_s", "a", "field_i", "20", "field_f", "20.5", "field_d", "20.5", "field_l", "20")
.add("id", "4", "text_t", "XXXX XXXX", "str_s", "b", "field_i", "11", "field_f", "11.5", "field_d", "11.5", "field_l", "11")
.add("id", "5", "text_t", "XXXX XXXX", "str_s", "c", "field_i", "30", "field_f", "30.5", "field_d", "30.5", "field_l", "30")
.add("id", "6", "text_t", "XXXX XXXX", "str_s", "c", "field_i", "40", "field_f", "40.5", "field_d", "40.5", "field_l", "40")
.add("id", "7", "text_t", "XXXX XXXX", "str_s", "c", "field_i", "50", "field_f", "50.5", "field_d", "50.5", "field_l", "50")
.add("id", "8", "text_t", "XXXX XXXX", "str_s", "c", "field_i", "60", "field_f", "60.5", "field_d", "60.5", "field_l", "60")
.commit(cluster.getSolrClient(), COLLECTIONORALIAS);
SolrParams sParams = mapParams(CommonParams.QT, "/sql",
"stmt",
"select id, field_i, str_s, field_f, field_d, field_l from collection1 where (text_t='(XXXX)' OR text_t='XXXX') AND text_t='XXXX' order by field_i desc");
String baseUrl = cluster.getJettySolrRunners().get(0).getBaseUrl().toString() + "/" + COLLECTIONORALIAS;
List<Tuple> tuples = getTuples(sParams, baseUrl);
assert (tuples.size() == 8);
Tuple tuple;
tuple = tuples.get(0);
assertEquals(tuple.getLong("id").longValue(), 8);
assertEquals(tuple.getLong("field_i").longValue(), 60);
assert (tuple.get("str_s").equals("c"));
assertEquals(tuple.getLong("field_i").longValue(), 60L);
assertEquals(tuple.getDouble("field_f"), 60.5, 0.0);
assertEquals(tuple.getDouble("field_d"), 60.5, 0.0);
assertEquals(tuple.getLong("field_l").longValue(), 60);
tuple = tuples.get(1);
assertEquals(tuple.getLong("id").longValue(), 7);
assertEquals(tuple.getLong("field_i").longValue(), 50);
assert (tuple.get("str_s").equals("c"));
assertEquals(tuple.getLong("field_i").longValue(), 50);
assertEquals(tuple.getDouble("field_f"), 50.5, 0.0);
assertEquals(tuple.getDouble("field_d"), 50.5, 0.0);
assertEquals(tuple.getLong("field_l").longValue(), 50);
tuple = tuples.get(2);
assertEquals(tuple.getLong("id").longValue(), 6);
assertEquals(tuple.getLong("field_i").longValue(), 40);
assert (tuple.get("str_s").equals("c"));
assertEquals(tuple.getLong("field_i").longValue(), 40);
assertEquals(tuple.getDouble("field_f"), 40.5, 0.0);
assertEquals(tuple.getDouble("field_d"), 40.5, 0.0);
assertEquals(tuple.getLong("field_l").longValue(), 40);
tuple = tuples.get(3);
assertEquals(tuple.getLong("id").longValue(), 5);
assertEquals(tuple.getLong("field_i").longValue(), 30);
assert (tuple.get("str_s").equals("c"));
assertEquals(tuple.getLong("field_i").longValue(), 30);
assertEquals(tuple.getDouble("field_f"), 30.5, 0.0);
assertEquals(tuple.getDouble("field_d"), 30.5, 0.0);
assertEquals(tuple.getLong("field_l").longValue(), 30);
tuple = tuples.get(4);
assertEquals(tuple.getLong("id").longValue(), 3);
assertEquals(tuple.getLong("field_i").longValue(), 20);
assert (tuple.get("str_s").equals("a"));
assertEquals(tuple.getLong("field_i").longValue(), 20);
assertEquals(tuple.getDouble("field_f"), 20.5, 0.0);
assertEquals(tuple.getDouble("field_d"), 20.5, 0.0);
assertEquals(tuple.getLong("field_l").longValue(), 20);
tuple = tuples.get(5);
assertEquals(tuple.getLong("id").longValue(), 4);
assertEquals(tuple.getLong("field_i").longValue(), 11);
assert (tuple.get("str_s").equals("b"));
assertEquals(tuple.getLong("field_i").longValue(), 11);
assertEquals(tuple.getDouble("field_f"), 11.5, 0.0);
assertEquals(tuple.getDouble("field_d"), 11.5, 0.0);
assertEquals(tuple.getLong("field_l").longValue(), 11);
tuple = tuples.get(6);
assertEquals(tuple.getLong("id").longValue(), 2);
assertEquals(tuple.getLong("field_i").longValue(), 8);
assert (tuple.get("str_s").equals("b"));
assertEquals(tuple.getLong("field_i").longValue(), 8);
assertEquals(tuple.getDouble("field_f"), 8.5, 0.0);
assertEquals(tuple.getDouble("field_d"), 8.5, 0.0);
assertEquals(tuple.getLong("field_l").longValue(), 8);
tuple = tuples.get(7);
assertEquals(tuple.getLong("id").longValue(), 1);
assertEquals(tuple.getLong("field_i").longValue(), 7);
assert (tuple.get("str_s").equals("a"));
assertEquals(tuple.getLong("field_i").longValue(), 7);
assertEquals(tuple.getDouble("field_f"), 7.5, 0.0);
assertEquals(tuple.getDouble("field_d"), 7.5, 0.0);
assertEquals(tuple.getLong("field_l").longValue(), 7);
// Assert field order
//assertResponseContains(clients.get(0), sParams, "{\"docs\":[{\"id\":\"8\",\"field_i\":60,\"str_s\":\"c\",\"field_i\":60,\"field_f\":60.5,\"field_d\":60.5,\"field_l\":60}");
sParams = mapParams(CommonParams.QT, "/sql", "stmt",
"select id, field_i, str_s from collection1 where text_t='XXXX' order by id desc");
tuples = getTuples(sParams, baseUrl);
assert (tuples.size() == 8);
tuple = tuples.get(0);
assert (tuple.getLong("id") == 8);
assert (tuple.getLong("field_i") == 60);
assert (tuple.get("str_s").equals("c"));
tuple = tuples.get(1);
assert (tuple.getLong("id") == 7);
assert (tuple.getLong("field_i") == 50);
assert (tuple.get("str_s").equals("c"));
tuple = tuples.get(2);
assert (tuple.getLong("id") == 6);
assert (tuple.getLong("field_i") == 40);
assert (tuple.get("str_s").equals("c"));
tuple = tuples.get(3);
assert (tuple.getLong("id") == 5);
assert (tuple.getLong("field_i") == 30);
assert (tuple.get("str_s").equals("c"));
tuple = tuples.get(4);
assert (tuple.getLong("id") == 4);
assert (tuple.getLong("field_i") == 11);
assert (tuple.get("str_s").equals("b"));
tuple = tuples.get(5);
assert (tuple.getLong("id") == 3);
assert (tuple.getLong("field_i") == 20);
assert (tuple.get("str_s").equals("a"));
tuple = tuples.get(6);
assert (tuple.getLong("id") == 2);
assert (tuple.getLong("field_i") == 8);
assert (tuple.get("str_s").equals("b"));
tuple = tuples.get(7);
assert (tuple.getLong("id") == 1);
assert (tuple.getLong("field_i") == 7);
assert (tuple.get("str_s").equals("a"));
sParams = mapParams(CommonParams.QT, "/sql",
"stmt", "select id, field_i, str_s from collection1 where text_t='XXXX' order by field_i desc limit 1");
tuples = getTuples(sParams, baseUrl);
assert (tuples.size() == 1);
tuple = tuples.get(0);
assert (tuple.getLong("id") == 8);
assert (tuple.getLong("field_i") == 60);
assert (tuple.get("str_s").equals("c"));
sParams = mapParams(CommonParams.QT, "/sql", "stmt",
"select id, field_i, str_s from collection1 where text_t='XXXX' AND id='(1 2 3)' order by field_i desc");
tuples = getTuples(sParams, baseUrl);
assert (tuples.size() == 3);
tuple = tuples.get(0);
assert (tuple.getLong("id") == 3);
assert (tuple.getLong("field_i") == 20);
assert (tuple.get("str_s").equals("a"));
tuple = tuples.get(1);
assert (tuple.getLong("id") == 2);
assert (tuple.getLong("field_i") == 8);
assert (tuple.get("str_s").equals("b"));
tuple = tuples.get(2);
assert (tuple.getLong("id") == 1);
assert (tuple.getLong("field_i") == 7);
assert (tuple.get("str_s").equals("a"));
sParams = mapParams(CommonParams.QT, "/sql",
"stmt",
"select id as myId, field_i as myInt, str_s as myString from collection1 where text_t='XXXX' AND id='(1 2 3)' order by myInt desc");
tuples = getTuples(sParams, baseUrl);
assert (tuples.size() == 3);
tuple = tuples.get(0);
assert (tuple.getLong("myId") == 3);
assert (tuple.getLong("myInt") == 20);
assert (tuple.get("myString").equals("a"));
tuple = tuples.get(1);
assert (tuple.getLong("myId") == 2);
assert (tuple.getLong("myInt") == 8);
assert (tuple.get("myString").equals("b"));
tuple = tuples.get(2);
assert (tuple.getLong("myId") == 1);
assert (tuple.getLong("myInt") == 7);
assert (tuple.get("myString").equals("a"));
sParams = mapParams(CommonParams.QT, "/sql",
"stmt",
"select id as myId, field_i as myInt, str_s as myString from collection1 where text_t='XXXX' AND id='(1 2 3)' order by field_i desc");
tuples = getTuples(sParams, baseUrl);
assert (tuples.size() == 3);
tuple = tuples.get(0);
assert (tuple.getLong("myId") == 3);
assert (tuple.getLong("myInt") == 20);
assert (tuple.get("myString").equals("a"));
tuple = tuples.get(1);
assert (tuple.getLong("myId") == 2);
assert (tuple.getLong("myInt") == 8);
assert (tuple.get("myString").equals("b"));
tuple = tuples.get(2);
assert (tuple.getLong("myId") == 1);
assert (tuple.getLong("myInt") == 7);
assert (tuple.get("myString").equals("a"));
// SOLR-8845 - Test to make sure that 1 = 0 works for things like Spark SQL
sParams = mapParams(CommonParams.QT, "/sql",
"stmt", "select id, field_i, str_s from collection1 where 1 = 0");
tuples = getTuples(sParams, baseUrl);
assertEquals(0, tuples.size());
}
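// Exercises WHERE-clause operators (=, <>, <, <=, >, >=, LIKE) against numeric and string
// fields, including values containing special characters.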
@Test
public void testWhere() throws Exception {
new UpdateRequest()
.add("id", "1", "text_t", "XXXX XXXX", "str_s", "a", "field_i", "7")
.add("id", "2", "text_t", "XXXX XXXX", "str_s", "b", "field_i", "8")
.add("id", "3", "text_t", "XXXX XXXX", "str_s", "a", "field_i", "20")
.add("id", "4", "text_t", "XXXX XXXX", "str_s", "b", "field_i", "11")
.add("id", "5", "text_t", "XXXX XXXX", "str_s", "c", "field_i", "30", "specialchars_s", "witha|pipe")
.add("id", "6", "text_t", "XXXX XXXX", "str_s", "c", "field_i", "40", "specialchars_s", "witha\\slash")
.add("id", "7", "text_t", "XXXX XXXX", "str_s", "c", "field_i", "50", "specialchars_s", "witha!bang")
.add("id", "8", "text_t", "XXXX XXXX", "str_s", "c", "field_i", "60", "specialchars_s", "witha\"quote")
.commit(cluster.getSolrClient(), COLLECTIONORALIAS);
String baseUrl = cluster.getJettySolrRunners().get(0).getBaseUrl().toString() + "/" + COLLECTIONORALIAS;
// Equals
SolrParams sParams = mapParams(CommonParams.QT, "/sql",
"stmt", "select id from collection1 where id = 1 order by id asc");
List<Tuple> tuples = getTuples(sParams, baseUrl);
assertEquals(1, tuples.size());
Tuple tuple = tuples.get(0);
assertEquals("1", tuple.get("id"));
// Not Equals <>
sParams = mapParams(CommonParams.QT, "/sql",
"stmt", "select id from collection1 where id <> 1 order by id asc limit 10");
tuples = getTuples(sParams, baseUrl);
assertEquals(7, tuples.size());
tuple = tuples.get(0);
assertEquals("2", tuple.get("id"));
tuple = tuples.get(1);
assertEquals("3", tuple.get("id"));
tuple = tuples.get(2);
assertEquals("4", tuple.get("id"));
tuple = tuples.get(3);
assertEquals("5", tuple.get("id"));
tuple = tuples.get(4);
assertEquals("6", tuple.get("id"));
tuple = tuples.get(5);
assertEquals("7", tuple.get("id"));
tuple = tuples.get(6);
assertEquals("8", tuple.get("id"));
// TODO requires different Calcite SQL conformance level
// Not Equals !=
// sParams = mapParams(CommonParams.QT, "/sql",
// "stmt", "select id from collection1 where id != 1 order by id asc limit 10");
//
// tuples = getTuples(sParams);
//
// assertEquals(7, tuples.size());
//
// tuple = tuples.get(0);
// assertEquals(2L, tuple.get("id"));
// tuple = tuples.get(1);
// assertEquals(3L, tuple.get("id"));
// tuple = tuples.get(2);
// assertEquals(4L, tuple.get("id"));
// tuple = tuples.get(3);
// assertEquals(5L, tuple.get("id"));
// tuple = tuples.get(4);
// assertEquals(6L, tuple.get("id"));
// tuple = tuples.get(5);
// assertEquals(7L, tuple.get("id"));
// tuple = tuples.get(6);
// assertEquals(8L, tuple.get("id"));
// Less than
sParams = mapParams(CommonParams.QT, "/sql",
"stmt", "select id from collection1 where id < 2 order by id asc");
tuples = getTuples(sParams, baseUrl);
assertEquals(1, tuples.size());
tuple = tuples.get(0);
assertEquals("1", tuple.get("id"));
// Less than equal
sParams = mapParams(CommonParams.QT, "/sql",
"stmt", "select id from collection1 where id <= 2 order by id asc");
tuples = getTuples(sParams, baseUrl);
assertEquals(2, tuples.size());
tuple = tuples.get(0);
assertEquals("1", tuple.get("id"));
tuple = tuples.get(1);
assertEquals("2", tuple.get("id"));
// Greater than
sParams = mapParams(CommonParams.QT, "/sql",
"stmt", "select id from collection1 where id > 7 order by id asc");
tuples = getTuples(sParams, baseUrl);
assertEquals(1, tuples.size());
tuple = tuples.get(0);
assertEquals("8", tuple.get("id"));
// Greater than equal
sParams = mapParams(CommonParams.QT, "/sql",
"stmt", "select id from collection1 where id >= 7 order by id asc");
tuples = getTuples(sParams, baseUrl);
assertEquals(2, tuples.size());
tuple = tuples.get(0);
assertEquals("7", tuple.get("id"));
tuple = tuples.get(1);
assertEquals("8", tuple.get("id"));
expectResults("SELECT id FROM $ALIAS WHERE str_s = 'a'", 2);
expectResults("SELECT id FROM $ALIAS WHERE 'a' = str_s", 2);
expectResults("SELECT id FROM $ALIAS WHERE str_s <> 'c'", 4);
expectResults("SELECT id FROM $ALIAS WHERE 'c' <> str_s", 4);
expectResults("SELECT id FROM $ALIAS WHERE specialchars_s = 'witha\"quote'", 1);
expectResults("SELECT id FROM $ALIAS WHERE specialchars_s = 'witha|pipe'", 1);
expectResults("SELECT id FROM $ALIAS WHERE specialchars_s LIKE 'with%'", 4);
expectResults("SELECT id FROM $ALIAS WHERE specialchars_s LIKE 'witha|%'", 1);
}
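// Ensures mixed-case field names (Text_t, Str_s, Field_i) work in selects, GROUP BY and
// HAVING clauses, across aggregation modes.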
@Test
public void testMixedCaseFields() throws Exception {
new UpdateRequest()
.add("id", "1", "Text_t", "XXXX XXXX", "Str_s", "a", "Field_i", "7")
.add("id", "2", "Text_t", "XXXX XXXX", "Str_s", "b", "Field_i", "8")
.add("id", "3", "Text_t", "XXXX XXXX", "Str_s", "a", "Field_i", "20")
.add("id", "4", "Text_t", "XXXX XXXX", "Str_s", "b", "Field_i", "11")
.add("id", "5", "Text_t", "XXXX XXXX", "Str_s", "c", "Field_i", "30")
.add("id", "6", "Text_t", "XXXX XXXX", "Str_s", "c", "Field_i", "40")
.add("id", "7", "Text_t", "XXXX XXXX", "Str_s", "c", "Field_i", "50")
.add("id", "8", "Text_t", "XXXX XXXX", "Str_s", "c", "Field_i", "60")
.commit(cluster.getSolrClient(), COLLECTIONORALIAS);
String baseUrl = cluster.getJettySolrRunners().get(0).getBaseUrl().toString() + "/" + COLLECTIONORALIAS;
SolrParams sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "map_reduce",
"stmt", "select id, Field_i, Str_s from collection1 where Text_t='XXXX' order by Field_i desc");
List<Tuple> tuples = getTuples(sParams, baseUrl);
assertEquals(tuples.toString(), 8, tuples.size());
Tuple tuple;
tuple = tuples.get(0);
assert (tuple.getLong("id") == 8);
assert (tuple.getLong("Field_i") == 60);
assert (tuple.get("Str_s").equals("c"));
tuple = tuples.get(1);
assert (tuple.getLong("id") == 7);
assert (tuple.getLong("Field_i") == 50);
assert (tuple.get("Str_s").equals("c"));
tuple = tuples.get(2);
assert (tuple.getLong("id") == 6);
assert (tuple.getLong("Field_i") == 40);
assert (tuple.get("Str_s").equals("c"));
tuple = tuples.get(3);
assert (tuple.getLong("id") == 5);
assert (tuple.getLong("Field_i") == 30);
assert (tuple.get("Str_s").equals("c"));
tuple = tuples.get(4);
assert (tuple.getLong("id") == 3);
assert (tuple.getLong("Field_i") == 20);
assert (tuple.get("Str_s").equals("a"));
tuple = tuples.get(5);
assert (tuple.getLong("id") == 4);
assert (tuple.getLong("Field_i") == 11);
assert (tuple.get("Str_s").equals("b"));
tuple = tuples.get(6);
assert (tuple.getLong("id") == 2);
assert (tuple.getLong("Field_i") == 8);
assert (tuple.get("Str_s").equals("b"));
tuple = tuples.get(7);
assert (tuple.getLong("id") == 1);
assert (tuple.getLong("Field_i") == 7);
assert (tuple.get("Str_s").equals("a"));
// TODO: also cover sum(Field_i) selected under an explicit alias
sParams = mapParams(CommonParams.QT, "/sql",
"stmt",
"select Str_s, sum(Field_i) from collection1 where id='(1 8)' group by Str_s having (sum(Field_i) = 7 OR sum(Field_i) = 60) order by sum(Field_i) desc");
tuples = getTuples(sParams, baseUrl);
assertEquals(tuples.toString(), 2, tuples.size());
tuple = tuples.get(0);
assert (tuple.get("Str_s").equals("c"));
assert (tuple.getDouble("EXPR$1") == 60);
tuple = tuples.get(1);
assert (tuple.get("Str_s").equals("a"));
assert (tuple.getDouble("EXPR$1") == 7);
sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "map_reduce",
"stmt",
"select Str_s, sum(Field_i) from collection1 where id='(1 8)' group by Str_s having (sum(Field_i) = 7 OR sum(Field_i) = 60) order by sum(Field_i) desc");
tuples = getTuples(sParams, baseUrl);
assert (tuples.size() == 2);
tuple = tuples.get(0);
assert (tuple.get("Str_s").equals("c"));
assert (tuple.getDouble("EXPR$1") == 60);
tuple = tuples.get(1);
assert (tuple.get("Str_s").equals("a"));
assert (tuple.getDouble("EXPR$1") == 7);
}
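// SELECT DISTINCT with aggregationMode=facet: ascending/descending sorts, aliases, LIMIT,
// the default sort when none is given, and a predicate.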
@Test
public void testSelectDistinctFacets() throws Exception {
new UpdateRequest()
.add("id", "1", "text_t", "XXXX XXXX", "str_s", "a", "field_i", "1")
.add("id", "2", "text_t", "XXXX XXXX", "str_s", "b", "field_i", "2")
.add("id", "3", "text_t", "XXXX XXXX", "str_s", "a", "field_i", "20")
.add("id", "4", "text_t", "XXXX XXXX", "str_s", "b", "field_i", "2")
.add("id", "5", "text_t", "XXXX XXXX", "str_s", "c", "field_i", "30")
.add("id", "6", "text_t", "XXXX XXXX", "str_s", "c", "field_i", "30")
.add("id", "7", "text_t", "XXXX XXXX", "str_s", "c", "field_i", "50")
.add("id", "8", "text_t", "XXXX XXXX", "str_s", "c", "field_i", "60")
.commit(cluster.getSolrClient(), COLLECTIONORALIAS);
String baseUrl = cluster.getJettySolrRunners().get(0).getBaseUrl().toString() + "/" + COLLECTIONORALIAS;
SolrParams sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "facet",
"stmt", "select distinct str_s, field_i from collection1 order by str_s asc, field_i asc");
List<Tuple> tuples = getTuples(sParams, baseUrl);
assert (tuples.size() == 6);
Tuple tuple;
tuple = tuples.get(0);
assert (tuple.get("str_s").equals("a"));
assert (tuple.getLong("field_i") == 1);
tuple = tuples.get(1);
assert (tuple.get("str_s").equals("a"));
assert (tuple.getLong("field_i") == 20);
tuple = tuples.get(2);
assert (tuple.get("str_s").equals("b"));
assert (tuple.getLong("field_i") == 2);
tuple = tuples.get(3);
assert (tuple.get("str_s").equals("c"));
assert (tuple.getLong("field_i") == 30);
tuple = tuples.get(4);
assert (tuple.get("str_s").equals("c"));
assert (tuple.getLong("field_i") == 50);
tuple = tuples.get(5);
assert (tuple.get("str_s").equals("c"));
assert (tuple.getLong("field_i") == 60);
// reverse the sort
sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "facet",
"stmt", "select distinct str_s, field_i from collection1 order by str_s desc, field_i desc");
tuples = getTuples(sParams, baseUrl);
assert (tuples.size() == 6);
tuple = tuples.get(0);
assert (tuple.get("str_s").equals("c"));
assert (tuple.getLong("field_i") == 60);
tuple = tuples.get(1);
assert (tuple.get("str_s").equals("c"));
assert (tuple.getLong("field_i") == 50);
tuple = tuples.get(2);
assert (tuple.get("str_s").equals("c"));
assert (tuple.getLong("field_i") == 30);
tuple = tuples.get(3);
assert (tuple.get("str_s").equals("b"));
assert (tuple.getLong("field_i") == 2);
tuple = tuples.get(4);
assert (tuple.get("str_s").equals("a"));
assert (tuple.getLong("field_i") == 20);
tuple = tuples.get(5);
assert (tuple.get("str_s").equals("a"));
assert (tuple.getLong("field_i") == 1);
// reverse the sort
sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "facet",
"stmt", "select distinct str_s as myString, field_i as myInt from collection1 order by str_s desc, myInt desc");
tuples = getTuples(sParams, baseUrl);
assert (tuples.size() == 6);
tuple = tuples.get(0);
assert (tuple.get("myString").equals("c"));
assert (tuple.getLong("myInt") == 60);
tuple = tuples.get(1);
assert (tuple.get("myString").equals("c"));
assert (tuple.getLong("myInt") == 50);
tuple = tuples.get(2);
assert (tuple.get("myString").equals("c"));
assert (tuple.getLong("myInt") == 30);
tuple = tuples.get(3);
assert (tuple.get("myString").equals("b"));
assert (tuple.getLong("myInt") == 2);
tuple = tuples.get(4);
assert (tuple.get("myString").equals("a"));
assert (tuple.getLong("myInt") == 20);
tuple = tuples.get(5);
assert (tuple.get("myString").equals("a"));
assert (tuple.getLong("myInt") == 1);
// test with limit
sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "facet",
"stmt", "select distinct str_s, field_i from collection1 order by str_s desc, field_i desc limit 2");
tuples = getTuples(sParams, baseUrl);
assert (tuples.size() == 2);
tuple = tuples.get(0);
assert (tuple.get("str_s").equals("c"));
assert (tuple.getLong("field_i") == 60);
tuple = tuples.get(1);
assert (tuple.get("str_s").equals("c"));
assert (tuple.getLong("field_i") == 50);
// Test without a sort. Sort should be asc by default.
sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "facet",
"stmt", "select distinct str_s, field_i from collection1");
tuples = getTuples(sParams, baseUrl);
assert (tuples.size() == 6);
tuple = tuples.get(0);
assert (tuple.get("str_s").equals("a"));
assert (tuple.getLong("field_i") == 1);
tuple = tuples.get(1);
assert (tuple.get("str_s").equals("a"));
assert (tuple.getLong("field_i") == 20);
tuple = tuples.get(2);
assert (tuple.get("str_s").equals("b"));
assert (tuple.getLong("field_i") == 2);
tuple = tuples.get(3);
assert (tuple.get("str_s").equals("c"));
assert (tuple.getLong("field_i") == 30);
tuple = tuples.get(4);
assert (tuple.get("str_s").equals("c"));
assert (tuple.getLong("field_i") == 50);
tuple = tuples.get(5);
assert (tuple.get("str_s").equals("c"));
assert (tuple.getLong("field_i") == 60);
// Test with a predicate.
sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "facet",
"stmt", "select distinct str_s, field_i from collection1 where str_s = 'a'");
tuples = getTuples(sParams, baseUrl);
assert (tuples.size() == 2);
tuple = tuples.get(0);
assert (tuple.get("str_s").equals("a"));
assert (tuple.getLong("field_i") == 1);
tuple = tuples.get(1);
assert (tuple.get("str_s").equals("a"));
assert (tuple.getLong("field_i") == 20);
}
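// Same SELECT DISTINCT scenarios as above, but with aggregationMode=map_reduce.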
@Test
public void testSelectDistinct() throws Exception {
new UpdateRequest()
.add("id", "1", "text_t", "XXXX XXXX", "str_s", "a", "field_i", "1")
.add("id", "2", "text_t", "XXXX XXXX", "str_s", "b", "field_i", "2")
.add("id", "3", "text_t", "XXXX XXXX", "str_s", "a", "field_i", "20")
.add("id", "4", "text_t", "XXXX XXXX", "str_s", "b", "field_i", "2")
.add("id", "5", "text_t", "XXXX XXXX", "str_s", "c", "field_i", "30")
.add("id", "6", "text_t", "XXXX XXXX", "str_s", "c", "field_i", "30")
.add("id", "7", "text_t", "XXXX XXXX", "str_s", "c", "field_i", "50")
.add("id", "8", "text_t", "XXXX XXXX", "str_s", "c", "field_i", "60")
.commit(cluster.getSolrClient(), COLLECTIONORALIAS);
String baseUrl = cluster.getJettySolrRunners().get(0).getBaseUrl().toString() + "/" + COLLECTIONORALIAS;
SolrParams sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "map_reduce",
"stmt", "select distinct str_s, field_i from collection1 order by str_s asc, field_i asc");
List<Tuple> tuples = getTuples(sParams, baseUrl);
assert (tuples.size() == 6);
Tuple tuple = tuples.get(0);
assert (tuple.get("str_s").equals("a"));
assert (tuple.getLong("field_i") == 1);
tuple = tuples.get(1);
assert (tuple.get("str_s").equals("a"));
assert (tuple.getLong("field_i") == 20);
tuple = tuples.get(2);
assert (tuple.get("str_s").equals("b"));
assert (tuple.getLong("field_i") == 2);
tuple = tuples.get(3);
assert (tuple.get("str_s").equals("c"));
assert (tuple.getLong("field_i") == 30);
tuple = tuples.get(4);
assert (tuple.get("str_s").equals("c"));
assert (tuple.getLong("field_i") == 50);
tuple = tuples.get(5);
assert (tuple.get("str_s").equals("c"));
assert (tuple.getLong("field_i") == 60);
// reverse the sort
sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "map_reduce",
"stmt", "select distinct str_s, field_i from collection1 order by str_s desc, field_i desc");
tuples = getTuples(sParams, baseUrl);
assert (tuples.size() == 6);
tuple = tuples.get(0);
assert (tuple.get("str_s").equals("c"));
assert (tuple.getLong("field_i") == 60);
tuple = tuples.get(1);
assert (tuple.get("str_s").equals("c"));
assert (tuple.getLong("field_i") == 50);
tuple = tuples.get(2);
assert (tuple.get("str_s").equals("c"));
assert (tuple.getLong("field_i") == 30);
tuple = tuples.get(3);
assert (tuple.get("str_s").equals("b"));
assert (tuple.getLong("field_i") == 2);
tuple = tuples.get(4);
assert (tuple.get("str_s").equals("a"));
assert (tuple.getLong("field_i") == 20);
tuple = tuples.get(5);
assert (tuple.get("str_s").equals("a"));
assert (tuple.getLong("field_i") == 1);
sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "map_reduce",
"stmt", "select distinct str_s as myString, field_i from collection1 order by myString desc, field_i desc");
tuples = getTuples(sParams, baseUrl);
assert (tuples.size() == 6);
tuple = tuples.get(0);
assert (tuple.get("myString").equals("c"));
assert (tuple.getLong("field_i") == 60);
tuple = tuples.get(1);
assert (tuple.get("myString").equals("c"));
assert (tuple.getLong("field_i") == 50);
tuple = tuples.get(2);
assert (tuple.get("myString").equals("c"));
assert (tuple.getLong("field_i") == 30);
tuple = tuples.get(3);
assert (tuple.get("myString").equals("b"));
assert (tuple.getLong("field_i") == 2);
tuple = tuples.get(4);
assert (tuple.get("myString").equals("a"));
assert (tuple.getLong("field_i") == 20);
tuple = tuples.get(5);
assert (tuple.get("myString").equals("a"));
assert (tuple.getLong("field_i") == 1);
// test with limit
sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "map_reduce",
"stmt", "select distinct str_s, field_i from collection1 order by str_s desc, field_i desc limit 2");
tuples = getTuples(sParams, baseUrl);
assert (tuples.size() == 2);
tuple = tuples.get(0);
assert (tuple.get("str_s").equals("c"));
assert (tuple.getLong("field_i") == 60);
tuple = tuples.get(1);
assert (tuple.get("str_s").equals("c"));
assert (tuple.getLong("field_i") == 50);
// Test without a sort. Sort should be asc by default.
sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "map_reduce",
"stmt", "select distinct str_s, field_i from collection1");
tuples = getTuples(sParams, baseUrl);
assert (tuples.size() == 6);
tuple = tuples.get(0);
assert (tuple.get("str_s").equals("a"));
assert (tuple.getLong("field_i") == 1);
tuple = tuples.get(1);
assert (tuple.get("str_s").equals("a"));
assert (tuple.getLong("field_i") == 20);
tuple = tuples.get(2);
assert (tuple.get("str_s").equals("b"));
assert (tuple.getLong("field_i") == 2);
tuple = tuples.get(3);
assert (tuple.get("str_s").equals("c"));
assert (tuple.getLong("field_i") == 30);
tuple = tuples.get(4);
assert (tuple.get("str_s").equals("c"));
assert (tuple.getLong("field_i") == 50);
tuple = tuples.get(5);
assert (tuple.get("str_s").equals("c"));
assert (tuple.getLong("field_i") == 60);
// Test with a predicate.
sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "map_reduce",
"stmt", "select distinct str_s, field_i from collection1 where str_s = 'a'");
tuples = getTuples(sParams, baseUrl);
assertEquals(tuples.toString(), 2, tuples.size());
tuple = tuples.get(0);
assert (tuple.get("str_s").equals("a"));
assert (tuple.getLong("field_i") == 1);
tuple = tuples.get(1);
assert (tuple.get("str_s").equals("a"));
assert (tuple.getLong("field_i") == 20);
}
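// SELECT DISTINCT with aggregationMode=map_reduce and numWorkers=2 to exercise the
// parallel (worker-based) code path.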
@Test
public void testParallelSelectDistinct() throws Exception {
new UpdateRequest()
.add("id", "1", "text_t", "XXXX XXXX", "str_s", "a", "field_i", "1")
.add("id", "2", "text_t", "XXXX XXXX", "str_s", "b", "field_i", "2")
.add("id", "3", "text_t", "XXXX XXXX", "str_s", "a", "field_i", "20")
.add("id", "4", "text_t", "XXXX XXXX", "str_s", "b", "field_i", "2")
.add("id", "5", "text_t", "XXXX XXXX", "str_s", "c", "field_i", "30")
.add("id", "6", "text_t", "XXXX XXXX", "str_s", "c", "field_i", "30")
.add("id", "7", "text_t", "XXXX XXXX", "str_s", "c", "field_i", "50")
.add("id", "8", "text_t", "XXXX XXXX", "str_s", "c", "field_i", "60")
.commit(cluster.getSolrClient(), COLLECTIONORALIAS);
String baseUrl = cluster.getJettySolrRunners().get(0).getBaseUrl().toString() + "/" + COLLECTIONORALIAS;
SolrParams sParams = mapParams(CommonParams.QT, "/sql", "numWorkers", "2", "aggregationMode", "map_reduce",
"stmt", "select distinct str_s, field_i from collection1 order by str_s asc, field_i asc");
List<Tuple> tuples = getTuples(sParams, baseUrl);
assert (tuples.size() == 6);
Tuple tuple;
tuple = tuples.get(0);
assert (tuple.get("str_s").equals("a"));
assert (tuple.getLong("field_i") == 1);
tuple = tuples.get(1);
assert (tuple.get("str_s").equals("a"));
assert (tuple.getLong("field_i") == 20);
tuple = tuples.get(2);
assert (tuple.get("str_s").equals("b"));
assert (tuple.getLong("field_i") == 2);
tuple = tuples.get(3);
assert (tuple.get("str_s").equals("c"));
assert (tuple.getLong("field_i") == 30);
tuple = tuples.get(4);
assert (tuple.get("str_s").equals("c"));
assert (tuple.getLong("field_i") == 50);
tuple = tuples.get(5);
assert (tuple.get("str_s").equals("c"));
assert (tuple.getLong("field_i") == 60);
// reverse the sort
sParams = mapParams(CommonParams.QT, "/sql", "numWorkers", "2", "aggregationMode", "map_reduce",
"stmt", "select distinct str_s, field_i from collection1 order by str_s desc, field_i desc");
tuples = getTuples(sParams, baseUrl);
assert (tuples.size() == 6);
tuple = tuples.get(0);
assert (tuple.get("str_s").equals("c"));
assert (tuple.getLong("field_i") == 60);
tuple = tuples.get(1);
assert (tuple.get("str_s").equals("c"));
assert (tuple.getLong("field_i") == 50);
tuple = tuples.get(2);
assert (tuple.get("str_s").equals("c"));
assert (tuple.getLong("field_i") == 30);
tuple = tuples.get(3);
assert (tuple.get("str_s").equals("b"));
assert (tuple.getLong("field_i") == 2);
tuple = tuples.get(4);
assert (tuple.get("str_s").equals("a"));
assert (tuple.getLong("field_i") == 20);
tuple = tuples.get(5);
assert (tuple.get("str_s").equals("a"));
assert (tuple.getLong("field_i") == 1);
// reverse the sort
sParams = mapParams(CommonParams.QT, "/sql", "numWorkers", "2", "aggregationMode", "map_reduce",
"stmt", "select distinct str_s as myString, field_i from collection1 order by myString desc, field_i desc");
tuples = getTuples(sParams, baseUrl);
assert (tuples.size() == 6);
tuple = tuples.get(0);
assert (tuple.get("myString").equals("c"));
assert (tuple.getLong("field_i") == 60);
tuple = tuples.get(1);
assert (tuple.get("myString").equals("c"));
assert (tuple.getLong("field_i") == 50);
tuple = tuples.get(2);
assert (tuple.get("myString").equals("c"));
assert (tuple.getLong("field_i") == 30);
tuple = tuples.get(3);
assert (tuple.get("myString").equals("b"));
assert (tuple.getLong("field_i") == 2);
tuple = tuples.get(4);
assert (tuple.get("myString").equals("a"));
assert (tuple.getLong("field_i") == 20);
tuple = tuples.get(5);
assert (tuple.get("myString").equals("a"));
assert (tuple.getLong("field_i") == 1);
// test with limit
sParams = mapParams(CommonParams.QT, "/sql", "numWorkers", "2", "aggregationMode", "map_reduce",
"stmt", "select distinct str_s, field_i from collection1 order by str_s desc, field_i desc limit 2");
tuples = getTuples(sParams, baseUrl);
assert (tuples.size() == 2);
tuple = tuples.get(0);
assert (tuple.get("str_s").equals("c"));
assert (tuple.getLong("field_i") == 60);
tuple = tuples.get(1);
assert (tuple.get("str_s").equals("c"));
assert (tuple.getLong("field_i") == 50);
// Test without a sort. Sort should be asc by default.
sParams = mapParams(CommonParams.QT, "/sql", "numWorkers", "2", "aggregationMode", "map_reduce",
"stmt", "select distinct str_s, field_i from collection1");
tuples = getTuples(sParams, baseUrl);
assert (tuples.size() == 6);
tuple = tuples.get(0);
assert (tuple.get("str_s").equals("a"));
assert (tuple.getLong("field_i") == 1);
tuple = tuples.get(1);
assert (tuple.get("str_s").equals("a"));
assert (tuple.getLong("field_i") == 20);
tuple = tuples.get(2);
assert (tuple.get("str_s").equals("b"));
assert (tuple.getLong("field_i") == 2);
tuple = tuples.get(3);
assert (tuple.get("str_s").equals("c"));
assert (tuple.getLong("field_i") == 30);
tuple = tuples.get(4);
assert (tuple.get("str_s").equals("c"));
assert (tuple.getLong("field_i") == 50);
tuple = tuples.get(5);
assert (tuple.get("str_s").equals("c"));
assert (tuple.getLong("field_i") == 60);
// Test with a predicate.
sParams = mapParams(CommonParams.QT, "/sql", "numWorkers", "2", "aggregationMode", "map_reduce",
"stmt", "select distinct str_s, field_i from collection1 where str_s = 'a'");
tuples = getTuples(sParams, baseUrl);
assert (tuples.size() == 2);
tuple = tuples.get(0);
assert (tuple.get("str_s").equals("a"));
assert (tuple.getLong("field_i") == 1);
tuple = tuples.get(1);
assert (tuple.get("str_s").equals("a"));
assert (tuple.getLong("field_i") == 20);
}
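// GROUP BY with aggregationMode=facet: count/sum/min/max/avg aggregates, aliases,
// HAVING clauses, and LIMIT with ascending/descending sorts.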
@Test
public void testBasicGroupingFacets() throws Exception {
new UpdateRequest()
.add("id", "1", "text_t", "XXXX XXXX", "str_s", "a", "field_i", "7")
.add("id", "2", "text_t", "XXXX XXXX", "str_s", "b", "field_i", "8")
.add("id", "3", "text_t", "XXXX XXXX", "str_s", "a", "field_i", "20")
.add("id", "4", "text_t", "XXXX XXXX", "str_s", "b", "field_i", "11")
.add("id", "5", "text_t", "XXXX XXXX", "str_s", "c", "field_i", "30")
.add("id", "6", "text_t", "XXXX XXXX", "str_s", "c", "field_i", "40")
.add("id", "7", "text_t", "XXXX XXXX", "str_s", "c", "field_i", "50")
.add("id", "8", "text_t", "XXXX XXXX", "str_s", "c", "field_i", "60")
.add("id", "9", "text_t", "XXXX XXXY", "str_s", "d", "field_i", "70")
.commit(cluster.getSolrClient(), COLLECTIONORALIAS);
String baseUrl = cluster.getJettySolrRunners().get(0).getBaseUrl().toString() + "/" + COLLECTIONORALIAS;
SolrParams sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "facet",
"stmt", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), " +
"cast(avg(1.0 * field_i) as float) from collection1 where text_t='XXXX' group by str_s " +
"order by sum(field_i) asc limit 2");
List<Tuple> tuples = getTuples(sParams, baseUrl);
// Only two results because of the limit.
assert (tuples.size() == 2);
Tuple tuple;
tuple = tuples.get(0);
assert (tuple.get("str_s").equals("b"));
assert (tuple.getDouble("EXPR$1") == 2); // count(*)
assert (tuple.getDouble("EXPR$2") == 19); // sum(field_i)
assert (tuple.getDouble("EXPR$3") == 8); // min(field_i)
assert (tuple.getDouble("EXPR$4") == 11); // max(field_i)
assert (tuple.getDouble("EXPR$5") == 9.5D); // avg(field_i)
tuple = tuples.get(1);
assert (tuple.get("str_s").equals("a"));
assert (tuple.getDouble("EXPR$1") == 2); // count(*)
assert (tuple.getDouble("EXPR$2") == 27); // sum(field_i)
assert (tuple.getDouble("EXPR$3") == 7); // min(field_i)
assert (tuple.getDouble("EXPR$4") == 20); // max(field_i)
assert (tuple.getDouble("EXPR$5") == 13.5D); // avg(field_i)
sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "facet",
"stmt", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), " +
"avg(field_i) from collection1 where text_t='XXXX' group by str_s " +
"order by sum(field_i) asc limit 2");
tuples = getTuples(sParams, baseUrl);
// Only two results because of the limit.
assert (tuples.size() == 2);
tuple = tuples.get(0);
assert (tuple.get("str_s").equals("b"));
assert (tuple.getDouble("EXPR$1") == 2); // count(*)
assert (tuple.getDouble("EXPR$2") == 19); // sum(field_i)
assert (tuple.getDouble("EXPR$3") == 8); // min(field_i)
assert (tuple.getDouble("EXPR$4") == 11); // max(field_i)
assert (tuple.getDouble("EXPR$5") == 10); // avg(field_i)
tuple = tuples.get(1);
assert (tuple.get("str_s").equals("a"));
assert (tuple.getDouble("EXPR$1") == 2); // count(*)
assert (tuple.getDouble("EXPR$2") == 27); // sum(field_i)
assert (tuple.getDouble("EXPR$3") == 7); // min(field_i)
assert (tuple.getDouble("EXPR$4") == 20); // max(field_i)
assert (tuple.getDouble("EXPR$5") == 14); // avg(field_i)
sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "facet",
"stmt", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), "
+ "cast(avg(1.0 * field_i) as float) from collection1 where (text_t='XXXX' AND NOT (text_t='XXXY')) "
+ "group by str_s order by str_s desc");
tuples = getTuples(sParams, baseUrl);
// The GROUP BY sort and the ORDER BY sort match and no limit is applied, so all tuples
// should be returned in this scenario.
assertEquals(tuples.toString(), 3, tuples.size());
tuple = tuples.get(0);
assert (tuple.get("str_s").equals("c"));
assert (tuple.getDouble("EXPR$1") == 4); // count(*)
assert (tuple.getDouble("EXPR$2") == 180); // sum(field_i)
assert (tuple.getDouble("EXPR$3") == 30); // min(field_i)
assert (tuple.getDouble("EXPR$4") == 60); // max(field_i)
assert (tuple.getDouble("EXPR$5") == 45); // avg(field_i)
tuple = tuples.get(1);
assert (tuple.get("str_s").equals("b"));
assert (tuple.getDouble("EXPR$1") == 2); // count(*)
assert (tuple.getDouble("EXPR$2") == 19); // sum(field_i)
assert (tuple.getDouble("EXPR$3") == 8); // min(field_i)
assert (tuple.getDouble("EXPR$4") == 11); // max(field_i)
assert (tuple.getDouble("EXPR$5") == 9.5D); // avg(field_i)
tuple = tuples.get(2);
assert (tuple.get("str_s").equals("a"));
assert (tuple.getDouble("EXPR$1") == 2); // count(*)
assert (tuple.getDouble("EXPR$2") == 27); // sum(field_i)
assert (tuple.getDouble("EXPR$3") == 7); // min(field_i)
assert (tuple.getDouble("EXPR$4") == 20); // max(field_i)
assert (tuple.getDouble("EXPR$5") == 13.5D); // avg(field_i)
sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "facet",
"stmt", "select str_s as myString, count(*), sum(field_i) as mySum, min(field_i), max(field_i), "
+ "cast(avg(1.0 * field_i) as float) from collection1 where (text_t='XXXX' AND NOT (text_t='XXXY')) "
+ "group by str_s order by myString desc");
tuples = getTuples(sParams, baseUrl);
// The GROUP BY sort and the ORDER BY sort match and no limit is applied, so all tuples
// should be returned in this scenario.
assert (tuples.size() == 3);
tuple = tuples.get(0);
assert (tuple.get("myString").equals("c"));
assert (tuple.getDouble("EXPR$1") == 4); // count(*)
assert (tuple.getDouble("mySum") == 180);
assert (tuple.getDouble("EXPR$3") == 30); // min(field_i)
assert (tuple.getDouble("EXPR$4") == 60); // max(field_i)
assert (tuple.getDouble("EXPR$5") == 45); // avg(field_i)
tuple = tuples.get(1);
assert (tuple.get("myString").equals("b"));
assert (tuple.getDouble("EXPR$1") == 2); // count(*)
assert (tuple.getDouble("mySum") == 19);
assert (tuple.getDouble("EXPR$3") == 8); // min(field_i)
assert (tuple.getDouble("EXPR$4") == 11); // max(field_i)
assert (tuple.getDouble("EXPR$5") == 9.5D); // avg(field_i)
tuple = tuples.get(2);
assert (tuple.get("myString").equals("a"));
assert (tuple.getDouble("EXPR$1") == 2); // count(*)
assert (tuple.getDouble("mySum") == 27);
assert (tuple.getDouble("EXPR$3") == 7); // min(field_i)
assert (tuple.getDouble("EXPR$4") == 20); // max(field_i)
assert (tuple.getDouble("EXPR$5") == 13.5D); // avg(field_i)
sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "facet",
"stmt", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), " +
"cast(avg(1.0 * field_i) as float) from collection1 where text_t='XXXX' group by str_s having sum(field_i) = 19");
tuples = getTuples(sParams, baseUrl);
assert (tuples.size() == 1);
tuple = tuples.get(0);
assert (tuple.get("str_s").equals("b"));
assert (tuple.getDouble("EXPR$1") == 2); // count(*)
assert (tuple.getDouble("EXPR$2") == 19); // sum(field_i)
assert (tuple.getDouble("EXPR$3") == 8); // min(field_i)
assert (tuple.getDouble("EXPR$4") == 11); // max(field_i)
assert (tuple.getDouble("EXPR$5") == 9.5D); // avg(field_i)
sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "facet",
"stmt", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), " +
"cast(avg(1.0 * field_i) as float) from collection1 where text_t='XXXX' group by str_s " +
"having ((sum(field_i) = 19) AND (min(field_i) = 8))");
tuples = getTuples(sParams, baseUrl);
assert (tuples.size() == 1);
tuple = tuples.get(0);
assert (tuple.get("str_s").equals("b"));
assert (tuple.getDouble("EXPR$1") == 2); // count(*)
assert (tuple.getDouble("EXPR$2") == 19); // sum(field_i)
assert (tuple.getDouble("EXPR$3") == 8); // min(field_i)
assert (tuple.getDouble("EXPR$4") == 11); // max(field_i)
assert (tuple.getDouble("EXPR$5") == 9.5D); // avg(field_i)
sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "facet",
"stmt", "select str_s, count(*), sum(field_i) as mySum, min(field_i), max(field_i), " +
"cast(avg(1.0 * field_i) as float) from collection1 where text_t='XXXX' group by str_s " +
"having ((sum(field_i) = 19) AND (min(field_i) = 8))");
tuples = getTuples(sParams, baseUrl);
assert (tuples.size() == 1);
tuple = tuples.get(0);
assert (tuple.get("str_s").equals("b"));
assert (tuple.getDouble("EXPR$1") == 2); // count(*)
assert (tuple.getDouble("mySum") == 19);
assert (tuple.getDouble("EXPR$3") == 8); // min(field_i)
assert (tuple.getDouble("EXPR$4") == 11); // max(field_i)
assert (tuple.getDouble("EXPR$5") == 9.5D); // avg(field_i)
sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "facet",
"stmt", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), " +
"cast(avg(1.0 * field_i) as float) from collection1 where text_t='XXXX' group by str_s " +
"having ((sum(field_i) = 19) AND (min(field_i) = 100))");
tuples = getTuples(sParams, baseUrl);
assert (tuples.size() == 0);
}
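// Aggregates over the whole collection (no GROUP BY), with and without aliases, with and
// without the avg() cast, for a single-hit WHERE clause and for zero hits.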
@Test
public void testAggregatesWithoutGrouping() throws Exception {
new UpdateRequest()
.add(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "1")
.add(id, "2", "a_s", "hello0", "a_i", "2", "a_f", "2")
.add(id, "3", "a_s", "hello3", "a_i", "3", "a_f", "3")
.add(id, "4", "a_s", "hello4", "a_i", "4", "a_f", "4")
.add(id, "1", "a_s", "hello0", "a_i", "1", "a_f", "5")
.add(id, "5", "a_s", "hello3", "a_i", "10", "a_f", "6")
.add(id, "6", "a_s", "hello4", "a_i", "11", "a_f", "7")
.add(id, "7", "a_s", "hello3", "a_i", "12", "a_f", "8")
.add(id, "8", "a_s", "hello3", "a_i", "13", "a_f", "9")
.add(id, "9", "a_s", "hello0", "a_i", "14", "a_f", "10")
.commit(cluster.getSolrClient(), COLLECTIONORALIAS);
String baseUrl = cluster.getJettySolrRunners().get(0).getBaseUrl().toString() + "/" + COLLECTIONORALIAS;
SolrParams sParams = mapParams(CommonParams.QT, "/sql", "stmt",
"select count(*), sum(a_i), min(a_i), max(a_i), cast(avg(1.0 * a_i) as float), sum(a_f), " +
"min(a_f), max(a_f), avg(a_f) from collection1");
List<Tuple> tuples = getTuples(sParams, baseUrl);
assert (tuples.size() == 1);
// Test Long and Double Sums
Tuple tuple = tuples.get(0);
Double count = tuple.getDouble("EXPR$0"); // count(*)
Double sumi = tuple.getDouble("EXPR$1"); // sum(a_i)
Double mini = tuple.getDouble("EXPR$2"); // min(a_i)
Double maxi = tuple.getDouble("EXPR$3"); // max(a_i)
Double avgi = tuple.getDouble("EXPR$4"); // avg(a_i)
Double sumf = tuple.getDouble("EXPR$5"); // sum(a_f)
Double minf = tuple.getDouble("EXPR$6"); // min(a_f)
Double maxf = tuple.getDouble("EXPR$7"); // max(a_f)
Double avgf = tuple.getDouble("EXPR$8"); // avg(a_f)
assertTrue(count == 10);
assertTrue(sumi == 70);
assertTrue(mini == 0.0D);
assertTrue(maxi == 14.0D);
assertTrue(avgi == 7.0D);
assertTrue(sumf == 55.0D);
assertTrue(minf == 1.0D);
assertTrue(maxf == 10.0D);
assertTrue(avgf == 5.5D);
sParams = mapParams(CommonParams.QT, "/sql",
"stmt", "select count(*) as myCount, sum(a_i) as mySum, min(a_i) as myMin, max(a_i) as myMax, " +
"cast(avg(1.0 * a_i) as float) as myAvg, sum(a_f), min(a_f), max(a_f), avg(a_f) from collection1");
tuples = getTuples(sParams, baseUrl);
assert (tuples.size() == 1);
// Test Long and Double Sums
tuple = tuples.get(0);
count = tuple.getDouble("myCount");
sumi = tuple.getDouble("mySum");
mini = tuple.getDouble("myMin");
maxi = tuple.getDouble("myMax");
avgi = tuple.getDouble("myAvg");
sumf = tuple.getDouble("EXPR$5"); // sum(a_f)
minf = tuple.getDouble("EXPR$6"); // min(a_f)
maxf = tuple.getDouble("EXPR$7"); // max(a_f)
avgf = tuple.getDouble("EXPR$8"); // avg(a_f)
assertTrue(count == 10);
assertTrue(mini == 0.0D);
assertTrue(maxi == 14.0D);
assertTrue(sumi == 70);
assertTrue(avgi == 7.0D);
assertTrue(sumf == 55.0D);
assertTrue(minf == 1.0D);
assertTrue(maxf == 10.0D);
assertTrue(avgf == 5.5D);
// Test without cast on average int field
sParams = mapParams(CommonParams.QT, "/sql",
"stmt", "select count(*) as myCount, sum(a_i) as mySum, min(a_i) as myMin, max(a_i) as myMax, " +
"avg(a_i) as myAvg, sum(a_f), min(a_f), max(a_f), avg(a_f) from collection1");
tuples = getTuples(sParams, baseUrl);
assert (tuples.size() == 1);
// Test Long and Double Sums
tuple = tuples.get(0);
count = tuple.getDouble("myCount");
sumi = tuple.getDouble("mySum");
mini = tuple.getDouble("myMin");
maxi = tuple.getDouble("myMax");
avgi = tuple.getDouble("myAvg");
assertTrue(tuple.get("myAvg") instanceof Long);
sumf = tuple.getDouble("EXPR$5"); // sum(a_f)
minf = tuple.getDouble("EXPR$6"); // min(a_f)
maxf = tuple.getDouble("EXPR$7"); // max(a_f)
avgf = tuple.getDouble("EXPR$8"); // avg(a_f)
assertTrue(count == 10);
assertTrue(mini == 0.0D);
assertTrue(maxi == 14.0D);
assertTrue(sumi == 70);
assertTrue(avgi == 7);
assertTrue(sumf == 55.0D);
assertTrue(minf == 1.0D);
assertTrue(maxf == 10.0D);
assertTrue(avgf == 5.5D);
// Test where clause hits
sParams = mapParams(CommonParams.QT, "/sql",
"stmt", "select count(*), sum(a_i), min(a_i), max(a_i), cast(avg(1.0 * a_i) as float), sum(a_f), " +
"min(a_f), max(a_f), avg(a_f) from collection1 where id = 2");
tuples = getTuples(sParams, baseUrl);
assert (tuples.size() == 1);
tuple = tuples.get(0);
count = tuple.getDouble("EXPR$0"); // count(*)
sumi = tuple.getDouble("EXPR$1"); // sum(a_i)
mini = tuple.getDouble("EXPR$2"); // min(a_i)
maxi = tuple.getDouble("EXPR$3"); // max(a_i)
avgi = tuple.getDouble("EXPR$4"); // avg(a_i)
sumf = tuple.getDouble("EXPR$5"); // sum(a_f)
minf = tuple.getDouble("EXPR$6"); // min(a_f)
maxf = tuple.getDouble("EXPR$7"); // max(a_f)
avgf = tuple.getDouble("EXPR$8"); // avg(a_f)
assertTrue(count == 1);
assertTrue(sumi == 2);
assertTrue(mini == 2);
assertTrue(maxi == 2);
assertTrue(avgi == 2.0D);
assertTrue(sumf == 2.0D);
assertTrue(minf == 2);
assertTrue(maxf == 2);
assertTrue(avgf == 2.0);
// Test zero hits
sParams = mapParams(CommonParams.QT, "/sql",
"stmt", "select count(*), sum(a_i), min(a_i), max(a_i), cast(avg(1.0 * a_i) as float), sum(a_f), " +
"min(a_f), max(a_f), avg(a_f) from collection1 where a_s = 'blah'");
tuples = getTuples(sParams, baseUrl);
assert (tuples.size() == 1);
tuple = tuples.get(0);
count = tuple.getDouble("EXPR$0"); // count(*)
sumi = tuple.getDouble("EXPR$1"); // sum(a_i)
mini = tuple.getDouble("EXPR$2"); // min(a_i)
maxi = tuple.getDouble("EXPR$3"); // max(a_i)
avgi = tuple.getDouble("EXPR$4"); // avg(a_i)
sumf = tuple.getDouble("EXPR$5"); // sum(a_f)
minf = tuple.getDouble("EXPR$6"); // min(a_f)
maxf = tuple.getDouble("EXPR$7"); // max(a_f)
avgf = tuple.getDouble("EXPR$8"); // avg(a_f)
assertTrue(count == 0);
assertTrue(sumi == null);
assertTrue(mini == null);
assertTrue(maxi == null);
assertTrue(avgi == null);
assertTrue(sumf == null);
assertTrue(minf == null);
assertTrue(maxf == null);
assertTrue(avgf == null);
}
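// GROUP BY over year/month/day fields, rolling item sums up at each time granularity.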
@Test
public void testTimeSeriesGrouping() throws Exception {
new UpdateRequest()
.add(id, "1", "year_i", "2015", "month_i", "11", "day_i", "7", "item_i", "5")
.add(id, "2", "year_i", "2015", "month_i", "11", "day_i", "7", "item_i", "10")
.add(id, "3", "year_i", "2015", "month_i", "11", "day_i", "8", "item_i", "30")
.add(id, "4", "year_i", "2015", "month_i", "11", "day_i", "8", "item_i", "12")
.add(id, "5", "year_i", "2015", "month_i", "10", "day_i", "1", "item_i", "4")
.add(id, "6", "year_i", "2015", "month_i", "10", "day_i", "3", "item_i", "5")
.add(id, "7", "year_i", "2014", "month_i", "4", "day_i", "4", "item_i", "6")
.add(id, "8", "year_i", "2014", "month_i", "4", "day_i", "2", "item_i", "1")
.commit(cluster.getSolrClient(), COLLECTIONORALIAS);
String baseUrl = cluster.getJettySolrRunners().get(0).getBaseUrl().toString() + "/" + COLLECTIONORALIAS;
SolrParams sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "map_reduce",
"stmt", "select year_i, sum(item_i) from collection1 group by year_i order by year_i desc");
List<Tuple> tuples = getTuples(sParams, baseUrl);
assert (tuples.size() == 2);
Tuple tuple;
tuple = tuples.get(0);
assert (tuple.getLong("year_i") == 2015);
assert (tuple.getDouble("EXPR$1") == 66); // sum(item_i)
tuple = tuples.get(1);
assert (tuple.getLong("year_i") == 2014);
assert (tuple.getDouble("EXPR$1") == 7); // sum(item_i)
sParams = mapParams(CommonParams.QT, "/sql",
"stmt", "select year_i, month_i, sum(item_i) from collection1 group by year_i, month_i " +
"order by year_i desc, month_i desc");
tuples = getTuples(sParams, baseUrl);
assert (tuples.size() == 3);
tuple = tuples.get(0);
assert (tuple.getLong("year_i") == 2015);
assert (tuple.getLong("month_i") == 11);
assert (tuple.getDouble("EXPR$2") == 57); // sum(item_i)
tuple = tuples.get(1);
assert (tuple.getLong("year_i") == 2015);
assert (tuple.getLong("month_i") == 10);
assert (tuple.getDouble("EXPR$2") == 9); // sum(item_i)
tuple = tuples.get(2);
assert (tuple.getLong("year_i") == 2014);
assert (tuple.getLong("month_i") == 4);
assert (tuple.getDouble("EXPR$2") == 7); // sum(item_i)
sParams = mapParams(CommonParams.QT, "/sql",
"stmt", "select year_i, month_i, day_i, sum(item_i) from collection1 group by year_i, month_i, day_i " +
"order by year_i desc, month_i desc, day_i desc");
tuples = getTuples(sParams, baseUrl);
assert (tuples.size() == 6);
tuple = tuples.get(0);
assert (tuple.getLong("year_i") == 2015);
assert (tuple.getLong("month_i") == 11);
assert (tuple.getLong("day_i") == 8);
assert (tuple.getDouble("EXPR$3") == 42); // sum(item_i)
tuple = tuples.get(1);
assert (tuple.getLong("year_i") == 2015);
assert (tuple.getLong("month_i") == 11);
assert (tuple.getLong("day_i") == 7);
assert (tuple.getDouble("EXPR$3") == 15); // sum(item_i)
tuple = tuples.get(2);
assert (tuple.getLong("year_i") == 2015);
assert (tuple.getLong("month_i") == 10);
assert (tuple.getLong("day_i") == 3);
assert (tuple.getDouble("EXPR$3") == 5); // sum(item_i)
tuple = tuples.get(3);
assert (tuple.getLong("year_i") == 2015);
assert (tuple.getLong("month_i") == 10);
assert (tuple.getLong("day_i") == 1);
assert (tuple.getDouble("EXPR$3") == 4); // sum(item_i)
tuple = tuples.get(4);
assert (tuple.getLong("year_i") == 2014);
assert (tuple.getLong("month_i") == 4);
assert (tuple.getLong("day_i") == 4);
assert (tuple.getDouble("EXPR$3") == 6); // sum(item_i)
tuple = tuples.get(5);
assert (tuple.getLong("year_i") == 2014);
assert (tuple.getLong("month_i") == 4);
assert (tuple.getLong("day_i") == 2);
assert (tuple.getDouble("EXPR$3") == 1); // sum(item_i)
}
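// Verifies that invalid SQL (unknown columns, unknown functions) surfaces a clear exception
// through the ExceptionStream rather than failing silently.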
@Test
public void testSQLException() throws Exception {
new UpdateRequest()
.add(id, "1", "text_t", "XXXX XXXX", "str_s", "a", "field_i", "7")
.add(id, "2", "text_t", "XXXX XXXX", "str_s", "b", "field_i", "8")
.add(id, "3", "text_t", "XXXX XXXX", "str_s", "a", "field_i", "20")
.add(id, "4", "text_t", "XXXX XXXX", "str_s", "b", "field_i", "11")
.add(id, "5", "text_t", "XXXX XXXX", "str_s", "c", "field_i", "30")
.add(id, "6", "text_t", "XXXX XXXX", "str_s", "c", "field_i", "40")
.add(id, "7", "text_t", "XXXX XXXX", "str_s", "c", "field_i", "50")
.add(id, "8", "text_t", "XXXX XXXX", "str_s", "c", "field_i", "60")
.commit(cluster.getSolrClient(), COLLECTIONORALIAS);
String baseUrl = cluster.getJettySolrRunners().get(0).getBaseUrl().toString() + "/" + COLLECTIONORALIAS;
SolrParams sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "map_reduce",
"stmt", "select id, str_s from collection1 where text_t='XXXX' order by field_iff desc");
SolrStream solrStream = new SolrStream(baseUrl, sParams);
Tuple tuple = getTuple(new ExceptionStream(solrStream));
assert (tuple.EOF);
assert (tuple.EXCEPTION);
assert (tuple.getException().contains("Column 'field_iff' not found in any table"));
sParams = mapParams(CommonParams.QT, "/sql",
"stmt", "select id, field_iff, str_s from collection1 where text_t='XXXX' order by field_iff desc");
solrStream = new SolrStream(baseUrl, sParams);
tuple = getTuple(new ExceptionStream(solrStream));
assert (tuple.EOF);
assert (tuple.EXCEPTION);
assert (tuple.getException().contains("Column 'field_iff' not found in any table"));
sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "map_reduce",
"stmt",
"select str_s, count(*), sum(field_iff), min(field_i), max(field_i), cast(avg(1.0 * field_i) as float) from collection1 where text_t='XXXX' group by str_s having ((sum(field_iff) = 19) AND (min(field_i) = 8))");
solrStream = new SolrStream(baseUrl, sParams);
tuple = getTuple(new ExceptionStream(solrStream));
assert (tuple.EOF);
assert (tuple.EXCEPTION);
assert (tuple.getException().contains("Column 'field_iff' not found in any table"));
sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "map_reduce",
"stmt",
"select str_s, count(*), blah(field_i), min(field_i), max(field_i), cast(avg(1.0 * field_i) as float) from collection1 where text_t='XXXX' group by str_s having ((sum(field_i) = 19) AND (min(field_i) = 8))");
solrStream = new SolrStream(baseUrl, sParams);
tuple = getTuple(new ExceptionStream(solrStream));
assert (tuple.EOF);
assert (tuple.EXCEPTION);
assert (tuple.getException().contains("No match found for function signature blah"));
// verify exception message formatting with wildcard query
sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "map_reduce",
"stmt",
"select str_s from collection1 where not_a_field LIKE 'foo%'");
solrStream = new SolrStream(baseUrl, sParams);
tuple = getTuple(new ExceptionStream(solrStream));
assert (tuple.EOF);
assert (tuple.EXCEPTION);
assert (tuple.getException().contains("Column 'not_a_field' not found in any table"));
}
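// Same year/month/day GROUP BY checks as testTimeSeriesGrouping, but with
// aggregationMode=facet.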
@Test
public void testTimeSeriesGroupingFacet() throws Exception {
new UpdateRequest()
.add(id, "1", "year_i", "2015", "month_i", "11", "day_i", "7", "item_i", "5")
.add(id, "2", "year_i", "2015", "month_i", "11", "day_i", "7", "item_i", "10")
.add(id, "3", "year_i", "2015", "month_i", "11", "day_i", "8", "item_i", "30")
.add(id, "4", "year_i", "2015", "month_i", "11", "day_i", "8", "item_i", "12")
.add(id, "5", "year_i", "2015", "month_i", "10", "day_i", "1", "item_i", "4")
.add(id, "6", "year_i", "2015", "month_i", "10", "day_i", "3", "item_i", "5")
.add(id, "7", "year_i", "2014", "month_i", "4", "day_i", "4", "item_i", "6")
.add(id, "8", "year_i", "2014", "month_i", "4", "day_i", "2", "item_i", "1")
.commit(cluster.getSolrClient(), COLLECTIONORALIAS);
String baseUrl = cluster.getJettySolrRunners().get(0).getBaseUrl().toString() + "/" + COLLECTIONORALIAS;
SolrParams sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "facet",
"stmt", "select year_i, sum(item_i) from collection1 group by year_i order by year_i desc");
List<Tuple> tuples = getTuples(sParams, baseUrl);
assert (tuples.size() == 2);
Tuple tuple;
tuple = tuples.get(0);
assert (tuple.getLong("year_i") == 2015);
assert (tuple.getDouble("EXPR$1") == 66); // sum(item_i)
tuple = tuples.get(1);
assert (tuple.getLong("year_i") == 2014);
assert (tuple.getDouble("EXPR$1") == 7); // sum(item_i)
sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "facet",
"stmt", "select year_i, month_i, sum(item_i) from collection1 group by year_i, month_i " +
"order by year_i desc, month_i desc");
tuples = getTuples(sParams, baseUrl);
assert (tuples.size() == 3);
tuple = tuples.get(0);
assert (tuple.getLong("year_i") == 2015);
assert (tuple.getLong("month_i") == 11);
assert (tuple.getDouble("EXPR$2") == 57); // sum(item_i)
tuple = tuples.get(1);
assert (tuple.getLong("year_i") == 2015);
assert (tuple.getLong("month_i") == 10);
assert (tuple.getDouble("EXPR$2") == 9); // sum(item_i)
tuple = tuples.get(2);
assert (tuple.getLong("year_i") == 2014);
assert (tuple.getLong("month_i") == 4);
assert (tuple.getDouble("EXPR$2") == 7); // sum(item_i)
sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "facet",
"stmt", "select year_i, month_i, day_i, sum(item_i) from collection1 group by year_i, month_i, day_i " +
"order by year_i desc, month_i desc, day_i desc");
tuples = getTuples(sParams, baseUrl);
assert (tuples.size() == 6);
tuple = tuples.get(0);
assert (tuple.getLong("year_i") == 2015);
assert (tuple.getLong("month_i") == 11);
assert (tuple.getLong("day_i") == 8);
assert (tuple.getDouble("EXPR$3") == 42); // sum(item_i)
tuple = tuples.get(1);
assert (tuple.getLong("year_i") == 2015);
assert (tuple.getLong("month_i") == 11);
assert (tuple.getLong("day_i") == 7);
assert (tuple.getDouble("EXPR$3") == 15); // sum(item_i)
tuple = tuples.get(2);
assert (tuple.getLong("year_i") == 2015);
assert (tuple.getLong("month_i") == 10);
assert (tuple.getLong("day_i") == 3);
assert (tuple.getDouble("EXPR$3") == 5); // sum(item_i)
tuple = tuples.get(3);
assert (tuple.getLong("year_i") == 2015);
assert (tuple.getLong("month_i") == 10);
assert (tuple.getLong("day_i") == 1);
assert (tuple.getDouble("EXPR$3") == 4); // sum(item_i)
tuple = tuples.get(4);
assert (tuple.getLong("year_i") == 2014);
assert (tuple.getLong("month_i") == 4);
assert (tuple.getLong("day_i") == 4);
assert (tuple.getDouble("EXPR$3") == 6); // sum(item_i)
tuple = tuples.get(5);
assert (tuple.getLong("year_i") == 2014);
assert (tuple.getLong("month_i") == 4);
assert (tuple.getLong("day_i") == 2);
assert (tuple.getDouble("EXPR$3") == 1); // sum(item_i)
}
@Test
public void testParallelTimeSeriesGrouping() throws Exception {
new UpdateRequest()
.add(id, "1", "year_i", "2015", "month_i", "11", "day_i", "7", "item_i", "5")
.add(id, "2", "year_i", "2015", "month_i", "11", "day_i", "7", "item_i", "10")
.add(id, "3", "year_i", "2015", "month_i", "11", "day_i", "8", "item_i", "30")
.add(id, "4", "year_i", "2015", "month_i", "11", "day_i", "8", "item_i", "12")
.add(id, "5", "year_i", "2015", "month_i", "10", "day_i", "1", "item_i", "4")
.add(id, "6", "year_i", "2015", "month_i", "10", "day_i", "3", "item_i", "5")
.add(id, "7", "year_i", "2014", "month_i", "4", "day_i", "4", "item_i", "6")
.add(id, "8", "year_i", "2014", "month_i", "4", "day_i", "2", "item_i", "1")
.commit(cluster.getSolrClient(), COLLECTIONORALIAS);
String baseUrl = cluster.getJettySolrRunners().get(0).getBaseUrl().toString() + "/" + COLLECTIONORALIAS;
SolrParams sParams = mapParams(CommonParams.QT, "/sql", "numWorkers", "2", "aggregationMode", "map_reduce",
"stmt", "select year_i, sum(item_i) from collection1 group by year_i order by year_i desc");
List<Tuple> tuples = getTuples(sParams, baseUrl);
assert (tuples.size() == 2);
Tuple tuple;
tuple = tuples.get(0);
assert (tuple.getLong("year_i") == 2015);
assert (tuple.get("year_i") instanceof Long); // SOLR-8601, This tests that the bucket is actually a Long and not
// parsed from a String.
assert (tuple.getDouble("EXPR$1") == 66); // sum(item_i)
tuple = tuples.get(1);
assert (tuple.getLong("year_i") == 2014);
assert (tuple.getDouble("EXPR$1") == 7); // sum(item_i)
sParams = mapParams(CommonParams.QT, "/sql", "numWorkers", "2", "aggregationMode", "map_reduce",
"stmt", "select year_i, month_i, sum(item_i) from collection1 group by year_i, month_i " +
"order by year_i desc, month_i desc");
tuples = getTuples(sParams, baseUrl);
assert (tuples.size() == 3);
tuple = tuples.get(0);
assert (tuple.getLong("year_i") == 2015);
assert (tuple.getLong("month_i") == 11);
assert (tuple.get("year_i") instanceof Long);
assert (tuple.get("month_i") instanceof Long);
assert (tuple.getDouble("EXPR$2") == 57); // sum(item_i)
tuple = tuples.get(1);
assert (tuple.getLong("year_i") == 2015);
assert (tuple.getLong("month_i") == 10);
assert (tuple.getDouble("EXPR$2") == 9); // sum(item_i)
tuple = tuples.get(2);
assert (tuple.getLong("year_i") == 2014);
assert (tuple.getLong("month_i") == 4);
assert (tuple.getDouble("EXPR$2") == 7); // sum(item_i)
sParams = mapParams(CommonParams.QT, "/sql", "numWorkers", "2", "aggregationMode", "map_reduce",
"stmt", "select year_i, month_i, day_i, sum(item_i) from collection1 group by year_i, month_i, day_i " +
"order by year_i desc, month_i desc, day_i desc");
tuples = getTuples(sParams, baseUrl);
assert (tuples.size() == 6);
tuple = tuples.get(0);
assert (tuple.getLong("year_i") == 2015);
assert (tuple.getLong("month_i") == 11);
assert (tuple.getLong("day_i") == 8);
assert (tuple.getDouble("EXPR$3") == 42); // sum(item_i)
tuple = tuples.get(1);
assert (tuple.getLong("year_i") == 2015);
assert (tuple.getLong("month_i") == 11);
assert (tuple.getLong("day_i") == 7);
assert (tuple.getDouble("EXPR$3") == 15); // sum(item_i)
tuple = tuples.get(2);
assert (tuple.getLong("year_i") == 2015);
assert (tuple.getLong("month_i") == 10);
assert (tuple.getLong("day_i") == 3);
assert (tuple.getDouble("EXPR$3") == 5); // sum(item_i)
tuple = tuples.get(3);
assert (tuple.getLong("year_i") == 2015);
assert (tuple.getLong("month_i") == 10);
assert (tuple.getLong("day_i") == 1);
assert (tuple.getDouble("EXPR$3") == 4); // sum(item_i)
tuple = tuples.get(4);
assert (tuple.getLong("year_i") == 2014);
assert (tuple.getLong("month_i") == 4);
assert (tuple.getLong("day_i") == 4);
assert (tuple.getDouble("EXPR$3") == 6); // sum(item_i)
tuple = tuples.get(5);
assert (tuple.getLong("year_i") == 2014);
assert (tuple.getLong("month_i") == 4);
assert (tuple.getLong("day_i") == 2);
assert (tuple.getDouble("EXPR$3") == 1); // sum(item_i)
}
protected List<Tuple> getTuples(final SolrParams params, String baseUrl) throws IOException {
List<Tuple> tuples = new LinkedList<>();
try (TupleStream tupleStream = new SolrStream(baseUrl, params)) {
tupleStream.open();
for (; ; ) {
Tuple t = tupleStream.read();
if (t.EOF) {
break;
} else {
tuples.add(t);
}
}
}
return tuples;
}
protected Tuple getTuple(TupleStream tupleStream) throws IOException {
tupleStream.open();
Tuple t = tupleStream.read();
tupleStream.close();
return t;
}
@Test
public void testIn() throws Exception {
new UpdateRequest()
.add("id", "1", "text_t", "foobar", "str_s", "a")
.add("id", "2", "text_t", "foobaz", "str_s", "b")
.add("id", "3", "text_t", "foobaz", "str_s", "c")
.add("id", "4", "text_t", "foobaz", "str_s", "d")
.commit(cluster.getSolrClient(), COLLECTIONORALIAS);
SolrParams sParams = mapParams(CommonParams.QT, "/sql",
"stmt",
"select id from collection1 where str_s IN ('a','b','c')");
String baseUrl = cluster.getJettySolrRunners().get(0).getBaseUrl().toString() + "/" + COLLECTIONORALIAS;
List<Tuple> tuples = getTuples(sParams, baseUrl);
assertEquals(3, tuples.size());
}
private String sqlUrl() {
return cluster.getJettySolrRunners().get(0).getBaseUrl().toString() + "/" + COLLECTIONORALIAS;
}
private List<Tuple> expectResults(String sql, final int expectedCount) throws Exception {
String sqlStmt = sql.replace("$ALIAS", COLLECTIONORALIAS);
SolrParams params = mapParams(CommonParams.QT, "/sql", "stmt", sqlStmt);
List<Tuple> tuples = getTuples(params, sqlUrl());
assertEquals(expectedCount, tuples.size());
return tuples;
}
@Test
public void testColIsNotNull() throws Exception {
new UpdateRequest()
.add("id", "1", "b_s", "foobar")
.add("id", "2", "b_s", "foobaz")
.add("id", "3")
.commit(cluster.getSolrClient(), COLLECTIONORALIAS);
expectResults("SELECT b_s FROM $ALIAS WHERE b_s IS NOT NULL", 2);
}
@Test
public void testColIsNull() throws Exception {
new UpdateRequest()
.add("id", "1", "b_s", "foobar")
.add("id", "2")
.add("id", "3", "b_s", "foobaz")
.add("id", "4")
.add("id", "5", "b_s", "bazbar")
.add("id", "6")
.commit(cluster.getSolrClient(), COLLECTIONORALIAS);
expectResults("SELECT id FROM $ALIAS WHERE b_s IS NULL", 3);
}
@Test
public void testLike() throws Exception {
new UpdateRequest()
.add("id", "1", "a_s", "hello-1", "b_s", "foo")
.add("id", "2", "a_s", "world-2", "b_s", "foo")
.add("id", "3", "a_s", "hello-3", "b_s", "foo")
.add("id", "4", "a_s", "world-4", "b_s", "foo")
.add("id", "5", "a_s", "hello-5", "b_s", "foo")
.add("id", "6", "a_s", "world-6", "b_s", "bar")
.commit(cluster.getSolrClient(), COLLECTIONORALIAS);
expectResults("SELECT a_s FROM $ALIAS WHERE a_s LIKE 'h_llo-%'", 3);
// not technically valid SQL but we support it for legacy purposes, see: SOLR-15463
expectResults("SELECT a_s FROM $ALIAS WHERE a_s='world-*'", 3);
// no results
expectResults("SELECT a_s FROM $ALIAS WHERE a_s LIKE '%MATCHNONE%'", 0);
// like but without wildcard, should still work
expectResults("SELECT b_s FROM $ALIAS WHERE b_s LIKE 'foo'", 5);
// NOT LIKE
expectResults("SELECT b_s FROM $ALIAS WHERE b_s NOT LIKE 'f%'", 1);
// leading wildcard
expectResults("SELECT b_s FROM $ALIAS WHERE b_s LIKE '%oo'", 5);
// user supplied parens around arg, no double-quotes ...
expectResults("SELECT b_s FROM $ALIAS WHERE b_s LIKE '(fo%)'", 5);
expectResults("SELECT b_s FROM $ALIAS WHERE b_s LIKE '(ba*)'", 1);
}
@Test
public void testBetween() throws Exception {
new UpdateRequest()
.add(withMultiValuedField("b_is", Arrays.asList(1, 5), "id", "1", "a_i", "1"))
.add(withMultiValuedField("b_is", Arrays.asList(2, 6), "id", "2", "a_i", "2"))
.add(withMultiValuedField("b_is", Arrays.asList(3, 7), "id", "3", "a_i", "3"))
.add(withMultiValuedField("b_is", Arrays.asList(4, 8), "id", "4", "a_i", "4"))
.add(withMultiValuedField("b_is", Arrays.asList(5, 9), "id", "5", "a_i", "5"))
.commit(cluster.getSolrClient(), COLLECTIONORALIAS);
expectResults("SELECT a_i FROM $ALIAS WHERE a_i BETWEEN 2 AND 4", 3);
expectResults("SELECT a_i FROM $ALIAS WHERE a_i NOT BETWEEN 2 AND 4", 2);
expectResults("SELECT id FROM $ALIAS WHERE b_is BETWEEN 2 AND 4", 3);
expectResults("SELECT id FROM $ALIAS WHERE b_is BETWEEN 1 AND 9", 5);
expectResults("SELECT id FROM $ALIAS WHERE b_is BETWEEN 8 AND 10", 2);
expectResults("SELECT id FROM $ALIAS WHERE b_is >= 2 AND b_is <= 4", 3);
expectResults("SELECT id FROM $ALIAS WHERE b_is <= 4 AND b_is >= 2", 3);
expectResults("SELECT id FROM $ALIAS WHERE b_is <= 2 OR b_is >= 8", 4);
// tricky ~ with Solr, this should return 2 docs, but Calcite short-circuits this query and returns 0
// Calcite sees the predicate as disjoint from a single-valued field perspective ...
expectResults("SELECT id FROM $ALIAS WHERE b_is >= 5 AND b_is <= 2", 0);
// hacky work-around for the aforementioned problem ^^
expectResults("SELECT id FROM $ALIAS WHERE b_is = '(+[5 TO *] +[* TO 2])'", 2);
}
private SolrInputDocument withMultiValuedField(String mvField, List<Object> values, String... fields) {
SolrInputDocument doc = new SolrInputDocument(fields);
doc.addField(mvField, values);
return doc;
}
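// Illustrative note, not executed by the tests: a call like
// withMultiValuedField("b_is", Arrays.asList(1, 5), "id", "1", "a_i", "1") yields a document with the
// single-valued fields id=1 and a_i=1 plus the multi-valued field b_is=[1, 5]; the trailing varargs are
// handed to the SolrInputDocument(String...) constructor, which treats them as field name/value pairs.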
@Test
public void testMultipleFilters() throws Exception {
new UpdateRequest()
.add("id", "1", "a_s", "hello-1", "b_s", "foo", "c_s", "bar", "d_s", "x")
.add("id", "2", "a_s", "world-2", "b_s", "foo", "a_i", "2", "d_s", "a")
.add("id", "3", "a_s", "hello-3", "b_s", "foo", "c_s", "bar", "d_s", "x")
.add("id", "4", "a_s", "world-4", "b_s", "foo", "a_i", "3", "d_s", "b")
.add("id", "5", "a_s", "hello-5", "b_s", "foo", "c_s", "bar", "d_s", "x")
.add("id", "6", "a_s", "world-6", "b_s", "bar", "a_i", "4", "d_s", "c")
.add("id", "7", "a_s", "hello-7", "b_s", "foo", "c_s", "baz blah", "d_s", "x")
.add("id", "8", "a_s", "world-8", "b_s", "bar", "a_i", "5", "d_s", "c")
.commit(cluster.getSolrClient(), COLLECTIONORALIAS);
List<Tuple> tuples = expectResults("SELECT a_s FROM $ALIAS WHERE a_s LIKE 'world%' AND b_s IS NOT NULL AND c_s IS NULL AND a_i BETWEEN 2 AND 4 AND d_s IN ('a','b','c') ORDER BY id ASC LIMIT 10", 3);
assertEquals("world-2", tuples.get(0).getString("a_s"));
assertEquals("world-4", tuples.get(1).getString("a_s"));
assertEquals("world-6", tuples.get(2).getString("a_s"));
tuples = expectResults("SELECT a_s FROM $ALIAS WHERE a_s NOT LIKE 'hello%' AND b_s IS NOT NULL AND c_s IS NULL AND a_i NOT BETWEEN 2 AND 4 AND d_s IN ('a','b','c') ORDER BY id ASC LIMIT 10", 1);
assertEquals("world-8", tuples.get(0).getString("a_s"));
}
@Test
public void testCountWithFilters() throws Exception {
new UpdateRequest()
.add("id", "1", "a_s", "hello-1", "b_s", "foo", "c_s", "bar", "d_s", "x")
.add("id", "2", "a_s", "world-2", "b_s", "foo", "a_i", "2", "d_s", "a")
.add("id", "3", "a_s", "hello-3", "b_s", "foo", "c_s", "bar", "d_s", "x")
.add("id", "4", "a_s", "world-4", "b_s", "foo", "a_i", "3", "d_s", "b")
.add("id", "5", "a_s", "hello-5", "b_s", "foo", "c_s", "bar", "d_s", "x")
.add("id", "6", "a_s", "world-6", "b_s", "bar", "a_i", "4", "d_s", "c")
.add("id", "7", "a_s", "hello-7", "b_s", "foo", "c_s", "baz blah", "d_s", "x")
.add("id", "8", "a_s", "world-8", "b_s", "bar", "a_i", "5", "d_s", "c")
.commit(cluster.getSolrClient(), COLLECTIONORALIAS);
List<Tuple> tuples = expectResults("SELECT COUNT(1) as `the_count` FROM $ALIAS as `alias` WHERE (`alias`.`b_s`='foo' AND `alias`.`a_s` LIKE 'hell%' AND `alias`.`c_s` IS NOT NULL) HAVING (COUNT(1) > 0)", 1);
assertEquals(4L, (long) tuples.get(0).getLong("the_count"));
}
@Test
public void testDateHandling() throws Exception {
new UpdateRequest()
.add("id", "1", "pdatex", "2021-06-01T00:00:00Z")
.add("id", "2", "pdatex", "2021-06-02T02:00:00Z")
.add("id", "3", "pdatex", "2021-06-03T03:00:00Z")
.add("id", "4", "pdatex", "2021-06-04T04:00:00Z")
.add("id", "5", "pdatex", "2021-06-01T01:01:00Z")
.add("id", "6", "pdatex", "2021-06-02T02:02:00Z")
.add("id", "7", "pdatex", "2021-06-03T03:03:00Z")
.add("id", "8", "pdatex", "2021-06-04T04:04:00Z")
.commit(cluster.getSolrClient(), COLLECTIONORALIAS);
expectResults("SELECT id FROM $ALIAS WHERE pdatex IS NULL", 0);
expectResults("SELECT id, pdatex FROM $ALIAS WHERE pdatex IS NOT NULL", 8);
expectResults("SELECT id, pdatex FROM $ALIAS WHERE pdatex > '2021-06-02'", 6);
expectResults("SELECT id, pdatex FROM $ALIAS WHERE pdatex <= '2021-06-01'", 1);
expectResults("SELECT id, pdatex FROM $ALIAS WHERE pdatex > '2021-06-04 04:00:00'", 1);
expectResults("SELECT id, pdatex FROM $ALIAS WHERE pdatex = '2021-06-04 04:00:00'", 1);
expectResults("SELECT id, pdatex FROM $ALIAS WHERE pdatex = CAST('2021-06-04 04:04:00' as TIMESTAMP)", 1);
expectResults("SELECT id, pdatex FROM $ALIAS WHERE pdatex BETWEEN '2021-06-03' AND '2021-06-05'", 4);
}
@Test
public void testISO8601TimestampFiltering() throws Exception {
new UpdateRequest()
.add("id", "1", "pdatex", "2021-07-13T15:12:09.037Z")
.add("id", "2", "pdatex", "2021-07-13T15:12:10.037Z")
.add("id", "3", "pdatex", "2021-07-13T15:12:11.037Z")
.commit(cluster.getSolrClient(), COLLECTIONORALIAS);
expectResults("SELECT id, pdatex FROM $ALIAS WHERE pdatex >= CAST('2021-07-13 15:12:10.037' as TIMESTAMP)", 2);
expectResults("SELECT id, pdatex FROM $ALIAS WHERE pdatex >= '2021-07-13T15:12:10.037Z'", 2);
expectResults("SELECT id, pdatex FROM $ALIAS WHERE pdatex < '2021-07-13T15:12:10.037Z'", 1);
expectResults("SELECT id, pdatex FROM $ALIAS WHERE pdatex = '2021-07-13T15:12:10.037Z'", 1);
expectResults("SELECT id, pdatex FROM $ALIAS WHERE pdatex <> '2021-07-13T15:12:10.037Z'", 2);
expectResults("SELECT id, pdatex FROM $ALIAS WHERE pdatex BETWEEN '2021-07-13T15:12:09.037Z' AND '2021-07-13T15:12:10.037Z' ORDER BY pdatex ASC", 2);
expectResults("SELECT id, pdatex FROM $ALIAS WHERE pdatex >= '2021-07-13T15:12:10.037Z'", 2);
expectResults("SELECT id, pdatex FROM $ALIAS WHERE pdatex >= '2021-07-13T15:12:10.037Z' ORDER BY pdatex ASC LIMIT 10", 2);
}
@Test
public void testAggsOnCustomFieldType() throws Exception {
new UpdateRequest()
.add(withMultiValuedField("pintxs", Arrays.asList(1,5),"id", "1", "tintx", "1", "pintx", "2", "tfloatx", "3.33", "pfloatx", "3.33", "tlongx", "1623875868000", "plongx", "1623875868000", "tdoublex", "3.14159265359", "pdoublex", "3.14159265359", "stringx", "A", "textx", "aaa", "pdatex", "2021-06-17T00:00:00Z"))
.add(withMultiValuedField("pintxs", Arrays.asList(2,6),"id", "2", "tintx", "2", "pintx", "4", "tfloatx", "4.44", "pfloatx", "4.44", "tlongx", "1723875868000", "plongx", "1723875868000", "tdoublex", "6.14159265359", "pdoublex", "6.14159265359", "stringx", "B", "textx", "bbb", "pdatex", "2021-06-18T00:00:00Z"))
.add(withMultiValuedField("pintxs", Arrays.asList(3,7),"id", "3", "tintx", "3", "pintx", "6", "tfloatx", "5.55", "pfloatx", "5.55", "tlongx", "1823875868000", "plongx", "1823875868000", "tdoublex", "9.14159265359", "pdoublex", "9.14159265359", "stringx", "C", "textx", "ccc", "pdatex", "2021-06-19T00:00:00Z"))
.commit(cluster.getSolrClient(), COLLECTIONORALIAS);
String dateStatsSql = "min(pdatex) as min_pdatex, max(pdatex) as max_pdatex";
String numTypeStatsSql = toStatsSql(Arrays.asList("intx", "floatx", "longx", "doublex"));
String sql = "SELECT min(pintxs) as min_pintxs, max(pintxs) as max_pintxs, "+
min("stringx")+", "+max("stringx")+", "+min("textx")+", "+max("textx")+", "+numTypeStatsSql+", "+dateStatsSql+" FROM $ALIAS";
List<Tuple> tuples = expectResults(sql, 1);
Tuple stats = tuples.get(0);
assertEquals("A", stats.getString("min_stringx"));
assertEquals("C", stats.getString("max_stringx"));
assertEquals("aaa", stats.getString("min_textx"));
assertEquals("ccc", stats.getString("max_textx"));
assertEquals(1L, (long) stats.getLong("min_tintx"));
assertEquals(3L, (long) stats.getLong("max_tintx"));
assertEquals(2L, (long) stats.getLong("min_pintx"));
assertEquals(6L, (long) stats.getLong("max_pintx"));
assertEquals(1L, (long) stats.getLong("min_pintxs"));
assertEquals(7L, (long) stats.getLong("max_pintxs"));
assertEquals(1623875868000L, (long) stats.getLong("min_tlongx"));
assertEquals(1823875868000L, (long) stats.getLong("max_tlongx"));
assertEquals(1623875868000L, (long) stats.getLong("min_plongx"));
assertEquals(1823875868000L, (long) stats.getLong("max_plongx"));
final double delta = 0.00001d;
assertEquals(3.33d, stats.getDouble("min_tfloatx"), delta);
assertEquals(5.55d, stats.getDouble("max_tfloatx"), delta);
assertEquals(3.33d, stats.getDouble("min_pfloatx"), delta);
assertEquals(5.55d, stats.getDouble("max_pfloatx"), delta);
assertEquals(3.14159265359d, stats.getDouble("min_tdoublex"), delta);
assertEquals(9.14159265359d, stats.getDouble("max_tdoublex"), delta);
assertEquals(3.14159265359d, stats.getDouble("min_pdoublex"), delta);
assertEquals(9.14159265359d, stats.getDouble("max_pdoublex"), delta);
assertNotNull(stats.getDate("min_pdatex"));
assertNotNull(stats.getDate("max_pdatex"));
}
private String toStatsSql(List<String> types) {
StringBuilder sb = new StringBuilder();
for (String type : types) {
if (sb.length() > 0) {
sb.append(", ");
}
sb.append(min("t"+type)).append(", ").append(min("p"+type));
sb.append(", ").append(max("t"+type)).append(", ").append(max("p"+type));
}
return sb.toString();
}
private String min(String type) {
return String.format(Locale.ROOT, "min(%s) as min_%s", type, type);
}
private String max(String type) {
return String.format(Locale.ROOT, "max(%s) as max_%s", type, type);
}
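// Worked example (illustrative only): toStatsSql(Arrays.asList("intx")) expands to
// "min(tintx) as min_tintx, min(pintx) as min_pintx, max(tintx) as max_tintx, max(pintx) as max_pintx",
// i.e. a min/max pair for both the trie ("t" prefix) and point ("p" prefix) variant of each listed type;
// testAggsOnCustomFieldType above splices this fragment directly into its SELECT list.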
@Test
public void testOffsetAndFetch() throws Exception {
new UpdateRequest()
.add("id", "01")
.add("id", "02")
.add("id", "03")
.add("id", "04")
.add("id", "05")
.add("id", "06")
.add("id", "07")
.add("id", "08")
.add("id", "09")
.add("id", "10")
.add("id", "11")
.commit(cluster.getSolrClient(), COLLECTIONORALIAS);
final int numDocs = 11;
List<Tuple> results = expectResults("SELECT id FROM $ALIAS ORDER BY id DESC OFFSET 0 FETCH NEXT 5 ROWS ONLY", 5);
assertEquals("11", results.get(0).getString("id"));
assertEquals("10", results.get(1).getString("id"));
assertEquals("09", results.get(2).getString("id"));
assertEquals("08", results.get(3).getString("id"));
assertEquals("07", results.get(4).getString("id"));
// no explicit offset, but defaults to 0 if using FETCH!
results = expectResults("SELECT id FROM $ALIAS ORDER BY id DESC FETCH NEXT 5 ROWS ONLY", 5);
assertEquals("11", results.get(0).getString("id"));
assertEquals("10", results.get(1).getString("id"));
assertEquals("09", results.get(2).getString("id"));
assertEquals("08", results.get(3).getString("id"));
assertEquals("07", results.get(4).getString("id"));
results = expectResults("SELECT id FROM $ALIAS ORDER BY id DESC OFFSET 5 FETCH NEXT 5 ROWS ONLY", 5);
assertEquals("06", results.get(0).getString("id"));
assertEquals("05", results.get(1).getString("id"));
assertEquals("04", results.get(2).getString("id"));
assertEquals("03", results.get(3).getString("id"));
assertEquals("02", results.get(4).getString("id"));
results = expectResults("SELECT id FROM $ALIAS ORDER BY id DESC OFFSET 10 FETCH NEXT 5 ROWS ONLY", 1);
assertEquals("01", results.get(0).getString("id"));
expectResults("SELECT id FROM $ALIAS ORDER BY id DESC LIMIT "+numDocs, numDocs);
for (int i=0; i < numDocs; i++) {
results = expectResults("SELECT id FROM $ALIAS ORDER BY id ASC OFFSET "+i+" FETCH NEXT 1 ROW ONLY", 1);
String id = results.get(0).getString("id");
if (id.startsWith("0")) id = id.substring(1);
assertEquals(i+1, Integer.parseInt(id));
}
// just past the end of the results
expectResults("SELECT id FROM $ALIAS ORDER BY id DESC OFFSET "+numDocs+" FETCH NEXT 5 ROWS ONLY", 0);
// Solr doesn't support OFFSET w/o LIMIT
expectThrows(IOException.class, () -> expectResults("SELECT id FROM $ALIAS ORDER BY id DESC OFFSET 5", 5));
}
@Test
public void testCountDistinct() throws Exception {
UpdateRequest updateRequest = new UpdateRequest();
final int cardinality = 5;
final int maxDocs = 100; // keep this an even # b/c we divide by 2 in this test
final String padFmt = "%03d";
for (int i = 0; i < maxDocs; i++) {
updateRequest = addDocForDistinctTests(i, updateRequest, cardinality, padFmt);
}
updateRequest.commit(cluster.getSolrClient(), COLLECTIONORALIAS);
List<Tuple> tuples = expectResults("SELECT COUNT(1) AS total_rows, COUNT(distinct str_s) AS distinct_str, MIN(str_s) AS min_str, MAX(str_s) AS max_str FROM $ALIAS", 1);
Tuple firstRow = tuples.get(0);
assertEquals(maxDocs, (long) firstRow.getLong("total_rows"));
assertEquals(cardinality, (long) firstRow.getLong("distinct_str"));
String expectedMin = String.format(Locale.ROOT, padFmt, 0);
String expectedMax = String.format(Locale.ROOT, padFmt, cardinality - 1); // max is card-1
assertEquals(expectedMin, firstRow.getString("min_str"));
assertEquals(expectedMax, firstRow.getString("max_str"));
tuples = expectResults("SELECT DISTINCT str_s FROM $ALIAS ORDER BY str_s ASC", cardinality);
for (int t = 0; t < tuples.size(); t++) {
assertEquals(String.format(Locale.ROOT, padFmt, t), tuples.get(t).getString("str_s"));
}
tuples = expectResults("SELECT APPROX_COUNT_DISTINCT(distinct str_s) AS approx_distinct FROM $ALIAS", 1);
firstRow = tuples.get(0);
assertEquals(cardinality, (long) firstRow.getLong("approx_distinct"));
tuples = expectResults("SELECT country_s, COUNT(*) AS count_per_bucket FROM $ALIAS GROUP BY country_s", 2);
assertEquals(maxDocs/2L, (long)tuples.get(0).getLong("count_per_bucket"));
assertEquals(maxDocs/2L, (long)tuples.get(1).getLong("count_per_bucket"));
}
private UpdateRequest addDocForDistinctTests(int id, UpdateRequest updateRequest, int cardinality, String padFmt) {
String country = id % 2 == 0 ? "US" : "CA";
return updateRequest.add("id", String.valueOf(id), "str_s", String.format(Locale.ROOT, padFmt, id % cardinality), "country_s", country);
}
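// Worked example (illustrative only): with maxDocs=100, cardinality=5 and padFmt="%03d", the docs get
// str_s values "000".."004" cycling by id and country_s alternating US/CA, which is why the assertions
// above expect total_rows=100, distinct_str=5, min_str="000", max_str="004" and 50 docs per country bucket.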
@Test
public void testSelectStarWithLimit() throws Exception {
new UpdateRequest()
.add("id", "1", "a_s", "hello-1", "b_s", "foo", "c_s", "bar", "d_s", "x")
.add("id", "2", "a_s", "world-2", "b_s", "foo", "a_i", "2", "d_s", "a")
.add("id", "3", "a_s", "hello-3", "b_s", "foo", "c_s", "bar", "d_s", "x")
.add("id", "4", "a_s", "world-4", "b_s", "foo", "a_i", "3", "d_s", "b")
.commit(cluster.getSolrClient(), COLLECTIONORALIAS);
expectResults("SELECT * FROM $ALIAS LIMIT 100", 4);
// select * w/o limit is not supported by Solr SQL
expectThrows(IOException.class, () -> expectResults("SELECT * FROM $ALIAS", -1));
}
@Test
public void testSelectEmptyField() throws Exception {
new UpdateRequest()
.add("id", "01", "notstored", "X", "dvonly", "Y")
.add("id", "02", "notstored", "X", "dvonly", "Y")
.add("id", "03", "notstored", "X", "dvonly", "Y")
.add("id", "04", "notstored", "X", "dvonly", "Y")
.add("id", "05", "notstored", "X", "dvonly", "Y")
.commit(cluster.getSolrClient(), COLLECTIONORALIAS);
// stringx is declared in the schema but has no docs
expectResults("SELECT id, stringx FROM $ALIAS", 5);
expectResults("SELECT id, stringx FROM $ALIAS LIMIT 10", 5);
expectResults("SELECT id, stringx, dvonly FROM $ALIAS", 5);
expectResults("SELECT id, stringx, dvonly FROM $ALIAS LIMIT 10", 5);
// notafield_i matches a dynamic field pattern but has no docs, so don't allow this
expectThrows(IOException.class, () -> expectResults("SELECT id, stringx, notafield_i FROM $ALIAS", 5));
expectThrows(IOException.class, () -> expectResults("SELECT id, stringx, notstored FROM $ALIAS", 5));
}
@Test
public void testMultiValuedFieldHandling() throws Exception {
List<String> textmv = Arrays.asList("just some text here", "across multiple values", "the quick brown fox jumped over the lazy dog");
List<String> listOfTimestamps = Arrays.asList("2021-08-06T15:37:52Z", "2021-08-06T15:37:53Z", "2021-08-06T15:37:54Z");
List<Date> dates = listOfTimestamps.stream().map(ts -> new Date(Instant.parse(ts).toEpochMilli())).collect(Collectors.toList());
List<String> stringxmv = Arrays.asList("a", "b", "c");
List<String> stringsx = Arrays.asList("d", "e", "f");
List<Double> pdoublesx = Arrays.asList(1d, 2d, 3d);
List<Double> pdoublexmv = Arrays.asList(4d, 5d, 6d);
List<Boolean> booleans = Arrays.asList(false, true);
List<Long> evenLongs = Arrays.asList(2L, 4L, 6L);
List<Long> oddLongs = Arrays.asList(1L, 3L, 5L);
UpdateRequest update = new UpdateRequest();
final int maxDocs = 10;
for (int i = 0; i < maxDocs; i++) {
SolrInputDocument doc = new SolrInputDocument("id", String.valueOf(i));
if (i % 2 == 0) {
doc.setField("stringsx", stringsx);
doc.setField("pdoublexmv", pdoublexmv);
doc.setField("longs", evenLongs);
} else {
// stringsx & pdoublexmv null
doc.setField("longs", oddLongs);
}
doc.setField("stringxmv", stringxmv);
doc.setField("pdoublesx", pdoublesx);
doc.setField("pdatexs", dates);
doc.setField("textmv", textmv);
doc.setField("booleans", booleans);
update.add(doc);
}
update.add("id", String.valueOf(maxDocs)); // all multi-valued fields are null
update.commit(cluster.getSolrClient(), COLLECTIONORALIAS);
expectResults("SELECT stringxmv, stringsx, booleans FROM $ALIAS WHERE stringxmv > 'a'", 10);
expectResults("SELECT stringxmv, stringsx, booleans FROM $ALIAS WHERE stringxmv NOT IN ('a')", 1);
expectResults("SELECT stringxmv, stringsx, booleans FROM $ALIAS WHERE stringxmv > 'a' LIMIT 10", 10);
expectResults("SELECT stringxmv, stringsx, booleans FROM $ALIAS WHERE stringxmv NOT IN ('a') LIMIT 10", 1);
// can't sort by a mv field
expectThrows(IOException.class,
() -> expectResults("SELECT stringxmv FROM $ALIAS WHERE stringxmv IS NOT NULL ORDER BY stringxmv ASC", 0));
// even ids have these fields, odd ones are null ...
expectListInResults("0", "stringsx", stringsx, -1, 5);
expectListInResults("0", "pdoublexmv", pdoublexmv, -1, 5);
expectListInResults("1", "stringsx", null, -1, 0);
expectListInResults("1", "pdoublexmv", null, -1, 0);
expectListInResults("2", "stringsx", stringsx, 10, 5);
expectListInResults("2", "pdoublexmv", pdoublexmv, 10, 5);
expectListInResults("1", "stringxmv", stringxmv, -1, 10);
expectListInResults("1", "pdoublesx", pdoublesx, -1, 10);
expectListInResults("1", "pdatexs", listOfTimestamps, -1, 10);
expectListInResults("1", "booleans", booleans, -1, 10);
expectListInResults("1", "longs", oddLongs, -1, 5);
expectListInResults("2", "stringxmv", stringxmv, 10, 10);
expectListInResults("2", "pdoublesx", pdoublesx, 10, 10);
expectListInResults("2", "pdatexs", listOfTimestamps, 10, 10);
expectListInResults("2", "textmv", textmv, 10, 10);
expectListInResults("2", "booleans", booleans, 10, 10);
expectListInResults("2", "longs", evenLongs, 10, 5);
expectAggCount("stringxmv", 3);
expectAggCount("stringsx", 3);
expectAggCount("pdoublesx", 3);
expectAggCount("pdoublexmv", 3);
expectAggCount("pdatexs", 3);
expectAggCount("booleans", 2);
expectAggCount("longs", 6);
}
private void expectListInResults(String id, String mvField, List<?> expected, int limit, int expCount) throws Exception {
String projection = limit > 0 ? "*" : "id," + mvField;
String sql = "SELECT " + projection + " FROM $ALIAS WHERE id='" + id + "'";
if (limit > 0) sql += " LIMIT " + limit;
List<Tuple> results = expectResults(sql, 1);
if (expected != null) {
assertEquals(expected, results.get(0).get(mvField));
} else {
assertNull(results.get(0).get(mvField));
}
if (expected != null) {
String crit = "'" + expected.get(0) + "'";
sql = "SELECT " + projection + " FROM $ALIAS WHERE " + mvField + "=" + crit;
if (limit > 0) sql += " LIMIT " + limit;
expectResults(sql, expCount);
// test "IN" operator but skip for text analyzed fields
if (!"textmv".equals(mvField)) {
String inClause = expected.stream().map(o -> "'" + o + "'").collect(Collectors.joining(","));
sql = "SELECT " + projection + " FROM $ALIAS WHERE " + mvField + " IN (" + inClause + ")";
if (limit > 0) sql += " LIMIT " + limit;
expectResults(sql, expCount);
}
}
}
private void expectAggCount(String mvField, int expCount) throws Exception {
expectResults("SELECT COUNT(*), " + mvField + " FROM $ALIAS GROUP BY " + mvField, expCount);
}
@Test
public void testManyInValues() throws Exception {
int maxSize = 1000;
int width = 4;
List<String> bigList = new ArrayList<>(maxSize);
for (int i=0; i < maxSize; i++) {
bigList.add(StringUtils.leftPad(String.valueOf(i), width, "0"));
}
UpdateRequest update = new UpdateRequest();
final int maxDocs = 10;
for (int i = 0; i < maxDocs; i++) {
SolrInputDocument doc = new SolrInputDocument("id", String.valueOf(i));
doc.setField("stringxmv", bigList);
update.add(doc);
}
update.add("id", String.valueOf(maxDocs)); // no stringxmv
SolrInputDocument doc = new SolrInputDocument("id", String.valueOf(maxDocs+1));
doc.setField("stringxmv", Arrays.asList("a", "b", "c"));
update.add(doc);
doc = new SolrInputDocument("id", String.valueOf(maxDocs+2));
doc.setField("stringxmv", Arrays.asList("d", "e", "f"));
update.add(doc);
update.commit(cluster.getSolrClient(), COLLECTIONORALIAS);
int numIn = 200;
List<String> bigInList = new ArrayList<>(bigList);
Collections.shuffle(bigInList, random());
bigInList = bigInList.subList(0, numIn).stream().map(s -> "'"+s+"'").collect(Collectors.toList());
String inClause = String.join(",", bigInList);
String sql = "SELECT id FROM $ALIAS WHERE stringxmv IN ("+inClause+") ORDER BY id ASC";
expectResults(sql, maxDocs);
sql = "SELECT * FROM $ALIAS WHERE stringxmv IN ("+inClause+") ORDER BY id ASC LIMIT "+maxDocs;
expectResults(sql, maxDocs);
sql = "SELECT id FROM $ALIAS WHERE stringxmv NOT IN ("+inClause+") ORDER BY id ASC";
expectResults(sql, 3);
sql = "SELECT id FROM $ALIAS WHERE stringxmv IS NOT NULL AND stringxmv NOT IN ("+inClause+") ORDER BY id ASC";
expectResults(sql, 2);
sql = "SELECT * FROM $ALIAS WHERE stringxmv IN ('a','d') ORDER BY id ASC LIMIT 10";
expectResults(sql, 2);
}
@Test
public void testNotAndOrLogic() throws Exception {
new UpdateRequest()
.add("id", "1", "a_s", "hello-1", "b_s", "foo", "c_s", "bar", "d_s", "x")
.add("id", "2", "a_s", "world-2", "b_s", "foo", "a_i", "2", "d_s", "a")
.add("id", "3", "a_s", "hello-3", "b_s", "foo", "c_s", "bar", "d_s", "x")
.add("id", "4", "a_s", "world-4", "b_s", "foo", "a_i", "3", "d_s", "b")
.commit(cluster.getSolrClient(), COLLECTIONORALIAS);
// single NOT clause
expectResults("SELECT id FROM $ALIAS WHERE a_s <> 'hello-1' ORDER BY id ASC LIMIT 10", 3);
expectResults("SELECT id FROM $ALIAS WHERE b_s NOT LIKE 'foo' ORDER BY id ASC LIMIT 10", 0);
expectResults("SELECT id FROM $ALIAS WHERE d_s NOT IN ('x','y') ORDER BY id ASC LIMIT 10", 2);
expectResults("SELECT id FROM $ALIAS WHERE a_i IS NULL ORDER BY id ASC LIMIT 10", 2);
expectResults("SELECT id FROM $ALIAS WHERE c_s IS NOT NULL ORDER BY id ASC LIMIT 10", 2);
expectResults("SELECT * FROM $ALIAS WHERE a_s='hello-1' AND d_s='x' ORDER BY id ASC LIMIT 10", 1);
expectResults("SELECT id FROM $ALIAS WHERE a_s='hello-1' AND d_s='x'", 1);
expectResults("SELECT * FROM $ALIAS WHERE a_s <> 'hello-1' AND d_s <> 'x' ORDER BY id ASC LIMIT 10", 2);
expectResults("SELECT id FROM $ALIAS WHERE a_s <> 'hello-1' AND d_s <> 'x'", 2);
expectResults("SELECT * FROM $ALIAS WHERE d_s <> 'x' ORDER BY id ASC LIMIT 10", 2);
expectResults("SELECT id FROM $ALIAS WHERE d_s <> 'x'", 2);
expectResults("SELECT * FROM $ALIAS WHERE (a_s = 'hello-1' OR a_s = 'hello-3') AND d_s <> 'x' ORDER BY id ASC LIMIT 10", 0);
expectResults("SELECT id FROM $ALIAS WHERE (a_s = 'hello-1' OR a_s = 'hello-3') AND d_s <> 'x'", 0);
expectResults("SELECT * FROM $ALIAS WHERE (a_s = 'hello-1' OR a_s = 'hello-3') AND d_s NOT IN ('x') ORDER BY id ASC LIMIT 10", 0);
expectResults("SELECT id FROM $ALIAS WHERE (a_s = 'hello-1' OR a_s = 'hello-3') AND d_s NOT IN ('x')", 0);
expectResults("SELECT * FROM $ALIAS WHERE (a_s = 'hello-1' OR a_s = 'hello-3') AND d_s NOT IN ('a') ORDER BY id ASC LIMIT 10", 2);
expectResults("SELECT id FROM $ALIAS WHERE (a_s = 'hello-1' OR a_s = 'hello-3') AND d_s NOT IN ('a')", 2);
expectResults("SELECT * FROM $ALIAS WHERE (a_s = 'hello-1' OR a_s = 'hello-3') AND d_s NOT LIKE 'x' ORDER BY id ASC LIMIT 10", 0);
expectResults("SELECT id FROM $ALIAS WHERE (a_s = 'hello-1' OR a_s = 'hello-3') AND d_s NOT LIKE 'x'", 0);
expectResults("SELECT * FROM $ALIAS WHERE (a_s = 'hello-1' OR a_s = 'hello-3') AND d_s NOT LIKE 'b' ORDER BY id ASC LIMIT 10", 2);
expectResults("SELECT id FROM $ALIAS WHERE (a_s = 'hello-1' OR a_s = 'hello-3') AND d_s NOT LIKE 'b'", 2);
expectResults("SELECT * FROM $ALIAS WHERE (a_s = 'hello-1' OR a_s = 'hello-3') AND d_s LIKE 'x' ORDER BY id ASC LIMIT 10", 2);
expectResults("SELECT id FROM $ALIAS WHERE (a_s = 'hello-1' OR a_s = 'hello-3') AND d_s LIKE 'x'", 2);
expectResults("SELECT * FROM $ALIAS WHERE a_s <> 'hello-1' AND b_s='foo' AND d_s IS NOT NULL AND a_i IS NULL AND c_s IN ('bar') ORDER BY id ASC LIMIT 10", 1);
expectResults("SELECT id FROM $ALIAS WHERE a_s <> 'hello-1' AND b_s='foo' AND d_s IS NOT NULL AND a_i IS NULL AND c_s IN ('bar')", 1);
// just a bunch of OR's that end up matching all docs
expectResults("SELECT id FROM $ALIAS WHERE a_s <> 'hello-1' OR a_i <> 2 OR d_s <> 'x' ORDER BY id ASC LIMIT 10", 4);
}
}
| 44,075 |
521 | <filename>third_party/virtualbox/src/VBox/Additions/x11/x11include/xorg-server-1.0.1/iplpack.h
/* $XFree86$ */
/* Modified nov 94 by <NAME> (<EMAIL>) for use with
interleaved planes */
#define NUM_LONGS(planes, xs, xe) \
(((((xe) * (planes) + 31) & ~31) - \
(((xs) * (planes)) & ~31))/32)
#define NUM_TEMP_BYTES(planes, longs) \
(((2 * (longs) + (planes) - 1) / planes + 1) * planes * 2)
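/*
 * Worked example (illustrative only; the plane/span values below are arbitrary):
 * for 8 interleaved planes and a pixel span xs=3..xe=10,
 * NUM_LONGS(8, 3, 10) = (((10*8 + 31) & ~31) - ((3*8) & ~31)) / 32
 *                     = (96 - 0) / 32 = 3 longwords,
 * and NUM_TEMP_BYTES(8, 3) = (((2*3 + 8 - 1) / 8 + 1) * 8 * 2) = 32 bytes
 * of temporary storage (integer division throughout).
 */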
| 181 |
318 | <reponame>rafaresearch/webbit
package org.webbitserver.netty;
import org.jboss.netty.channel.ChannelHandlerContext;
import org.jboss.netty.channel.ChannelStateEvent;
import org.jboss.netty.channel.ExceptionEvent;
import org.jboss.netty.channel.SimpleChannelUpstreamHandler;
import org.webbitserver.EventSourceHandler;
import java.lang.Thread.UncaughtExceptionHandler;
import java.util.concurrent.Executor;
public class EventSourceConnectionHandler extends SimpleChannelUpstreamHandler {
private final ConnectionHelper connectionHelper;
public EventSourceConnectionHandler(
Executor executor,
UncaughtExceptionHandler exceptionHandler,
UncaughtExceptionHandler ioExceptionHandler,
final NettyEventSourceConnection eventSourceConnection,
final EventSourceHandler eventSourceHandler
) {
this.connectionHelper = new ConnectionHelper(executor, exceptionHandler, ioExceptionHandler) {
@Override
protected void fireOnClose() throws Exception {
eventSourceHandler.onClose(eventSourceConnection);
}
};
}
@Override
public void channelUnbound(ChannelHandlerContext ctx, ChannelStateEvent e) {
connectionHelper.fireOnClose(e);
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) {
connectionHelper.fireConnectionException(e);
}
}
| 485 |
679 | <reponame>Grosskopf/openoffice<gh_stars>100-1000
/**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
// MARKER(update_precomp.py): autogen include statement, do not remove
#include "precompiled_sfx2.hxx"
#include <svl/stritem.hxx>
#ifndef GCC
#endif
#include <com/sun/star/util/URL.hpp>
#include <com/sun/star/util/XURLTransformer.hpp>
#include <com/sun/star/frame/XController.hpp>
#include <com/sun/star/lang/XUnoTunnel.hpp>
#include <com/sun/star/frame/status/ItemStatus.hpp>
#include <com/sun/star/frame/status/ItemState.hpp>
#include <com/sun/star/awt/MouseButton.hpp>
#include <vcl/status.hxx>
#include <sfx2/app.hxx>
#include "sfx2/stbitem.hxx"
#include "sfxtypes.hxx"
#include <sfx2/msg.hxx>
#include "arrdecl.hxx"
#include <sfx2/bindings.hxx>
#include <sfx2/msgpool.hxx>
#include <sfx2/module.hxx>
#include <sfx2/dispatch.hxx>
#include <sfx2/unoctitm.hxx>
#include <sfx2/objsh.hxx>
#include <sfx2/sfx.hrc>
#include <comphelper/processfactory.hxx>
#include <svl/eitem.hxx>
#include <svl/stritem.hxx>
#include <svl/intitem.hxx>
#ifndef _TOOLKIT_HELPER_VCLUNOHELPER_HXX_
#include <toolkit/helper/vclunohelper.hxx>
#endif
#include <toolkit/helper/convert.hxx>
using namespace ::com::sun::star;
//--------------------------------------------------------------------
sal_uInt16 SfxStatusBarControl::convertAwtToVCLMouseButtons( sal_Int16 nAwtMouseButtons )
{
sal_uInt16 nVCLMouseButtons( 0 );
if ( nAwtMouseButtons & awt::MouseButton::LEFT )
nVCLMouseButtons |= MOUSE_LEFT;
if ( nAwtMouseButtons & awt::MouseButton::RIGHT )
nVCLMouseButtons |= MOUSE_RIGHT;
if ( nAwtMouseButtons & awt::MouseButton::MIDDLE )
nVCLMouseButtons |= MOUSE_MIDDLE;
return nVCLMouseButtons;
}
//--------------------------------------------------------------------
svt::StatusbarController* SAL_CALL SfxStatusBarControllerFactory(
const uno::Reference< frame::XFrame >& rFrame,
StatusBar* pStatusBar,
unsigned short nID,
const ::rtl::OUString& aCommandURL )
{
::vos::OGuard aGuard( Application::GetSolarMutex() );
util::URL aTargetURL;
aTargetURL.Complete = aCommandURL;
uno::Reference < util::XURLTransformer > xTrans( ::comphelper::getProcessServiceFactory()->createInstance(
rtl::OUString::createFromAscii( "com.sun.star.util.URLTransformer" )), uno::UNO_QUERY );
xTrans->parseStrict( aTargetURL );
SfxObjectShell* pObjShell = NULL;
uno::Reference < frame::XController > xController;
uno::Reference < frame::XModel > xModel;
if ( rFrame.is() )
{
xController = rFrame->getController();
if ( xController.is() )
xModel = xController->getModel();
}
if ( xModel.is() )
{
// Get tunnel from model to retrieve the SfxObjectShell pointer from it
::com::sun::star::uno::Reference < ::com::sun::star::lang::XUnoTunnel > xObj( xModel, uno::UNO_QUERY );
::com::sun::star::uno::Sequence < sal_Int8 > aSeq = SvGlobalName( SFX_GLOBAL_CLASSID ).GetByteSequence();
if ( xObj.is() )
{
sal_Int64 nHandle = xObj->getSomething( aSeq );
if ( nHandle )
pObjShell = reinterpret_cast< SfxObjectShell* >( sal::static_int_cast< sal_IntPtr >( nHandle ));
}
}
SfxModule* pModule = pObjShell ? pObjShell->GetModule() : NULL;
SfxSlotPool* pSlotPool = 0;
if ( pModule )
pSlotPool = pModule->GetSlotPool();
else
pSlotPool = &(SfxSlotPool::GetSlotPool( NULL ));
const SfxSlot* pSlot = pSlotPool->GetUnoSlot( aTargetURL.Path );
if ( pSlot )
{
sal_uInt16 nSlotId = pSlot->GetSlotId();
if ( nSlotId > 0 )
{
rtl::OString aCmd(".uno:");
aCmd += pSlot->GetUnoName();
pStatusBar->SetHelpId( nSlotId, aCmd );
return SfxStatusBarControl::CreateControl( nSlotId, nID, pStatusBar, pModule );
}
}
return NULL;
}
//--------------------------------------------------------------------
SfxStatusBarControl::SfxStatusBarControl
(
sal_uInt16 nSlotID, /* Slot id this instance is bound to. If a
slot id != 0 was specified at
registration time, it is always the
one given there. */
sal_uInt16 nCtrlID, /* ID of this controller in the status bar */
StatusBar& rBar /* Reference to the StatusBar for which
this control was created. */
)
/* [Description]
Constructor of the SfxStatusBarControl class. Subclasses are created
on demand by the SFx via a factory.
Instances of this base class are created for all status bar fields
for which no specialized controls have been registered.
*/
: svt::StatusbarController(),
nSlotId( nSlotID ),
nId( nCtrlID ),
pBar( &rBar )
{
}
//--------------------------------------------------------------------
SfxStatusBarControl::~SfxStatusBarControl()
/* [Description]
Destructor of the SfxStatusBarControl class. Instances of this
class and its subclasses are destroyed by the SFx.
*/
{}
//--------------------------------------------------------------------
// XInterface
uno::Any SAL_CALL SfxStatusBarControl::queryInterface( const uno::Type & rType )
throw( uno::RuntimeException)
{
return svt::StatusbarController::queryInterface( rType );
}
void SAL_CALL SfxStatusBarControl::acquire() throw()
{
OWeakObject::acquire();
}
void SAL_CALL SfxStatusBarControl::release() throw()
{
OWeakObject::release();
}
//--------------------------------------------------------------------
// XEventListener
void SAL_CALL SfxStatusBarControl::disposing( const lang::EventObject& aEvent )
throw( uno::RuntimeException )
{
svt::StatusbarController::disposing( aEvent );
}
//--------------------------------------------------------------------
// XComponent
void SAL_CALL SfxStatusBarControl::dispose()
throw (uno::RuntimeException)
{
svt::StatusbarController::dispose();
}
//--------------------------------------------------------------------
// XStatusListener
void SAL_CALL SfxStatusBarControl::statusChanged( const frame::FeatureStateEvent& rEvent )
throw ( ::com::sun::star::uno::RuntimeException )
{
SfxViewFrame* pViewFrame = NULL;
uno::Reference < frame::XController > xController;
::vos::OGuard aGuard( Application::GetSolarMutex() );
if ( m_xFrame.is() )
xController = m_xFrame->getController();
uno::Reference < frame::XDispatchProvider > xProvider( xController, uno::UNO_QUERY );
if ( xProvider.is() )
{
uno::Reference < frame::XDispatch > xDisp = xProvider->queryDispatch( rEvent.FeatureURL, ::rtl::OUString(), 0 );
if ( xDisp.is() )
{
uno::Reference< lang::XUnoTunnel > xTunnel( xDisp, uno::UNO_QUERY );
SfxOfficeDispatch* pDisp = NULL;
if ( xTunnel.is() )
{
sal_Int64 nImplementation = xTunnel->getSomething(SfxOfficeDispatch::impl_getStaticIdentifier());
pDisp = reinterpret_cast< SfxOfficeDispatch* >(sal::static_int_cast< sal_IntPtr >( nImplementation ));
}
if ( pDisp )
pViewFrame = pDisp->GetDispatcher_Impl()->GetFrame();
}
}
sal_uInt16 nSlotID = 0;
SfxSlotPool& rPool = SfxSlotPool::GetSlotPool( pViewFrame );
const SfxSlot* pSlot = rPool.GetUnoSlot( rEvent.FeatureURL.Path );
if ( pSlot )
nSlotID = pSlot->GetSlotId();
if ( nSlotID > 0 )
{
if ( rEvent.Requery )
svt::StatusbarController::statusChanged( rEvent );
else
{
SfxItemState eState = SFX_ITEM_DISABLED;
SfxPoolItem* pItem = NULL;
if ( rEvent.IsEnabled )
{
eState = SFX_ITEM_AVAILABLE;
uno::Type pType = rEvent.State.getValueType();
if ( pType == ::getVoidCppuType() )
{
pItem = new SfxVoidItem( nSlotID );
eState = SFX_ITEM_UNKNOWN;
}
else if ( pType == ::getBooleanCppuType() )
{
sal_Bool bTemp = 0;
rEvent.State >>= bTemp ;
pItem = new SfxBoolItem( nSlotID, bTemp );
}
else if ( pType == ::getCppuType((const sal_uInt16*)0) )
{
sal_uInt16 nTemp = 0;
rEvent.State >>= nTemp ;
pItem = new SfxUInt16Item( nSlotID, nTemp );
}
else if ( pType == ::getCppuType((const sal_uInt32*)0) )
{
sal_uInt32 nTemp = 0;
rEvent.State >>= nTemp ;
pItem = new SfxUInt32Item( nSlotID, nTemp );
}
else if ( pType == ::getCppuType((const ::rtl::OUString*)0) )
{
::rtl::OUString sTemp ;
rEvent.State >>= sTemp ;
pItem = new SfxStringItem( nSlotID, sTemp );
}
else if ( pType == ::getCppuType((const ::com::sun::star::frame::status::ItemStatus*)0) )
{
frame::status::ItemStatus aItemStatus;
rEvent.State >>= aItemStatus;
eState = aItemStatus.State;
pItem = new SfxVoidItem( nSlotID );
}
else
{
if ( pSlot )
pItem = pSlot->GetType()->CreateItem();
if ( pItem )
{
pItem->SetWhich( nSlotID );
pItem->PutValue( rEvent.State );
}
else
pItem = new SfxVoidItem( nSlotID );
}
}
StateChanged( nSlotID, eState, pItem );
delete pItem;
}
}
}
//--------------------------------------------------------------------
// XStatusbarController
::sal_Bool SAL_CALL SfxStatusBarControl::mouseButtonDown(
const awt::MouseEvent& rMouseEvent )
throw ( uno::RuntimeException )
{
::vos::OGuard aGuard( Application::GetSolarMutex() );
::Point aPos( rMouseEvent.X, rMouseEvent.Y );
::MouseEvent aMouseEvent( aPos,
(sal_uInt16)rMouseEvent.ClickCount,
0,
convertAwtToVCLMouseButtons( rMouseEvent.Buttons ),
0 );
return MouseButtonDown( aMouseEvent );
}
//--------------------------------------------------------------------
::sal_Bool SAL_CALL SfxStatusBarControl::mouseMove(
const awt::MouseEvent& rMouseEvent )
throw (uno::RuntimeException)
{
::vos::OGuard aGuard( Application::GetSolarMutex() );
::Point aPos( rMouseEvent.X, rMouseEvent.Y );
::MouseEvent aMouseEvent( aPos,
(sal_uInt16)rMouseEvent.ClickCount,
0,
convertAwtToVCLMouseButtons( rMouseEvent.Buttons ),
0 );
return MouseMove( aMouseEvent );
}
//--------------------------------------------------------------------
::sal_Bool SAL_CALL SfxStatusBarControl::mouseButtonUp(
const ::awt::MouseEvent& rMouseEvent )
throw ( uno::RuntimeException )
{
::vos::OGuard aGuard( Application::GetSolarMutex() );
::Point aPos( rMouseEvent.X, rMouseEvent.Y );
::MouseEvent aMouseEvent( aPos,
(sal_uInt16)rMouseEvent.ClickCount,
0,
convertAwtToVCLMouseButtons( rMouseEvent.Buttons ),
0 );
return MouseButtonUp( aMouseEvent );
}
//--------------------------------------------------------------------
void SAL_CALL SfxStatusBarControl::command(
const awt::Point& rPos,
::sal_Int32 nCommand,
::sal_Bool /*bMouseEvent*/,
const ::com::sun::star::uno::Any& /*aData*/ )
throw (::com::sun::star::uno::RuntimeException)
{
::vos::OGuard aGuard( Application::GetSolarMutex() );
::Point aPos( rPos.X, rPos.Y );
CommandEvent aCmdEvent( aPos, (sal_uInt16)nCommand, sal_True, NULL );
Command( aCmdEvent );
}
//--------------------------------------------------------------------
void SAL_CALL SfxStatusBarControl::paint(
const uno::Reference< awt::XGraphics >& xGraphics,
const awt::Rectangle& rOutputRectangle,
::sal_Int32 nStyle )
throw ( ::uno::RuntimeException )
{
::vos::OGuard aGuard( Application::GetSolarMutex() );
OutputDevice* pOutDev = VCLUnoHelper::GetOutputDevice( xGraphics );
if ( pOutDev )
{
::Rectangle aRect = VCLRectangle( rOutputRectangle );
UserDrawEvent aUserDrawEvent( pOutDev, aRect, pBar->GetCurItemId(), (sal_uInt16)nStyle );
Paint( aUserDrawEvent );
}
}
//--------------------------------------------------------------------
void SAL_CALL SfxStatusBarControl::click( const awt::Point& )
throw ( uno::RuntimeException )
{
::vos::OGuard aGuard( Application::GetSolarMutex() );
Click();
}
//--------------------------------------------------------------------
void SAL_CALL SfxStatusBarControl::doubleClick( const awt::Point& )
throw ( uno::RuntimeException )
{
::vos::OGuard aGuard( Application::GetSolarMutex() );
DoubleClick();
}
//--------------------------------------------------------------------
// old sfx2 interface
//--------------------------------------------------------------------
void SfxStatusBarControl::StateChanged
(
sal_uInt16 nSID,
SfxItemState eState,
const SfxPoolItem* pState /* Pointer to an SfxPoolItem that is only
valid within this method call. It may
be a null pointer, a pointer to an
SfxVoidItem, or a pointer to the type
for which this SfxStatusBarControl
subclass is registered. */
)
/* [Description]
The base implementation understands items of type SfxStringItem,
whose text is written into the status bar field, and SfxVoidItem,
which clears the field. The base implementation should not be
called from overridden methods.
*/
{
DBG_MEMTEST();
DBG_ASSERT( pBar != 0, "setting state to dangling StatusBar" );
const SfxStringItem* pStr = PTR_CAST( SfxStringItem, pState );
if ( eState == SFX_ITEM_AVAILABLE && pStr )
pBar->SetItemText( nSID, pStr->GetValue() );
else
{
DBG_ASSERT( eState != SFX_ITEM_AVAILABLE || pState->ISA(SfxVoidItem),
"wrong SfxPoolItem subclass in SfxStatusBarControl" );
pBar->SetItemText( nSID, String() );
}
}
//--------------------------------------------------------------------
sal_Bool SfxStatusBarControl::MouseButtonDown( const MouseEvent & )
/* [Description]
This virtual method forwards the StatusBar's MouseButtonDown() event
when the mouse position lies within the area of the item in question,
or when the mouse has been captured by this control via
<SfxStatusBarControl::CaptureMouse()>.
The default implementation is empty and returns FALSE.
[Return value]
sal_Bool TRUE
the event was handled and must not be forwarded
to the StatusBar
FALSE
the event was not handled and is to be forwarded
to the StatusBar
*/
{
return sal_False;
}
//--------------------------------------------------------------------
sal_Bool SfxStatusBarControl::MouseMove( const MouseEvent & )
/* [Description]
This virtual method forwards the StatusBar's MouseMove() event
when the mouse position lies within the area of the item in question,
or when the mouse has been captured by this control via
<SfxStatusBarControl::CaptureMouse()>.
The default implementation is empty and returns FALSE.
[Return value]
sal_Bool TRUE
the event was handled and must not be forwarded
to the StatusBar
FALSE
the event was not handled and is to be forwarded
to the StatusBar
*/
{
return sal_False;
}
//--------------------------------------------------------------------
sal_Bool SfxStatusBarControl::MouseButtonUp( const MouseEvent & )
/* [Description]
This virtual method forwards the StatusBar's MouseButtonUp() event
when the mouse position lies within the area of the item in question,
or when the mouse has been captured by this control via
<SfxStatusBarControl::CaptureMouse()>.
The default implementation is empty and returns FALSE.
[Return value]
sal_Bool TRUE
the event was handled and must not be forwarded
to the StatusBar
FALSE
the event was not handled and is to be forwarded
to the StatusBar
*/
{
return sal_False;
}
//--------------------------------------------------------------------
void SfxStatusBarControl::Command( const CommandEvent& )
/* [Description]
This virtual method is called when a CommandEvent has been
detected for this SfxStatusBarControl.
The default implementation is empty.
*/
{
}
//--------------------------------------------------------------------
void SfxStatusBarControl::Click()
/* [Description]
This virtual method is called when the user clicks with the mouse
into the status bar field belonging to this control.
The default implementation is empty.
*/
{
}
//--------------------------------------------------------------------
void SfxStatusBarControl::DoubleClick()
/* [Description]
This virtual method is called when the user double-clicks with the
mouse into the status bar field belonging to this control.
*/
{
::com::sun::star::uno::Sequence< ::com::sun::star::beans::PropertyValue > aArgs;
execute( aArgs );
}
//--------------------------------------------------------------------
void SfxStatusBarControl::Paint
(
const UserDrawEvent& /* reference to a UserDrawEvent */
)
/* [Description]
This virtual method is called, if the field in question is flagged
with SIB_USERDRAW, to draw its content.
Output must be done on the OutputDevice obtained via
rUDEvt.GetDevice(), within the rectangle given by rUDEvt.GetRect().
The default implementation is empty.
*/
{
}
//--------------------------------------------------------------------
void SfxStatusBarControl::CaptureMouse()
{
}
//--------------------------------------------------------------------
void SfxStatusBarControl::ReleaseMouse()
{
}
//--------------------------------------------------------------------
SfxStatusBarControl* SfxStatusBarControl::CreateControl
(
sal_uInt16 nSlotID,
sal_uInt16 nStbId,
StatusBar* pBar,
SfxModule* pMod
)
{
::vos::OGuard aGuard( Application::GetSolarMutex() );
SfxApplication *pApp = SFX_APP();
SfxSlotPool *pSlotPool;
if ( pMod )
pSlotPool = pMod->GetSlotPool();
else
pSlotPool = &SfxSlotPool::GetSlotPool();
TypeId aSlotType = pSlotPool->GetSlotType(nSlotID);
if ( aSlotType )
{
if ( pMod )
{
SfxStbCtrlFactArr_Impl *pFactories = pMod->GetStbCtrlFactories_Impl();
if ( pFactories )
{
SfxStbCtrlFactArr_Impl &rFactories = *pFactories;
for ( sal_uInt16 nFactory = 0; nFactory < rFactories.Count(); ++nFactory )
if ( rFactories[nFactory]->nTypeId == aSlotType &&
( ( rFactories[nFactory]->nSlotId == 0 ) ||
( rFactories[nFactory]->nSlotId == nSlotID) ) )
return rFactories[nFactory]->pCtor( nSlotID, nStbId, *pBar );
}
}
SfxStbCtrlFactArr_Impl &rFactories = pApp->GetStbCtrlFactories_Impl();
for ( sal_uInt16 nFactory = 0; nFactory < rFactories.Count(); ++nFactory )
if ( rFactories[nFactory]->nTypeId == aSlotType &&
( ( rFactories[nFactory]->nSlotId == 0 ) ||
( rFactories[nFactory]->nSlotId == nSlotID) ) )
return rFactories[nFactory]->pCtor( nSlotID, nStbId, *pBar );
}
return NULL;
}
//--------------------------------------------------------------------
void SfxStatusBarControl::RegisterStatusBarControl(SfxModule* pMod, SfxStbCtrlFactory* pFact)
{
SFX_APP()->RegisterStatusBarControl_Impl( pMod, pFact );
}
//--------------------------------------------------------------------
| 8,455 |
4,391 | <filename>packages/pyright-internal/src/tests/samples/typeAlias15.py
# This sample tests the handling of recursive type aliases that are generic.
from __future__ import annotations
from typing import Mapping, Sequence, TypeVar, Union
S = TypeVar("S")
RecList = Union[Mapping[str, "RecList[S]"], Sequence["RecList[S]"], S]
T3 = TypeVar("T3", RecList[int], RecList[str])
def f3(x: RecList[int] | RecList[str]) -> None:
...
def g3(x: T3):
return f3(x)
def f4(x: RecList[str] | RecList[int]) -> None:
...
def g4(x: T3):
return f4(x)
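# Illustrative only (not part of the original sample): a nested value such as
# {"a": [1, 2, {"b": 3}]} satisfies RecList[int], since each level is either a
# Mapping[str, RecList[int]], a Sequence[RecList[int]], or a plain int.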
| 213 |
360 | <reponame>Yanci0/openGauss-server<filename>src/include/utils/evp_cipher.h
/*
* Copyright (c) 2020 Huawei Technologies Co.,Ltd.
*
* openGauss is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
* ---------------------------------------------------------------------------------------
*
* evp_cipher.h
 *        Interfaces of the AES and SM4 encryption algorithms
*
*
* IDENTIFICATION
* src/include/utils/evp_cipher.h
*
* ---------------------------------------------------------------------------------------
*/
#ifndef EVP_CIPHER_H
#define EVP_CIPHER_H
#define KEY_128BIT_LEN 16
#define KEY_256BIT_LEN 32
/* To maintain forward compatibility, the value of enum cannot be changed */
typedef enum {
TDE_ALGO_NONE = 0,
TDE_ALGO_AES_128_CTR = 1,
TDE_ALGO_AES_128_GCM = 2,
TDE_ALGO_AES_256_CTR = 3,
TDE_ALGO_AES_256_GCM = 4,
TDE_ALGO_SM4_CTR = 5,
} TdeAlgo;
bool encrypt_partial_mode(const char* plainText, const size_t plainLength, char* cipherText,
size_t* cipherLength, unsigned char* key, unsigned char* iv, TdeAlgo algo);
bool decrypt_partial_mode(const char* cipherText, const size_t cipherLength, char* plainText,
size_t* plainLength, unsigned char* key, unsigned char* iv, TdeAlgo algo);
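/*
 * Illustrative usage sketch (an assumption, not part of openGauss): encrypting a
 * buffer with AES-128-CTR. Key/IV provisioning and the output buffer margin are
 * assumptions of this example, not requirements stated by this header.
 *
 *     unsigned char key[KEY_128BIT_LEN] = {0};    // obtained from a key manager
 *     unsigned char iv[16] = {0};                 // per-message IV/counter
 *     const char plain[] = "secret";
 *     char cipher[sizeof(plain) + 32];            // extra room assumed for the mode
 *     size_t cipherLen = 0;
 *     bool ok = encrypt_partial_mode(plain, sizeof(plain), cipher, &cipherLen,
 *                                    key, iv, TDE_ALGO_AES_128_CTR);
 */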
#endif /* EVP_CIPHER_H */
| 615 |
1,261 | <filename>tests/01-internal/01-hash.c
/**
Onion HTTP server library
Copyright (C) 2010-2018 <NAME> and others
This library is free software; you can redistribute it and/or
modify it under the terms of, at your choice:
a. the Apache License Version 2.0.
b. the GNU General Public License as published by the
Free Software Foundation; either version 2.0 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of both licenses, if not see
<http://www.gnu.org/licenses/> and
<http://www.apache.org/licenses/LICENSE-2.0>.
*/
#include <stdlib.h>
#include <string.h>
#include <onion/dict.h>
#include <onion/log.h>
#include "../ctest.h"
#include <unistd.h>
#include <onion/block.h>
#ifdef HAVE_PTHREADS
#include <pthread.h>
#endif
void t01_create_add_free() {
INIT_LOCAL();
onion_dict *dict;
const char *value;
dict = onion_dict_new();
FAIL_IF_EQUAL(dict, NULL);
// Get before anything in
value = onion_dict_get(dict, "Request");
FAIL_IF_NOT_EQUAL(value, NULL);
// basic add
onion_dict_add(dict, "Request", "GET /", OD_DUP_ALL);
value = onion_dict_get(dict, "Request");
FAIL_IF_NOT_EQUAL_STR(value, "GET /");
// basic remove
onion_dict_remove(dict, "Request");
value = onion_dict_get(dict, "Request");
FAIL_IF_NOT_EQUAL(value, NULL);
onion_dict_free(dict);
END_LOCAL();
}
void t01_create_add_free_10() {
INIT_LOCAL();
onion_dict *dict;
const char *value;
dict = onion_dict_new();
FAIL_IF_EQUAL(dict, NULL);
// Get before anything in
value = onion_dict_get(dict, "Request");
FAIL_IF_NOT_EQUAL(value, NULL);
// basic add
int i;
char tmp[256];
for (i = 0; i < 10; i++) {
snprintf(tmp, sizeof(tmp), "%d", (i * 13) % 10);
////ONION_DEBUG("add key %s",tmp);
onion_dict_add(dict, tmp, "GET /", OD_DUP_ALL);
value = onion_dict_get(dict, tmp);
FAIL_IF_NOT_EQUAL_STR(value, "GET /");
//onion_dict_print_dot(dict);
}
for (i = 0; i < 10; i++) {
snprintf(tmp, sizeof(tmp), "%d", i);
////ONION_DEBUG("rm key %s",tmp);
onion_dict_remove(dict, tmp);
value = onion_dict_get(dict, tmp);
FAIL_IF_NOT_EQUAL(value, NULL);
//onion_dict_print_dot(dict);
}
onion_dict_free(dict);
END_LOCAL();
}
void t02_create_and_free_a_lot(unsigned int n) {
INIT_LOCAL();
onion_dict *dict;
const char *value;
unsigned int i;
dict = onion_dict_new();
FAIL_IF_EQUAL(dict, NULL);
// Linear add
for (i = 0; i < n; i++) {
char key[16], val[16];
sprintf(key, "key %d", i);
sprintf(val, "val %d", i);
onion_dict_add(dict, key, val, OD_DUP_ALL);
}
// Linear get
for (i = 0; i < n; i++) {
char key[16], val[16];
sprintf(key, "key %d", i);
sprintf(val, "val %d", i);
value = onion_dict_get(dict, key);
FAIL_IF_NOT_EQUAL_STR(val, value);
}
// remove all
for (i = 0; i < n; i++) {
char key[16];
sprintf(key, "key %d", i);
int ok = onion_dict_remove(dict, key);
FAIL_IF_NOT(ok);
}
// check removed all
for (i = 0; i < n; i++) {
char key[16], val[16];
sprintf(key, "key %d", i);
sprintf(val, "val %d", i);
value = onion_dict_get(dict, key);
//fprintf(stderr,"%s %s\n",key,value);
FAIL_IF_NOT_EQUAL(NULL, value);
FAIL_IF_NOT_EQUAL_STR(NULL, value);
}
onion_dict_free(dict);
END_LOCAL();
}
#define R1(x) ((x)*39872265)%28645
#define R2(x) ((x)*43433422)%547236
void t03_create_and_free_a_lot_random(unsigned int n) {
INIT_LOCAL();
onion_dict *dict;
const char *value;
unsigned int i;
dict = onion_dict_new();
FAIL_IF_EQUAL(dict, NULL);
// Linear add
for (i = 0; i < n; i++) {
char key[16], val[16];
sprintf(key, "key %d", R1(i));
sprintf(val, "val %d", R2(i));
onion_dict_add(dict, key, val, OD_DUP_ALL);
}
// Linear get
for (i = 0; i < n; i++) {
char key[16], val[16];
sprintf(key, "key %d", R1(i));
sprintf(val, "val %d", R2(i));
value = onion_dict_get(dict, key);
FAIL_IF_NOT_EQUAL_STR(val, value);
}
// remove all
for (i = n; i > 0; i--) {
char key[16];
int removed;
sprintf(key, "key %d", R1(i - 1));
//fprintf(stderr,"%s %d\n",key,i-1);
removed = onion_dict_remove(dict, key);
FAIL_IF_NOT_EQUAL(removed, 1);
}
// check removed all
for (i = 0; i < n; i++) {
char key[16], val[16];
sprintf(key, "key %d", R1(i));
sprintf(val, "val %d", R1(i));
value = onion_dict_get(dict, key);
//fprintf(stderr,"%s %s\n",key,value);
FAIL_IF_NOT_EQUAL(NULL, value);
FAIL_IF_NOT_EQUAL_STR(NULL, value);
}
onion_dict_free(dict);
END_LOCAL();
}
void t04_create_and_free_a_dup() {
INIT_LOCAL();
onion_dict *dict;
const char *value;
dict = onion_dict_new();
FAIL_IF_EQUAL(dict, NULL);
// Get before anything in
value = onion_dict_get(dict, "Request");
FAIL_IF_NOT_EQUAL(value, NULL);
// basic add
onion_dict_add(dict, "Request", "GET /", OD_DUP_ALL);
value = onion_dict_get(dict, "Request");
FAIL_IF_NOT_EQUAL_STR(value, "GET /");
onion_dict_add(dict, "Request", "GET /", OD_DUP_ALL);
value = onion_dict_get(dict, "Request");
FAIL_IF_NOT_EQUAL_STR(value, "GET /");
// basic remove
onion_dict_remove(dict, "Request");
value = onion_dict_get(dict, "Request");
FAIL_IF_NOT_EQUAL_STR(value, "GET /");
// basic remove
onion_dict_remove(dict, "Request");
value = onion_dict_get(dict, "Request");
FAIL_IF_NOT_EQUAL(value, NULL);
onion_dict_free(dict);
END_LOCAL();
}
void append_as_headers(char *str, const char *key, const char *value, int flags) {
char tmp[1024];
sprintf(tmp, "%s: %s\n", key, value);
strcat(str, tmp);
}
void t05_preorder() {
INIT_LOCAL();
onion_dict *dict;
dict = onion_dict_new();
onion_dict_add(dict, "A", "B", 0);
onion_dict_add(dict, "C", "D", 0);
onion_dict_add(dict, "E", "F", 0);
onion_dict_add(dict, "G", "H", 0);
onion_dict_add(dict, "I", "J", 0);
onion_dict_add(dict, "K", "L", 0);
onion_dict_add(dict, "M", "N", 0);
onion_dict_add(dict, "O", "P", 0);
onion_dict_add(dict, "Q", "R", 0);
onion_dict_add(dict, "S", "T", 0);
char buffer[4096];
memset(buffer, 0, sizeof(buffer));
onion_dict_preorder(dict, append_as_headers, buffer);
FAIL_IF_NOT_EQUAL_STR(buffer,
"A: B\nC: D\nE: F\nG: H\nI: J\nK: L\nM: N\nO: P\nQ: R\nS: T\n");
onion_dict_free(dict);
END_LOCAL();
}
void t06_null_add() {
INIT_LOCAL();
onion_dict *dict;
dict = onion_dict_new();
onion_dict_add(dict, "b", NULL, 0);
onion_dict_add(dict, "a", NULL, 0);
onion_dict_add(dict, "c", "1", 0);
FAIL_IF_NOT_EQUAL_STR(onion_dict_get(dict, "c"), "1");
FAIL_IF_NOT_EQUAL(onion_dict_get(dict, "a"), NULL);
onion_dict_free(dict);
END_LOCAL();
}
void t07_sum(int *v, const char *key, const char *value, int flags) {
*v += atoi(value);
}
void t07_replace() {
INIT_LOCAL();
onion_dict *dict = onion_dict_new();
onion_dict_add(dict, "a", "1", OD_DUP_ALL | OD_REPLACE);
onion_dict_add(dict, "a", "1", OD_REPLACE);
onion_dict_add(dict, "a", "1", OD_DUP_ALL | OD_REPLACE);
onion_dict_add(dict, "a", "1", OD_REPLACE);
onion_dict_add(dict, "a", "1", OD_DUP_ALL | OD_REPLACE);
int n = 0;
onion_dict_preorder(dict, t07_sum, &n);
FAIL_IF_NOT_EQUAL_INT(n, 1);
onion_dict_add(dict, "a", "1", 0);
n = 0;
onion_dict_preorder(dict, t07_sum, &n);
FAIL_IF_NOT_EQUAL_INT(n, 2);
onion_dict_free(dict);
END_LOCAL();
}
#ifdef HAVE_PTHREADS
#define N_READERS 30
char *t08_thread_read(onion_dict * d) {
char done = 0;
char *ret = NULL;
while (!done) {
//ONION_DEBUG("Lock read");
onion_dict_lock_write(d);
//ONION_DEBUG("Got read lock");
const char *test = onion_dict_get(d, "test");
if (test) {
//ONION_DEBUG("Unlock");
//onion_dict_lock_write(d);
//ONION_DEBUG("Got write lock");
char tmp[16];
snprintf(tmp, 16, "%d", onion_dict_count(d));
onion_dict_remove(d, "test");
onion_dict_add(d, tmp, "test", OD_DUP_ALL);
ONION_DEBUG("Write answer %d", onion_dict_count(d));
done = 1;
//ONION_DEBUG("Unlock");
onion_dict_unlock(d);
ret = (char *)1;
break;
}
//ONION_DEBUG("Unlock");
onion_dict_unlock(d);
usleep(200);
}
//ONION_DEBUG("dict free");
onion_dict_free(d);
return ret;
}
void *t08_thread_write(onion_dict * d) {
int n = 0;
while (n != N_READERS) {
int i;
n = 0;
//ONION_DEBUG("Lock read");
onion_dict_lock_read(d);
//ONION_DEBUG("Got read lock");
for (i = 0; i < N_READERS; i++) {
char tmp[16];
snprintf(tmp, 16, "%d", i + 1);
const char *r = onion_dict_get(d, tmp);
if (r)
n++;
}
//ONION_DEBUG("Unlock");
onion_dict_unlock(d);
//ONION_DEBUG("Lock write");
onion_dict_lock_write(d);
//ONION_DEBUG("Got write lock");
onion_dict_add(d, "test", "test", OD_DUP_ALL | OD_REPLACE);
//ONION_DEBUG("Unlock");
onion_dict_unlock(d);
ONION_DEBUG("Found %d answers, should be %d.", n, N_READERS);
usleep(200);
}
onion_dict_free(d);
return (char *)1;
}
void t08_threaded_lock() {
INIT_LOCAL();
onion_dict *d = onion_dict_new();
pthread_t thread[N_READERS];
int i;
for (i = 0; i < N_READERS; i++) {
onion_dict *d2 = onion_dict_dup(d);
pthread_create(&thread[i], NULL, (void *)t08_thread_read, d2);
}
//sleep(1);
t08_thread_write(d);
for (i = 0; i < N_READERS; i++) {
char *v;
pthread_join(thread[i], (void **)&v);
FAIL_IF_NOT_EQUAL(v, (char *)1);
}
END_LOCAL();
}
#define NWAR 100
#define WARLOOPS 1000
void t09_thread_war_thread(onion_dict * d) {
int i;
char tmp[16];
for (i = 0; i < WARLOOPS; i++) {
snprintf(tmp, 16, "%04X", i);
if (rand() % 2) {  // randomly alternate between reads and writes
onion_dict_lock_read(d);
onion_dict_get(d, tmp);
onion_dict_unlock(d);
} else {
onion_dict_lock_write(d);
onion_dict_add(d, tmp, tmp, OD_DUP_ALL | OD_REPLACE);
onion_dict_unlock(d);
}
}
onion_dict_free(d);
}
void t09_thread_war() {
INIT_LOCAL();
pthread_t thread[NWAR];
int i;
onion_dict *d = onion_dict_new();
for (i = 0; i < NWAR; i++) {
onion_dict *dup = onion_dict_dup(d);
pthread_create(&thread[i], NULL, (void *)&t09_thread_war_thread, dup);
}
onion_dict_free(d);
for (i = 0; i < NWAR; i++) {
pthread_join(thread[i], NULL);
}
END_LOCAL();
}
#endif
void t10_tojson() {
INIT_LOCAL();
onion_dict *d = onion_dict_new();
const char *tmp;
int s;
onion_block *b;
b = onion_dict_to_json(d);
tmp = onion_block_data(b);
ONION_DEBUG("Json returned is '%s'", tmp);
FAIL_IF_NOT_EQUAL_STR(tmp, "{}");
onion_block_free(b);
onion_dict_add(d, "test", "json", 0);
b = onion_dict_to_json(d);
FAIL_IF_EQUAL(b, NULL);
tmp = onion_block_data(b);
s = onion_block_size(b);
ONION_DEBUG("Json returned is '%s'", tmp);
FAIL_IF(s <= 0);
FAIL_IF_EQUAL(strstr(tmp, "{"), NULL);
FAIL_IF_EQUAL(strstr(tmp, "}"), NULL);
FAIL_IF_EQUAL(strstr(tmp, "\"test\""), NULL);
FAIL_IF_EQUAL(strstr(tmp, "\"json\""), NULL);
FAIL_IF_NOT_EQUAL(strstr(tmp, ","), NULL);
onion_block_free(b);
onion_dict_add(d, "other", "data", 0);
b = onion_dict_to_json(d);
tmp = onion_block_data(b);
s = onion_block_size(b);
ONION_DEBUG("Json returned is '%s'", tmp);
FAIL_IF(s <= 0);
FAIL_IF_EQUAL(strstr(tmp, "{"), NULL);
FAIL_IF_EQUAL(strstr(tmp, "}"), NULL);
FAIL_IF_EQUAL(strstr(tmp, "\"test\""), NULL);
FAIL_IF_EQUAL(strstr(tmp, "\"json\""), NULL);
FAIL_IF_EQUAL(strstr(tmp, ","), NULL);
FAIL_IF_EQUAL(strstr(tmp, "\"other\""), NULL);
FAIL_IF_EQUAL(strstr(tmp, "\"data\""), NULL);
onion_block_free(b);
onion_dict_add(d, "with\"", "data\n", 0);
b = onion_dict_to_json(d);
tmp = onion_block_data(b);
s = onion_block_size(b);
ONION_DEBUG("Json returned is '%s'", tmp);
FAIL_IF(s <= 0);
FAIL_IF_EQUAL(strstr(tmp, "\\n"), NULL);
FAIL_IF_EQUAL(strstr(tmp, "\\\""), NULL);
onion_block_free(b);
onion_dict_free(d);
END_LOCAL();
}
void cmpdict(onion_dict * d, const char *key, const char *value, int flags) {
if (flags & OD_DICT) {
onion_dict_preorder((onion_dict *) value, cmpdict,
onion_dict_get_dict(d, key));
onion_dict_preorder(onion_dict_get_dict(d, key), cmpdict,
(onion_dict *) value);
} else
FAIL_IF_NOT_EQUAL_STR(value, onion_dict_get(d, key));
}
void t11_hard_dup() {
INIT_LOCAL();
onion_dict *orig = onion_dict_new();
char tmp[9];
int i;
for (i = 0; i < 256; i++) {
sprintf(tmp, "%08X", rand());
onion_dict_add(orig, tmp, tmp, OD_DUP_ALL);
}
onion_dict_add(orig, "0", "no frees", 0);
onion_dict *dest = onion_dict_hard_dup(orig);
/// Check they have exactly the same keys.
onion_dict_preorder(orig, cmpdict, dest);
onion_dict_preorder(dest, cmpdict, orig);
onion_dict_free(orig);
onion_dict_free(dest);
END_LOCAL();
}
void t12_dict_in_dict() {
INIT_LOCAL();
onion_dict *A = onion_dict_new();
onion_dict *B = onion_dict_new();
onion_dict *C = onion_dict_new();
onion_dict *D = onion_dict_new();
int i;
for (i = 0; i < 16; i++) {
char tmp[9];
sprintf(tmp, "%08X", rand());
onion_dict_add(A, tmp, tmp, OD_DUP_ALL);
sprintf(tmp, "%08X", rand());
onion_dict_add(B, tmp, tmp, OD_DUP_ALL);
sprintf(tmp, "%08X", rand());
onion_dict_add(C, tmp, tmp, OD_DUP_ALL);
sprintf(tmp, "%08X", rand());
onion_dict_add(D, tmp, tmp, OD_DUP_ALL);
}
onion_dict_add(A, "B", B, OD_DICT | OD_FREE_VALUE);
onion_dict_add(A, "C", C, OD_DICT | OD_FREE_VALUE);
onion_dict_add(A, "D", D, OD_DICT | OD_FREE_VALUE);
FAIL_IF_NOT_EQUAL((onion_dict *) onion_dict_get(A, "B"), NULL);
FAIL_IF_NOT_EQUAL((onion_dict *) onion_dict_get(A, "C"), NULL);
FAIL_IF_NOT_EQUAL((onion_dict *) onion_dict_get(A, "D"), NULL);
FAIL_IF_NOT_EQUAL((onion_dict *) onion_dict_get_dict(A, "B"), B);
FAIL_IF_NOT_EQUAL((onion_dict *) onion_dict_get_dict(A, "C"), C);
FAIL_IF_NOT_EQUAL((onion_dict *) onion_dict_get_dict(A, "D"), D);
{
onion_block *tmpA = onion_dict_to_json(A);
onion_block *tmpB = onion_dict_to_json(B);
onion_block *tmpC = onion_dict_to_json(C);
onion_block *tmpD = onion_dict_to_json(D);
/*
ONION_DEBUG("Json is: %s",tmpA);
ONION_DEBUG("Json is: %s",tmpB);
ONION_DEBUG("Json is: %s",tmpC);
ONION_DEBUG("Json is: %s",tmpD);
*/
FAIL_IF_EQUAL(strstr(onion_block_data(tmpA), onion_block_data(tmpB)), NULL);
FAIL_IF_EQUAL(strstr(onion_block_data(tmpA), onion_block_data(tmpC)), NULL);
FAIL_IF_EQUAL(strstr(onion_block_data(tmpA), onion_block_data(tmpD)), NULL);
onion_block_free(tmpA);
onion_block_free(tmpB);
onion_block_free(tmpC);
onion_block_free(tmpD);
}
B = onion_dict_hard_dup(A);
onion_dict_free(A);
onion_dict_free(B);
END_LOCAL();
}
void t13_dict_rget() {
INIT_LOCAL();
onion_dict *A = onion_dict_new();
onion_dict *B = onion_dict_new();
onion_dict *C = onion_dict_new();
onion_dict *D = onion_dict_new();
int i;
for (i = 0; i < 16; i++) {
char tmp[9];
sprintf(tmp, "%08X", rand());
onion_dict_add(A, tmp, tmp, OD_DUP_ALL);
sprintf(tmp, "%08X", rand());
onion_dict_add(B, tmp, tmp, OD_DUP_ALL);
sprintf(tmp, "%08X", rand());
onion_dict_add(C, tmp, tmp, OD_DUP_ALL);
sprintf(tmp, "%08X", rand());
onion_dict_add(D, tmp, tmp, OD_DUP_ALL);
}
onion_dict_add(A, "B", B, OD_DICT | OD_FREE_VALUE);
onion_dict_add(A, "C", C, OD_DICT | OD_FREE_VALUE);
onion_dict_add(A, "D", D, OD_DICT | OD_FREE_VALUE);
onion_dict_add(B, "C", C, OD_DICT);
onion_dict_add(C, "a", "hello", 0);
FAIL_IF_NOT_EQUAL(onion_dict_rget(A, "B", NULL), NULL);
FAIL_IF_NOT_EQUAL(onion_dict_rget(A, "C", NULL), NULL);
FAIL_IF_NOT_EQUAL(onion_dict_rget(A, "B", "C", NULL), NULL);
FAIL_IF_NOT_EQUAL(onion_dict_rget_dict(A, "B", NULL), B);
FAIL_IF_NOT_EQUAL(onion_dict_rget_dict(A, "C", NULL), C);
FAIL_IF_NOT_EQUAL(onion_dict_rget_dict(A, "B", "C", NULL), C);
FAIL_IF_NOT_EQUAL_STR(onion_dict_rget(A, "B", "C", "a", NULL), "hello");
FAIL_IF_NOT_EQUAL(onion_dict_rget_dict(A, "B", "C", "a", NULL), NULL);
// This should remove all the others, as they hang from it.
onion_dict_free(A);
END_LOCAL();
}
void t14_dict_case_insensitive() {
INIT_LOCAL();
onion_dict *d = onion_dict_new();
onion_dict_add(d, "Test", "OK", 0);
FAIL_IF_NOT_EQUAL(onion_dict_get(d, "test"), NULL);
onion_dict_set_flags(d, OD_ICASE);
FAIL_IF_NOT_EQUAL_STR(onion_dict_get(d, "test"), "OK");
onion_dict_free(d);
END_LOCAL();
}
void t15_hard_dup_dict_in_dict() {
INIT_LOCAL();
onion_dict *orig = onion_dict_new();
char tmp[9];
int i;
for (i = 0; i < 256; i++) {
sprintf(tmp, "%08X", rand());
onion_dict_add(orig, tmp, tmp, OD_DUP_ALL);
}
onion_dict_add(orig, "0", "no frees", 0);
onion_dict *subdict = onion_dict_new();
onion_dict_add(subdict, "subdict", "test", 0);
onion_dict_add(orig, "subdict", subdict, OD_DICT | OD_FREE_VALUE);
onion_dict *dest = onion_dict_hard_dup(orig);
FAIL_IF(orig == dest);
/// Check they have exactly the same keys.
onion_dict_preorder(orig, cmpdict, dest);
onion_dict_preorder(dest, cmpdict, orig);
onion_dict_free(orig);
onion_dict_free(dest);
END_LOCAL();
}
void t16_soft_dup_dict_in_dict() {
INIT_LOCAL();
onion_dict *orig = onion_dict_new();
char tmp[9];
int i;
for (i = 0; i < 256; i++) {
sprintf(tmp, "%08X", rand());
onion_dict_add(orig, tmp, tmp, OD_DUP_ALL);
}
onion_dict_add(orig, "0", "no frees", 0);
onion_dict *subdict = onion_dict_new();
onion_dict_add(subdict, "subdict", "test", 0);
onion_dict_add(orig, "subdict", subdict, OD_DICT | OD_FREE_VALUE);
onion_dict *dest = onion_dict_dup(orig);
FAIL_IF_NOT(orig == dest);
/// Check they have exactly the same keys.
onion_dict_preorder(orig, cmpdict, dest);
onion_dict_preorder(dest, cmpdict, orig);
onion_dict_free(orig);
onion_dict_free(dest);
END_LOCAL();
}
void t17_merge() {
INIT_LOCAL();
onion_dict *a = onion_dict_from_json("{\"hello\":\"world\"}");
onion_dict *b =
onion_dict_from_json
("{\"bye\":\"_world_\", \"sub\": { \"hello\": \"world!\" } }");
onion_dict_merge(a, b);
FAIL_IF_NOT_EQUAL_STR(onion_dict_get(a, "bye"), "_world_");
FAIL_IF_NOT_EQUAL_STR(onion_dict_rget(a, "sub", "hello", NULL), "world!");
onion_dict_free(b);
FAIL_IF_NOT_EQUAL_STR(onion_dict_rget(a, "sub", "hello", NULL), "world!");
onion_dict_free(a);
END_LOCAL();
}
void t18_json_escape_codes() {
INIT_LOCAL();
onion_dict *d =
onion_dict_from_json
("{ \"hello\": \"Hello\\nworld\", \"second\":\"second\" }");
FAIL_IF_NOT_STRSTR(onion_dict_get(d, "hello"), "Hello\nworld");
FAIL_IF_NOT_STRSTR(onion_dict_get(d, "second"), "second");
onion_dict_free(d);
d = onion_dict_from_json("{ \"hello\": \"\\uD83D\\uDE02\" }");
FAIL_IF_NOT_STRSTR(onion_dict_get(d, "hello"), "😂");
onion_dict_free(d);
d = onion_dict_from_json("{ \"hello\": \"\\uD83D\\uDE03\" }"); // Another code point
FAIL_IF_STRSTR(onion_dict_get(d, "hello"), "😂");
onion_dict_free(d);
d = onion_dict_from_json("{ \"hello\": \"\\u007b\" }"); // simple unicode
FAIL_IF_NOT_STRSTR(onion_dict_get(d, "hello"), "{");
onion_dict_free(d);
d = onion_dict_from_json("{ \"hello\": \"\\\"Quote\" }"); // Escape quote
FAIL_IF_NOT_STRSTR(onion_dict_get(d, "hello"), "\"Quote");
onion_dict_free(d);
d = onion_dict_from_json("{ \"hello\": \"\"Quote\" }"); // Must fail
FAIL_IF_NOT_EQUAL(d, NULL);
d = onion_dict_new();
onion_dict_add(d, "hello", "Hello\nWorld\\", 0);
onion_dict_add(d, "second", "123", 0);
onion_block *b = onion_dict_to_json(d);
FAIL_IF_NOT_EQUAL_STR(onion_block_data(b),
"{\"hello\":\"Hello\\nWorld\\\\\", \"second\":\"123\"}");
onion_block_free(b);
onion_dict_free(d);
d = onion_dict_new();
onion_dict_add(d, "hello", "😂\t\n😂", 0);
b = onion_dict_to_json(d);
FAIL_IF_NOT_EQUAL_STR(onion_block_data(b), "{\"hello\":\"😂\\t\\n😂\"}");
onion_block_free(b);
onion_dict_free(d);
d = onion_dict_new();
onion_dict_add(d, "hello", "\02\03\x7f", 0);
b = onion_dict_to_json(d);
FAIL_IF_NOT_EQUAL_STR(onion_block_data(b),
"{\"hello\":\"\\u0002\\u0003\\u007F\"}");
onion_block_free(b);
onion_dict_free(d);
END_LOCAL();
}
int main(int argc, char **argv) {
START();
t01_create_add_free();
t01_create_add_free_10();
t02_create_and_free_a_lot(100);
t03_create_and_free_a_lot_random(100);
t04_create_and_free_a_dup();
t05_preorder();
t06_null_add();
t07_replace();
#ifdef HAVE_PTHREADS
t08_threaded_lock();
t09_thread_war();
#endif
t10_tojson();
t11_hard_dup();
t12_dict_in_dict();
t13_dict_rget();
t14_dict_case_insensitive();
t15_hard_dup_dict_in_dict();
t16_soft_dup_dict_in_dict();
t17_merge();
t18_json_escape_codes();
END();
}
| 9,660 |
1,118 | {"deu":{"common":"Ungarn","official":"Ungarn"},"fin":{"common":"Unkari","official":"Unkari"},"fra":{"common":"Hongrie","official":"Hongrie"},"hrv":{"common":"Mađarska","official":"Madžarska"},"ita":{"common":"Ungheria","official":"Ungheria"},"jpn":{"common":"ハンガリー","official":"ハンガリー"},"nld":{"common":"Hongarije","official":"Hongarije"},"por":{"common":"Hungria","official":"Hungria"},"rus":{"common":"Венгрия","official":"Венгрия"},"spa":{"common":"Hungría","official":"Hungría"}}
| 167 |
544 | <gh_stars>100-1000
package com.hubspot.jinjava.el.ext.eager;
import com.hubspot.jinjava.el.ExtendedSyntaxBuilder;
import de.odysseus.el.tree.impl.Builder.Feature;
import de.odysseus.el.tree.impl.Parser;
public class EagerExtendedSyntaxBuilder extends ExtendedSyntaxBuilder {
public EagerExtendedSyntaxBuilder() {
super();
}
public EagerExtendedSyntaxBuilder(Feature... features) {
super(features);
}
@Override
protected Parser createParser(String expression) {
return new EagerExtendedParser(this, expression);
}
}
| 164 |
25,151 | <reponame>TamsilAmani/selenium<filename>java/src/org/openqa/selenium/support/locators/RelativeLocatorServerSide.java<gh_stars>1000+
// Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.support.locators;
import com.google.auto.service.AutoService;
import com.google.common.collect.ImmutableMap;
import org.openqa.selenium.By;
import org.openqa.selenium.InvalidArgumentException;
import org.openqa.selenium.JavascriptExecutor;
import org.openqa.selenium.SearchContext;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.WebElement;
import org.openqa.selenium.internal.Require;
import org.openqa.selenium.remote.JsonToWebElementConverter;
import org.openqa.selenium.remote.RemoteWebDriver;
import org.openqa.selenium.remote.locators.CustomLocator;
import java.util.List;
import static org.openqa.selenium.support.locators.RelativeLocatorScript.FIND_ELEMENTS;
@AutoService(CustomLocator.class)
public class RelativeLocatorServerSide implements CustomLocator {
@Override
public String getLocatorName() {
return "relative";
}
@Override
public By createBy(Object usingParameter) {
Require.nonNull("Using", usingParameter);
return new RemoteRelative(usingParameter);
}
private static class RemoteRelative extends By {
private final Object using;
private RemoteRelative(Object usingParameter) {
using = usingParameter;
}
@Override
public List<WebElement> findElements(SearchContext context) {
JavascriptExecutor js = getJavascriptExecutor(context);
WebDriver driver = getWebDriver(context);
if (driver instanceof RemoteWebDriver) {
Object converted = new JsonToWebElementConverter((RemoteWebDriver) driver).apply(using);
@SuppressWarnings("unchecked")
List<WebElement> elements = (List<WebElement>) js.executeScript(FIND_ELEMENTS, ImmutableMap.of("relative", converted));
return elements;
}
throw new InvalidArgumentException("Unable to find element");
}
}
}
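// Illustrative client-side usage (an assumption, not part of this class): the
// server-side locator above is exercised when a test builds a relative locator
// with Selenium's RelativeLocator helper; "anchor" is a hypothetical element.
//
//   WebElement anchor = driver.findElement(By.id("anchor"));
//   List<WebElement> cells =
//       driver.findElements(RelativeLocator.with(By.tagName("td")).above(anchor));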
| 840 |
3,897 | <reponame>mcheah-bose/mbed-os
/*
* Copyright 2019 NXP
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef _FSL_ANATOP_AI_H_
#define _FSL_ANATOP_AI_H_
#include "fsl_common.h"
/*! @addtogroup anatop_ai */
/*! @{ */
/*! @file */
/*! @name Driver version */
/*@{*/
/*! @brief Anatop AI driver version 1.0.0. */
#define FSL_ANATOP_AI_DRIVER_VERSION (MAKE_VERSION(1, 0, 0))
/*@}*/
typedef enum _anatop_ai_itf
{
kAI_Itf_Ldo = 0,
kAI_Itf_1g = 1,
kAI_Itf_Audio = 2,
kAI_Itf_Video = 3,
kAI_Itf_400m = 4,
kAI_Itf_Temp = 5,
kAI_Itf_Bandgap = 6,
} anatop_ai_itf_t;
typedef enum _anatop_ai_reg
{
kAI_PHY_LDO_CTRL0 = 0x0,
kAI_PHY_LDO_CTRL0_SET = 0x4,
kAI_PHY_LDO_CTRL0_CLR = 0x8,
kAI_PHY_LDO_CTRL0_TOG = 0xC,
kAI_PHY_LDO_STAT0 = 0x50,
kAI_PHY_LDO_STAT0_SET = 0x54,
kAI_PHY_LDO_STAT0_CLR = 0x58,
kAI_PHY_LDO_STAT0_TOG = 0x5C,
kAI_BANDGAP_CTRL0 = 0x0,
kAI_BANDGAP_STAT0 = 0x50,
kAI_RCOSC400M_CTRL0 = 0x0,
kAI_RCOSC400M_CTRL0_SET = 0x4,
kAI_RCOSC400M_CTRL0_CLR = 0x8,
kAI_RCOSC400M_CTRL0_TOG = 0xC,
kAI_RCOSC400M_CTRL1 = 0x10,
kAI_RCOSC400M_CTRL1_SET = 0x14,
kAI_RCOSC400M_CTRL1_CLR = 0x18,
kAI_RCOSC400M_CTRL1_TOG = 0x1C,
kAI_RCOSC400M_CTRL2 = 0x20,
kAI_RCOSC400M_CTRL2_SET = 0x24,
kAI_RCOSC400M_CTRL2_CLR = 0x28,
kAI_RCOSC400M_CTRL2_TOG = 0x2C,
kAI_RCOSC400M_CTRL3 = 0x30,
kAI_RCOSC400M_CTRL3_SET = 0x34,
kAI_RCOSC400M_CTRL3_CLR = 0x38,
kAI_RCOSC400M_CTRL3_TOG = 0x3C,
kAI_RCOSC400M_STAT0 = 0x50,
kAI_RCOSC400M_STAT0_SET = 0x54,
kAI_RCOSC400M_STAT0_CLR = 0x58,
kAI_RCOSC400M_STAT0_TOG = 0x5C,
kAI_RCOSC400M_STAT1 = 0x60,
kAI_RCOSC400M_STAT1_SET = 0x64,
kAI_RCOSC400M_STAT1_CLR = 0x68,
kAI_RCOSC400M_STAT1_TOG = 0x6C,
kAI_RCOSC400M_STAT2 = 0x70,
kAI_RCOSC400M_STAT2_SET = 0x74,
kAI_RCOSC400M_STAT2_CLR = 0x78,
kAI_RCOSC400M_STAT2_TOG = 0x7C,
kAI_PLL1G_CTRL0 = 0x0,
kAI_PLL1G_CTRL0_SET = 0x4,
kAI_PLL1G_CTRL0_CLR = 0x8,
kAI_PLL1G_CTRL1 = 0x10,
kAI_PLL1G_CTRL1_SET = 0x14,
kAI_PLL1G_CTRL1_CLR = 0x18,
kAI_PLL1G_CTRL2 = 0x20,
kAI_PLL1G_CTRL2_SET = 0x24,
kAI_PLL1G_CTRL2_CLR = 0x28,
kAI_PLL1G_CTRL3 = 0x30,
kAI_PLL1G_CTRL3_SET = 0x34,
kAI_PLL1G_CTRL3_CLR = 0x38,
kAI_PLLAUDIO_CTRL0 = 0x0,
kAI_PLLAUDIO_CTRL0_SET = 0x4,
kAI_PLLAUDIO_CTRL0_CLR = 0x8,
kAI_PLLAUDIO_CTRL1 = 0x10,
kAI_PLLAUDIO_CTRL1_SET = 0x14,
kAI_PLLAUDIO_CTRL1_CLR = 0x18,
kAI_PLLAUDIO_CTRL2 = 0x20,
kAI_PLLAUDIO_CTRL2_SET = 0x24,
kAI_PLLAUDIO_CTRL2_CLR = 0x28,
kAI_PLLAUDIO_CTRL3 = 0x30,
kAI_PLLAUDIO_CTRL3_SET = 0x34,
kAI_PLLAUDIO_CTRL3_CLR = 0x38,
kAI_PLLVIDEO_CTRL0 = 0x0,
kAI_PLLVIDEO_CTRL0_SET = 0x4,
kAI_PLLVIDEO_CTRL0_CLR = 0x8,
kAI_PLLVIDEO_CTRL1 = 0x10,
kAI_PLLVIDEO_CTRL1_SET = 0x14,
kAI_PLLVIDEO_CTRL1_CLR = 0x18,
kAI_PLLVIDEO_CTRL2 = 0x20,
kAI_PLLVIDEO_CTRL2_SET = 0x24,
kAI_PLLVIDEO_CTRL2_CLR = 0x28,
kAI_PLLVIDEO_CTRL3 = 0x30,
kAI_PLLVIDEO_CTRL3_SET = 0x34,
kAI_PLLVIDEO_CTRL3_CLR = 0x38,
} anatop_ai_reg_t;
/* ----------------------------------------------------------------------------
-- AI PHY_LDO CTRL0 Register Masks
---------------------------------------------------------------------------- */
/*!
* @addtogroup AI_Register_Masks PHY_LDO Register Masks
* @{
*/
/*! @name CTRL0 - CTRL0 Register */
/*! @{ */
#define AI_PHY_LDO_CTRL0_LINREG_EN(x) \
(((uint32_t)(((uint32_t)(x)) << AI_PHY_LDO_CTRL0_LINREG_EN_SHIFT)) & AI_PHY_LDO_CTRL0_LINREG_EN_MASK)
#define AI_PHY_LDO_CTRL0_LINREG_EN_MASK (0x1U)
#define AI_PHY_LDO_CTRL0_LINREG_EN_SHIFT (0U)
/*! LINREG_EN - LinReg master enable
 * LinReg master enable. Setting this bit will enable the regulator.
*/
#define AI_PHY_LDO_CTRL0_PWRUPLOAD_DIS(x) \
(((uint32_t)(((uint32_t)(x)) << AI_PHY_LDO_CTRL0_PWRUPLOAD_DIS_SHIFT)) & AI_PHY_LDO_CTRL0_PWRUPLOAD_DIS_MASK)
#define AI_PHY_LDO_CTRL0_PWRUPLOAD_DIS_MASK (0x2U)
#define AI_PHY_LDO_CTRL0_PWRUPLOAD_DIS_SHIFT (1U)
/*! LINREG_PWRUPLOAD_DIS - LinReg power-up load disable
* 0b0..Internal pull-down enabled
* 0b1..Internal pull-down disabled
*/
#define AI_PHY_LDO_CTRL0_LIMIT_EN(x) \
(((uint32_t)(((uint32_t)(x)) << AI_PHY_LDO_CTRL0_LIMIT_EN_SHIFT)) & AI_PHY_LDO_CTRL0_LIMIT_EN_MASK)
#define AI_PHY_LDO_CTRL0_LIMIT_EN_MASK (0x4U)
#define AI_PHY_LDO_CTRL0_LIMIT_EN_SHIFT (2U)
/*! LINREG_LIMIT_EN - LinReg current limit enable
* LinReg current-limit enable. Setting this bit will enable the
* current-limiter in the regulator
*/
#define AI_PHY_LDO_CTRL0_OUTPUT_TRG(x) \
(((uint32_t)(((uint32_t)(x)) << AI_PHY_LDO_CTRL0_OUTPUT_TRG_SHIFT)) & AI_PHY_LDO_CTRL0_OUTPUT_TRG_MASK)
#define AI_PHY_LDO_CTRL0_OUTPUT_TRG_MASK (0x1F0U)
#define AI_PHY_LDO_CTRL0_OUTPUT_TRG_SHIFT (4U)
/*! LINREG_OUTPUT_TRG - LinReg output voltage target setting
* 0b00000..Set output voltage to x.xV
* 0b10000..Set output voltage to 1.0V
* 0b11111..Set output voltage to x.xV
*/
#define AI_PHY_LDO_CTRL0_PHY_ISO_B(x) \
(((uint32_t)(((uint32_t)(x)) << AI_PHY_LDO_CTRL0_PHY_ISO_B_SHIFT)) & AI_PHY_LDO_CTRL0_PHY_ISO_B_MASK)
#define AI_PHY_LDO_CTRL0_PHY_ISO_B_MASK (0x8000U)
#define AI_PHY_LDO_CTRL0_PHY_ISO_B_SHIFT (15U)
/*! LINREG_PHY_ISO_B - Isolation control for attached PHY load
* This control bit is to be used by the system controller to isolate the
* attached PHY load when the LinReg is powered down. During a power-up
* event of the regulator it is expected that this control signal is set high
* at least 100us after the main regulator is enabled. During a power-down
* event of the regulator it is expected that this control signal is set low
* before the main regulator is disabled/power-down.
*/
/*! @} */
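/*
 * Illustrative power-up sequencing sketch (an assumption, not part of the SDK):
 * per the LINREG_PHY_ISO_B description above, isolation is released at least
 * 100us after the regulator is enabled. ANATOP_AI_Write() is declared at the end
 * of this header; SDK_DelayAtLeastUs() and SystemCoreClock are assumed to be
 * available from the surrounding SDK.
 *
 *     ANATOP_AI_Write(kAI_Itf_Ldo, kAI_PHY_LDO_CTRL0_SET,
 *                     AI_PHY_LDO_CTRL0_LINREG_EN_MASK);      // enable the LinReg
 *     SDK_DelayAtLeastUs(100U, SystemCoreClock);             // >= 100us settle time
 *     ANATOP_AI_Write(kAI_Itf_Ldo, kAI_PHY_LDO_CTRL0_SET,
 *                     AI_PHY_LDO_CTRL0_PHY_ISO_B_MASK);      // release PHY isolation
 */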
/*! @name STAT0 - STAT0 Register */
/*! @{ */
#define AI_PHY_LDO_STAT0_LINREG_STAT(x) \
(((uint32_t)(((uint32_t)(x)) << AI_PHY_LDO_STAT0_LINREG_STAT_SHIFT)) & AI_PHY_LDO_STAT0_LINREG_STAT_MASK)
#define AI_PHY_LDO_STAT0_LINREG_STAT_MASK (0xFU)
#define AI_PHY_LDO_STAT0_LINREG_STAT_SHIFT (0U)
/*! LINREG_STAT - LinReg status bits
* LinReg status bits.
*/
/*! @} */
/*! @} */
/*!
* @addtogroup AI_Register_Masks BANDGAP Register Masks
* @{
*/
/*! @name CTRL0 - CTRL0 Register */
/*! @{ */
#define AI_BANDGAP_CTRL0_REFTOP_PWD(x) \
(((uint32_t)(((uint32_t)(x)) << AI_BANDGAP_CTRL0_REFTOP_PWD_SHIFT)) & AI_BANDGAP_CTRL0_REFTOP_PWD_MASK)
#define AI_BANDGAP_CTRL0_REFTOP_PWD_MASK (0x1U)
#define AI_BANDGAP_CTRL0_REFTOP_PWD_SHIFT (0U)
/*! REFTOP_PWD - This bit fully powers down the bandgap module.
* Setting this bit high will disable reference output currents and voltages from the
* bandgap and will affect functionality and validity of the voltage detectors.
*/
#define AI_BANDGAP_CTRL0_REFTOP_LINREGREF_PWD(x) \
(((uint32_t)(((uint32_t)(x)) << AI_BANDGAP_CTRL0_REFTOP_LINREGREF_PWD_SHIFT)) & \
AI_BANDGAP_CTRL0_REFTOP_LINREGREF_PWD_MASK)
#define AI_BANDGAP_CTRL0_REFTOP_LINREGREF_PWD_MASK (0x2U)
#define AI_BANDGAP_CTRL0_REFTOP_LINREGREF_PWD_SHIFT (1U)
/*!
 * REFTOP_LINREGREF_PWD - This bit powers down only the voltage reference output section of the bandgap.
* Setting this bit high will affect functionality and validity
* of the voltage detectors.
*/
#define AI_BANDGAP_CTRL0_REFTOP_PWDVBGUP(x) \
(((uint32_t)(((uint32_t)(x)) << AI_BANDGAP_CTRL0_REFTOP_PWDVBGUP_SHIFT)) & AI_BANDGAP_CTRL0_REFTOP_PWDVBGUP_MASK)
#define AI_BANDGAP_CTRL0_REFTOP_PWDVBGUP_MASK (0x4U)
#define AI_BANDGAP_CTRL0_REFTOP_PWDVBGUP_SHIFT (2U)
/*!
* REFTOP_PWDVBGUP - This bit powers down the VBGUP detector of the bandgap
* without affecting any additional functionality.
*/
#define AI_BANDGAP_CTRL0_REFTOP_LOWPOWER(x) \
(((uint32_t)(((uint32_t)(x)) << AI_BANDGAP_CTRL0_REFTOP_LOWPOWER_SHIFT)) & AI_BANDGAP_CTRL0_REFTOP_LOWPOWER_MASK)
#define AI_BANDGAP_CTRL0_REFTOP_LOWPOWER_MASK (0x8U)
#define AI_BANDGAP_CTRL0_REFTOP_LOWPOWER_SHIFT (3U)
/*!
* REFTOP_LOWPOWER - This bit enables the low-power operation of the
* bandgap by cutting the bias currents in half to the main amplifiers.
* This will save power but could affect the accuracy of the output voltages and currents.
*/
#define AI_BANDGAP_CTRL0_REFTOP_SELFBIASOFF(x) \
(((uint32_t)(((uint32_t)(x)) << AI_BANDGAP_CTRL0_REFTOP_SELFBIASOFF_SHIFT)) & \
AI_BANDGAP_CTRL0_REFTOP_SELFBIASOFF_MASK)
#define AI_BANDGAP_CTRL0_REFTOP_SELFBIASOFF_MASK (0x10U)
#define AI_BANDGAP_CTRL0_REFTOP_SELFBIASOFF_SHIFT (4U)
/*!
* REFTOP_SELFBIASOFF - Control bit to disable the self-bias circuit in the bandgap.
* The self-bias circuit is used by the bandgap during startup. This bit should be
* set high after the bandgap has stabilized and is necessary for best noise performance
* of modules using the outputs of the bandgap. It is expected that this control bit
* be set low any time that either the bandgap is fully powered-down or the 1.8V supply is removed.
*/
#define AI_BANDGAP_CTRL0_REFTOP_VBGADJ(x) \
(((uint32_t)(((uint32_t)(x)) << AI_BANDGAP_CTRL0_REFTOP_VBGADJ_SHIFT)) & AI_BANDGAP_CTRL0_REFTOP_VBGADJ_MASK)
#define AI_BANDGAP_CTRL0_REFTOP_VBGADJ_MASK (0xE0U)
#define AI_BANDGAP_CTRL0_REFTOP_VBGADJ_SHIFT (5U)
/*!
* REFTOP_VBGADJ - These bits allow the output VBG voltage of the bandgap to be trimmed
* 000 : nominal
* 001 : +10mV
* 010 : +20mV
* 011 : +30mV
* 100 : -10mV
* 101 : -20mV
* 110 : -30mV
* 111 : -40mV
*/
#define AI_BANDGAP_CTRL0_REFTOP_IBZTCADJ(x) \
(((uint32_t)(((uint32_t)(x)) << AI_BANDGAP_CTRL0_REFTOP_IBZTCADJ_SHIFT)) & AI_BANDGAP_CTRL0_REFTOP_IBZTCADJ_MASK)
#define AI_BANDGAP_CTRL0_REFTOP_IBZTCADJ_MASK (0x1C00U)
#define AI_BANDGAP_CTRL0_REFTOP_IBZTCADJ_SHIFT (10U)
/*!
* REFTOP_IBZTCADJ - These bits allow trimming of the ZTC bias currents from the bandgap to
* the temperature sensors. Assuming a typical process corner the expected values of output
* currents are:
* 000 : 11.5 uA
* 001 : 11.8 uA
* 010 : 12.1 uA
* 100 : 12.4 uA (Nominal expected from MX8QM tempsensor)
* 101 : 12.7 uA
* 110 : 13.0 uA
* 111 : 13.3 uA
*/
/*! @} */
/*! @name STAT0 - STAT0 Register */
/*! @{ */
#define AI_BANDGAP_STAT0_REFTOP_VBGUP(x) \
(((uint32_t)(((uint32_t)(x)) << AI_BANDGAP_STAT0_REFTOP_VBGUP_SHIFT)) & AI_BANDGAP_STAT0_REFTOP_VBGUP_MASK)
#define AI_BANDGAP_STAT0_REFTOP_VBGUP_MASK (0x1U)
#define AI_BANDGAP_STAT0_REFTOP_VBGUP_SHIFT (0U)
/*! @} */
/*! @} */
/*!
* @addtogroup AI_Register_Masks RCOSC 400M Register Masks
* @{
*/
/*! @name CTRL0 - CTRL0 Register */
/*! @{ */
#define AI_RCOSC400M_CTRL0_REF_CLK_DIV(x) \
(((uint32_t)(((uint32_t)(x)) << AI_RCOSC400M_CTRL0_REF_CLK_DIV_SHIFT)) & AI_RCOSC400M_CTRL0_REF_CLK_DIV_MASK)
#define AI_RCOSC400M_CTRL0_REF_CLK_DIV_MASK (0x3F000000U)
#define AI_RCOSC400M_CTRL0_REF_CLK_DIV_SHIFT (24U)
/*! @} */
/*! @name CTRL1 - CTRL1 Register */
/*! @{ */
#define AI_RCOSC400M_CTRL1_HYST_MINUS(x) \
(((uint32_t)(((uint32_t)(x)) << AI_RCOSC400M_CTRL1_HYST_MINUS_SHIFT)) & AI_RCOSC400M_CTRL1_HYST_MINUS_MASK)
#define AI_RCOSC400M_CTRL1_HYST_MINUS_MASK (0xFU)
#define AI_RCOSC400M_CTRL1_HYST_MINUS_SHIFT (0U)
#define AI_RCOSC400M_CTRL1_HYST_PLUS(x) \
(((uint32_t)(((uint32_t)(x)) << AI_RCOSC400M_CTRL1_HYST_PLUS_SHIFT)) & AI_RCOSC400M_CTRL1_HYST_PLUS_MASK)
#define AI_RCOSC400M_CTRL1_HYST_PLUS_MASK (0xF00U)
#define AI_RCOSC400M_CTRL1_HYST_PLUS_SHIFT (8U)
#define AI_RCOSC400M_CTRL1_TARGET_COUNT(x) \
(((uint32_t)(((uint32_t)(x)) << AI_RCOSC400M_CTRL1_TARGET_COUNT_SHIFT)) & AI_RCOSC400M_CTRL1_TARGET_COUNT_MASK)
#define AI_RCOSC400M_CTRL1_TARGET_COUNT_MASK (0xFFFF0000U)
#define AI_RCOSC400M_CTRL1_TARGET_COUNT_SHIFT (16U)
/*! @} */
/*! @name CTRL2 - CTRL2 Register */
/*! @{ */
#define AI_RCOSC400M_CTRL2_TUNE_BYP(x) \
(((uint32_t)(((uint32_t)(x)) << AI_RCOSC400M_CTRL2_TUNE_BYP_SHIFT)) & AI_RCOSC400M_CTRL2_TUNE_BYP_MASK)
#define AI_RCOSC400M_CTRL2_TUNE_BYP_MASK (0x400U)
#define AI_RCOSC400M_CTRL2_TUNE_BYP_SHIFT (10U)
#define AI_RCOSC400M_CTRL2_TUNE_EN(x) \
(((uint32_t)(((uint32_t)(x)) << AI_RCOSC400M_CTRL2_TUNE_EN_SHIFT)) & AI_RCOSC400M_CTRL2_TUNE_EN_MASK)
#define AI_RCOSC400M_CTRL2_TUNE_EN_MASK (0x1000U)
#define AI_RCOSC400M_CTRL2_TUNE_EN_SHIFT (12U)
#define AI_RCOSC400M_CTRL2_TUNE_START(x) \
(((uint32_t)(((uint32_t)(x)) << AI_RCOSC400M_CTRL2_TUNE_START_SHIFT)) & AI_RCOSC400M_CTRL2_TUNE_START_MASK)
#define AI_RCOSC400M_CTRL2_TUNE_START_MASK (0x4000U)
#define AI_RCOSC400M_CTRL2_TUNE_START_SHIFT (14U)
#define AI_RCOSC400M_CTRL2_OSC_TUNE_VAL(x) \
(((uint32_t)(((uint32_t)(x)) << AI_RCOSC400M_CTRL2_OSC_TUNE_VAL_SHIFT)) & AI_RCOSC400M_CTRL2_OSC_TUNE_VAL_MASK)
#define AI_RCOSC400M_CTRL2_OSC_TUNE_VAL_MASK (0xFF000000U)
#define AI_RCOSC400M_CTRL2_OSC_TUNE_VAL_SHIFT (24U)
/*! @} */
/*! @name CTRL3 - CTRL3 Register */
/*! @{ */
#define AI_RCOSC400M_CTRL3_CLR_ERR(x) \
(((uint32_t)(((uint32_t)(x)) << AI_RCOSC400M_CTRL3_CLR_ERR_SHIFT)) & AI_RCOSC400M_CTRL3_CLR_ERR_MASK)
#define AI_RCOSC400M_CTRL3_CLR_ERR_MASK (0x1U)
#define AI_RCOSC400M_CTRL3_CLR_ERR_SHIFT (0U)
#define AI_RCOSC400M_CTRL3_EN_1M_CLK(x) \
(((uint32_t)(((uint32_t)(x)) << AI_RCOSC400M_CTRL3_EN_1M_CLK_SHIFT)) & AI_RCOSC400M_CTRL3_EN_1M_CLK_MASK)
#define AI_RCOSC400M_CTRL3_EN_1M_CLK_MASK (0x100U)
#define AI_RCOSC400M_CTRL3_EN_1M_CLK_SHIFT (8U)
#define AI_RCOSC400M_CTRL3_MUX_1M_CLK(x) \
(((uint32_t)(((uint32_t)(x)) << AI_RCOSC400M_CTRL3_MUX_1M_CLK_SHIFT)) & AI_RCOSC400M_CTRL3_MUX_1M_CLK_MASK)
#define AI_RCOSC400M_CTRL3_MUX_1M_CLK_MASK (0x400U)
#define AI_RCOSC400M_CTRL3_MUX_1M_CLK_SHIFT (10U)
#define AI_RCOSC400M_CTRL3_COUNT_1M_CLK(x) \
(((uint32_t)(((uint32_t)(x)) << AI_RCOSC400M_CTRL3_COUNT_1M_CLK_SHIFT)) & AI_RCOSC400M_CTRL3_COUNT_1M_CLK_MASK)
#define AI_RCOSC400M_CTRL3_COUNT_1M_CLK_MASK (0xFFFF0000U)
#define AI_RCOSC400M_CTRL3_COUNT_1M_CLK_SHIFT (16U)
/*! @} */
/*! @name STAT0 - STAT0 Register */
/*! @{ */
#define AI_RCOSC400M_STAT0_CLK1M_ERR(x) \
(((uint32_t)(((uint32_t)(x)) << AI_RCOSC400M_STAT0_CLK1M_ERR_SHIFT)) & AI_RCOSC400M_STAT0_CLK1M_ERR_MASK)
#define AI_RCOSC400M_STAT0_CLK1M_ERR_MASK (0x1U)
#define AI_RCOSC400M_STAT0_CLK1M_ERR_SHIFT (0U)
/*! @} */
/*! @name STAT1 - STAT1 Register */
/*! @{ */
#define AI_RCOSC400M_STAT1_CURR_COUNT_VAL(x) \
(((uint32_t)(((uint32_t)(x)) << AI_RCOSC400M_STAT1_CURR_COUNT_VAL_SHIFT)) & AI_RCOSC400M_STAT1_CURR_COUNT_VAL_MASK)
#define AI_RCOSC400M_STAT1_CURR_COUNT_VAL_MASK (0xFFFF0000U)
#define AI_RCOSC400M_STAT1_CURR_COUNT_VAL_SHIFT (16U)
/*! @} */
/*! @name STAT2 - STAT2 Register */
/*! @{ */
#define AI_RCOSC400M_STAT2_CURR_OSC_TUNE_VAL(x) \
(((uint32_t)(((uint32_t)(x)) << AI_RCOSC400M_STAT2_CURR_OSC_TUNE_VAL_SHIFT)) & \
AI_RCOSC400M_STAT2_CURR_OSC_TUNE_VAL_MASK)
#define AI_RCOSC400M_STAT2_CURR_OSC_TUNE_VAL_MASK (0xFF000000U)
#define AI_RCOSC400M_STAT2_CURR_OSC_TUNE_VAL_SHIFT (24U)
/*! @} */
/*!
* @addtogroup AI_Register_Masks PLL 1G Register Masks
* @{
*/
/*! @name CTRL0 - CTRL0 Register */
/*! @{ */
#define AI_PLL1G_CTRL0_HOLD_RING_OFF(x) \
(((uint32_t)(((uint32_t)(x)) << AI_PLL1G_CTRL0_HOLD_RING_OFF_SHIFT)) & AI_PLL1G_CTRL0_HOLD_RING_OFF_MASK)
#define AI_PLL1G_CTRL0_HOLD_RING_OFF_MASK (0x2000UL)
#define AI_PLL1G_CTRL0_HOLD_RING_OFF_SHIFT (13U)
#define AI_PLL1G_CTRL0_POWER_UP(x) \
(((uint32_t)(((uint32_t)(x)) << AI_PLL1G_CTRL0_POWER_UP_SHIFT)) & AI_PLL1G_CTRL0_POWER_UP_MASK)
#define AI_PLL1G_CTRL0_POWER_UP_MASK (0x4000UL)
#define AI_PLL1G_CTRL0_POWER_UP_SHIFT (14U)
#define AI_PLL1G_CTRL0_ENABLE(x) \
(((uint32_t)(((uint32_t)(x)) << AI_PLL1G_CTRL0_ENABLE_SHIFT)) & AI_PLL1G_CTRL0_ENABLE_MASK)
#define AI_PLL1G_CTRL0_ENABLE_MASK (0x8000UL)
#define AI_PLL1G_CTRL0_ENABLE_SHIFT (15U)
#define AI_PLL1G_CTRL0_BYPASS(x) \
(((uint32_t)(((uint32_t)(x)) << AI_PLL1G_CTRL0_BYPASS_SHIFT)) & AI_PLL1G_CTRL0_BYPASS_MASK)
#define AI_PLL1G_CTRL0_BYPASS_MASK (0x10000UL)
#define AI_PLL1G_CTRL0_BYPASS_SHIFT (16U)
#define AI_PLL1G_CTRL0_PLL_REG_EN(x) \
(((uint32_t)(((uint32_t)(x)) << AI_PLL1G_CTRL0_PLL_REG_EN_SHIFT)) & AI_PLL1G_CTRL0_PLL_REG_EN_MASK)
#define AI_PLL1G_CTRL0_PLL_REG_EN_MASK (0x400000UL)
#define AI_PLL1G_CTRL0_PLL_REG_EN_SHIFT (22U)
/*! @} */
/*!
* @}
*/
/*!
* @addtogroup AI_Register_Masks PLL AUDIO Register Masks
* @{
*/
/*! @name CTRL0 - CTRL0 Register */
/*! @{ */
#define AI_PLLAUDIO_CTRL0_HOLD_RING_OFF(x) \
(((uint32_t)(((uint32_t)(x)) << AI_PLLAUDIO_CTRL0_HOLD_RING_OFF_SHIFT)) & AI_PLLAUDIO_CTRL0_HOLD_RING_OFF_MASK)
#define AI_PLLAUDIO_CTRL0_HOLD_RING_OFF_MASK (0x2000UL)
#define AI_PLLAUDIO_CTRL0_HOLD_RING_OFF_SHIFT (13U)
#define AI_PLLAUDIO_CTRL0_POWER_UP(x) \
(((uint32_t)(((uint32_t)(x)) << AI_PLLAUDIO_CTRL0_POWER_UP_SHIFT)) & AI_PLLAUDIO_CTRL0_POWER_UP_MASK)
#define AI_PLLAUDIO_CTRL0_POWER_UP_MASK (0x4000UL)
#define AI_PLLAUDIO_CTRL0_POWER_UP_SHIFT (14U)
#define AI_PLLAUDIO_CTRL0_ENABLE(x) \
(((uint32_t)(((uint32_t)(x)) << AI_PLLAUDIO_CTRL0_ENABLE_SHIFT)) & AI_PLLAUDIO_CTRL0_ENABLE_MASK)
#define AI_PLLAUDIO_CTRL0_ENABLE_MASK (0x8000UL)
#define AI_PLLAUDIO_CTRL0_ENABLE_SHIFT (15U)
#define AI_PLLAUDIO_CTRL0_BYPASS(x) \
(((uint32_t)(((uint32_t)(x)) << AI_PLLAUDIO_CTRL0_BYPASS_SHIFT)) & AI_PLLAUDIO_CTRL0_BYPASS_MASK)
#define AI_PLLAUDIO_CTRL0_BYPASS_MASK (0x10000UL)
#define AI_PLLAUDIO_CTRL0_BYPASS_SHIFT (16U)
#define AI_PLLAUDIO_CTRL0_PLL_REG_EN(x) \
(((uint32_t)(((uint32_t)(x)) << AI_PLLAUDIO_CTRL0_PLL_REG_EN_SHIFT)) & AI_PLLAUDIO_CTRL0_PLL_REG_EN_MASK)
#define AI_PLLAUDIO_CTRL0_PLL_REG_EN_MASK (0x400000UL)
#define AI_PLLAUDIO_CTRL0_PLL_REG_EN_SHIFT (22U)
/*! @} */
/*!
* @}
*/
/*!
* @addtogroup AI_Register_Masks PLL VIDEO Register Masks
* @{
*/
/*! @name CTRL0 - CTRL0 Register */
/*! @{ */
#define AI_PLLVIDEO_CTRL0_HOLD_RING_OFF(x) \
(((uint32_t)(((uint32_t)(x)) << AI_PLLVIDEO_CTRL0_HOLD_RING_OFF_SHIFT)) & AI_PLLVIDEO_CTRL0_HOLD_RING_OFF_MASK)
#define AI_PLLVIDEO_CTRL0_HOLD_RING_OFF_MASK (0x2000UL)
#define AI_PLLVIDEO_CTRL0_HOLD_RING_OFF_SHIFT (13U)
#define AI_PLLVIDEO_CTRL0_POWER_UP(x) \
(((uint32_t)(((uint32_t)(x)) << AI_PLLVIDEO_CTRL0_POWER_UP_SHIFT)) & AI_PLLVIDEO_CTRL0_POWER_UP_MASK)
#define AI_PLLVIDEO_CTRL0_POWER_UP_MASK (0x4000UL)
#define AI_PLLVIDEO_CTRL0_POWER_UP_SHIFT (14U)
#define AI_PLLVIDEO_CTRL0_ENABLE(x) \
(((uint32_t)(((uint32_t)(x)) << AI_PLLVIDEO_CTRL0_ENABLE_SHIFT)) & AI_PLLVIDEO_CTRL0_ENABLE_MASK)
#define AI_PLLVIDEO_CTRL0_ENABLE_MASK (0x8000UL)
#define AI_PLLVIDEO_CTRL0_ENABLE_SHIFT (15U)
#define AI_PLLVIDEO_CTRL0_BYPASS(x) \
(((uint32_t)(((uint32_t)(x)) << AI_PLLVIDEO_CTRL0_BYPASS_SHIFT)) & AI_PLLVIDEO_CTRL0_BYPASS_MASK)
#define AI_PLLVIDEO_CTRL0_BYPASS_MASK (0x10000UL)
#define AI_PLLVIDEO_CTRL0_BYPASS_SHIFT (16U)
#define AI_PLLVIDEO_CTRL0_PLL_REG_EN(x) \
(((uint32_t)(((uint32_t)(x)) << AI_PLLVIDEO_CTRL0_PLL_REG_EN_SHIFT)) & AI_PLLVIDEO_CTRL0_PLL_REG_EN_MASK)
#define AI_PLLVIDEO_CTRL0_PLL_REG_EN_MASK (0x400000UL)
#define AI_PLLVIDEO_CTRL0_PLL_REG_EN_SHIFT (22U)
/*! @} */
/*!
* @}
*/
/*! @} */
/*******************************************************************************
* API
******************************************************************************/
#if defined(__cplusplus)
extern "C" {
#endif /* __cplusplus */
/*!
* @brief AI interface access
*
* @param itf AI interface name
* @param isWrite write enable
* @param addr address
* @param wdata data to be set
*
*/
uint32_t ANATOP_AI_Access(anatop_ai_itf_t itf, bool isWrite, anatop_ai_reg_t addr, uint32_t wdata);
/*!
* @brief AI interface writing
*
* @param itf AI interface name
* @param addr address
* @param wdata data to be set
*
*/
void ANATOP_AI_Write(anatop_ai_itf_t itf, anatop_ai_reg_t addr, uint32_t wdata);
/*!
* @brief AI interface reading
*
* @param itf AI interface name
* @param addr address
* @return data read
*
*/
uint32_t ANATOP_AI_Read(anatop_ai_itf_t itf, anatop_ai_reg_t addr);
/*!
* @brief AI interface write with mask and shift
*
* @param itf AI interface name
* @param addr address
* @param wdata data to be written
* @param mask bit field mask
* @param shift bit field shift
*
*/
void ANATOP_AI_WriteWithMaskShift(
anatop_ai_itf_t itf, anatop_ai_reg_t addr, uint32_t wdata, uint32_t mask, uint32_t shift);
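/*
 * Illustrative usage sketch (an assumption, not part of the SDK): powering up and
 * enabling the 1G PLL through the AI interface with the field macros defined
 * above. Lock waiting and the full bring-up sequence are not described in this
 * header and are omitted here.
 *
 *     ANATOP_AI_WriteWithMaskShift(kAI_Itf_1g, kAI_PLL1G_CTRL0, 1U,
 *                                  AI_PLL1G_CTRL0_POWER_UP_MASK,
 *                                  AI_PLL1G_CTRL0_POWER_UP_SHIFT);
 *     ANATOP_AI_WriteWithMaskShift(kAI_Itf_1g, kAI_PLL1G_CTRL0, 1U,
 *                                  AI_PLL1G_CTRL0_ENABLE_MASK,
 *                                  AI_PLL1G_CTRL0_ENABLE_SHIFT);
 */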
/* @} */
#if defined(__cplusplus)
}
#endif /* __cplusplus */
/*! @} */
#endif /* _FSL_ANATOP_AI_H_ */
| 10,556 |
535 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#include "pagespeed/kernel/image/frame_interface_optimizer.h"
#include <cstdint>
#include "pagespeed/kernel/base/message_handler.h"
#include "pagespeed/kernel/base/string.h"
namespace pagespeed {
namespace image_compression {
// Takes ownership of reader.
MultipleFramePaddingReader::MultipleFramePaddingReader(
MultipleFrameReader* reader)
: MultipleFrameReader(reader->message_handler()), impl_(reader) {}
MultipleFramePaddingReader::~MultipleFramePaddingReader() {}
ScanlineStatus MultipleFramePaddingReader::Reset() { return impl_->Reset(); }
ScanlineStatus MultipleFramePaddingReader::Initialize() {
ScanlineStatus status = impl_->Initialize(image_buffer_, buffer_length_);
if (status.Success()) {
status = impl_->GetImageSpec(&image_spec_);
}
return status;
}
bool MultipleFramePaddingReader::HasMoreFrames() const {
return impl_->HasMoreFrames();
}
bool MultipleFramePaddingReader::HasMoreScanlines() const {
return current_scanline_idx_ < padded_frame_spec_.height;
}
ScanlineStatus MultipleFramePaddingReader::PrepareNextFrame() {
frame_needs_no_padding_ = false;
frame_is_full_height_ = false;
frame_is_full_width_ = false;
// If image_spec_.use_bg_color == false, then we pad the frame with
// the transparent color defined in kTransparent.
static const PixelRgbaChannels kTransparent = {0, 0, 0, kAlphaTransparent};
ScanlineStatus status;
if (impl_->PrepareNextFrame(&status) &&
impl_->GetFrameSpec(&impl_frame_spec_, &status)) {
// Bounds-check the FrameSpec.
impl_frame_spec_.left = image_spec_.TruncateXIndex(impl_frame_spec_.left);
impl_frame_spec_.width =
image_spec_.TruncateXIndex(impl_frame_spec_.left +
impl_frame_spec_.width) -
impl_frame_spec_.left;
padded_frame_spec_ = impl_frame_spec_;
padded_frame_spec_.width = image_spec_.width;
padded_frame_spec_.height = image_spec_.height;
padded_frame_spec_.top = 0;
padded_frame_spec_.left = 0;
bytes_per_pixel_ = GetBytesPerPixel(padded_frame_spec_.pixel_format);
size_px scanline_num_bytes = padded_frame_spec_.width * bytes_per_pixel_;
current_scanline_.reset(new uint8_t[scanline_num_bytes]);
scanline_template_.reset(new uint8_t[scanline_num_bytes]);
uint8_t* template_ptr_end = scanline_template_.get() + scanline_num_bytes;
const void* bg_color =
(image_spec_.use_bg_color ? image_spec_.bg_color : kTransparent);
for (uint8_t* template_ptr = scanline_template_.get();
template_ptr < template_ptr_end; template_ptr += bytes_per_pixel_) {
memcpy(template_ptr, bg_color, bytes_per_pixel_);
}
current_scanline_idx_ = 0;
// These are guaranteed to be in range because impl_frame_spec_
// was itself bounds-checked above.
size_px foreground_scanline_start_idx = impl_frame_spec_.left;
size_px foreground_scanline_end_idx =
(impl_frame_spec_.left + impl_frame_spec_.width);
foreground_scanline_start_byte_ =
(current_scanline_.get() +
bytes_per_pixel_ * foreground_scanline_start_idx);
frame_is_full_width_ = ((foreground_scanline_start_idx == 0) &&
(foreground_scanline_end_idx == image_spec_.width));
frame_is_full_height_ = ((impl_frame_spec_.top == 0) &&
(impl_frame_spec_.height == image_spec_.height));
frame_needs_no_padding_ = frame_is_full_width_ && frame_is_full_height_;
// Set the background color for all the scanlines to follow. Note
// that since the foreground is rectangular, the same foreground
// pixels will get overwritten in each scanline, while the
// background pixels remain untouched.
memcpy(current_scanline_.get(), scanline_template_.get(),
scanline_num_bytes);
}
return status;
}
ScanlineStatus MultipleFramePaddingReader::ReadNextScanline(
const void** out_scanline_bytes) {
if (frame_needs_no_padding_) {
// Short-circuit any additional computations.
++current_scanline_idx_;
return impl_->ReadNextScanline(out_scanline_bytes);
}
if (!HasMoreScanlines()) {
return PS_LOGGED_STATUS(
PS_LOG_DFATAL, message_handler(), SCANLINE_STATUS_INVOCATION_ERROR,
FRAME_PADDING_READER, "no more scanlines in the current frame");
}
const void* impl_scanline = nullptr;
ScanlineStatus status;
if (frame_is_full_height_ ||
((current_scanline_idx_ >= impl_frame_spec_.top) &&
(current_scanline_idx_ <
(impl_frame_spec_.top + impl_frame_spec_.height)))) {
// This scanline contains foreground pixels.
// If a full-width row, we can short-circuit the remaining
// computations.
if (frame_is_full_width_) {
++current_scanline_idx_;
return impl_->ReadNextScanline(out_scanline_bytes);
}
// Read the foreground row for use below.
if (!impl_->ReadNextScanline(&impl_scanline, &status)) {
return status;
}
}
if (impl_scanline == nullptr) {
// This scanline contains only background pixels.
*out_scanline_bytes = scanline_template_.get();
} else {
// Overwrite the foreground pixels appropriately. Note that the
// background pixels were already set in PrepareNextFrame.
memcpy(foreground_scanline_start_byte_, impl_scanline,
bytes_per_pixel_ * impl_frame_spec_.width);
*out_scanline_bytes = current_scanline_.get();
}
++current_scanline_idx_;
return status;
}
ScanlineStatus MultipleFramePaddingReader::GetFrameSpec(
FrameSpec* frame_spec) const {
*frame_spec = padded_frame_spec_;
return ScanlineStatus(SCANLINE_STATUS_SUCCESS);
}
ScanlineStatus MultipleFramePaddingReader::GetImageSpec(
ImageSpec* image_spec) const {
ScanlineStatus status = impl_->GetImageSpec(image_spec);
if (status.Success() && !image_spec->Equals(image_spec_)) {
return ScanlineStatus(SCANLINE_STATUS_INTERNAL_ERROR, FRAME_PADDING_READER,
"ImageSpec changed during image processing");
}
return status;
}
MessageHandler* MultipleFramePaddingReader::message_handler() const {
return impl_->message_handler();
}
ScanlineStatus MultipleFramePaddingReader::set_quirks_mode(
QuirksMode quirks_mode) {
return impl_->set_quirks_mode(quirks_mode);
}
QuirksMode MultipleFramePaddingReader::quirks_mode() const {
return impl_->quirks_mode();
}
} // namespace image_compression
} // namespace pagespeed
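// Illustrative consumption loop (an assumption, not part of this file): because
// every frame is padded to the full image size, callers can treat each frame as
// image_spec-sized. "SomeFrameReader" and the buffer setup are hypothetical.
//
//   MultipleFramePaddingReader reader(new SomeFrameReader(handler));
//   // ... supply image_buffer_/buffer_length_ as required by the base class ...
//   reader.Initialize();
//   while (reader.HasMoreFrames()) {
//     reader.PrepareNextFrame();
//     while (reader.HasMoreScanlines()) {
//       const void* scanline = nullptr;
//       reader.ReadNextScanline(&scanline);  // always image_spec_.width pixels wide
//     }
//   }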
| 2,559 |
353 | <filename>ceph_deploy/util/net.py
try:
from urllib.request import urlopen
from urllib.error import HTTPError
except ImportError:
from urllib2 import urlopen, HTTPError
from ceph_deploy import exc
import logging
import re
import socket
from ceph_deploy.lib import remoto
LOG = logging.getLogger(__name__)
# TODO: at some point, it might be way more accurate to do this in the actual
# host where we need to get IPs from. SaltStack does this by calling `ip` and
# parsing the output, which is probably the one true way of dealing with it.
def get_nonlocal_ip(host, subnet=None):
"""
Search result of getaddrinfo() for a non-localhost-net address
"""
try:
ailist = socket.getaddrinfo(host, None)
except socket.gaierror:
raise exc.UnableToResolveError(host)
for ai in ailist:
# an ai is a 5-tuple; the last element is (ip, port)
ip = ai[4][0]
if subnet and ip_in_subnet(ip, subnet):
LOG.info('found ip (%s) for host (%s) to be in cluster subnet (%s)' % (
ip,
host,
subnet,)
)
return ip
if not ip.startswith('127.'):
if subnet:
LOG.warning('could not match ip (%s) for host (%s) for cluster subnet (%s)' % (
ip,
host,
subnet,)
)
return ip
raise exc.UnableToResolveError(host)
def ip_in_subnet(ip, subnet):
"""Does IP exists in a given subnet utility. Returns a boolean"""
ipaddr = int(''.join(['%02x' % int(x) for x in ip.split('.')]), 16)
netstr, bits = subnet.split('/')
netaddr = int(''.join(['%02x' % int(x) for x in netstr.split('.')]), 16)
mask = (0xffffffff << (32 - int(bits))) & 0xffffffff
return (ipaddr & mask) == (netaddr & mask)
def in_subnet(cidr, addrs=None):
"""
Returns True if host is within specified subnet, otherwise False
"""
for address in addrs:
if ip_in_subnet(address, cidr):
return True
return False
def ip_addresses(conn, interface=None, include_loopback=False):
"""
Returns a list of IPv4/IPv6 addresses assigned to the host. 127.0.0.1/::1 is
ignored, unless 'include_loopback=True' is indicated. If 'interface' is
provided, then only IP addresses from that interface will be returned.
Example output looks like::
>>> ip_addresses(conn)
>>> ['192.168.1.111', '10.0.1.12', '2001:db8::100']
"""
ret = set()
ifaces = linux_interfaces(conn)
if interface is None:
target_ifaces = ifaces
else:
target_ifaces = dict((k, v) for k, v in ifaces.items()
if k == interface)
if not target_ifaces:
LOG.error('Interface {0} not found.'.format(interface))
for info in target_ifaces.values():
for ipv4 in info.get('inet', []):
loopback = in_subnet('127.0.0.0/8', [ipv4.get('address')]) or ipv4.get('label') == 'lo'
if not loopback or include_loopback:
ret.add(ipv4['address'])
for secondary in info.get('secondary', []):
addr = secondary.get('address')
if addr and secondary.get('type') == 'inet':
if include_loopback or (not include_loopback and not in_subnet('127.0.0.0/8', [addr])):
ret.add(addr)
for ipv6 in info.get('inet6', []):
# When switching to Python 3 the IPAddress module can do all this work for us
if ipv6.get('address').startswith('fe80::'):
continue
if not include_loopback and '::1' == ipv6.get('address'):
continue
ret.add(ipv6['address'])
if ret:
conn.logger.debug('IP addresses found: %s' % str(list(ret)))
return sorted(list(ret))
def linux_interfaces(conn):
"""
Obtain interface information for *NIX/BSD variants in remote servers.
Example output from a remote node with a couple of interfaces::
{'eth0': {'hwaddr': '08:00:27:08:c2:e4',
'inet': [{'address': '10.0.2.15',
'broadcast': '10.0.2.255',
'label': 'eth0',
'netmask': '255.255.255.0'}],
'inet6': [{'address': 'fe80::a00:27ff:fe08:c2e4',
'prefixlen': '64'}],
'up': True},
'eth1': {'hwaddr': '08:00:27:70:06:f1',
'inet': [{'address': '192.168.111.101',
'broadcast': '192.168.111.255',
'label': 'eth1',
'netmask': '255.255.255.0'}],
'inet6': [{'address': 'fe80::a00:27ff:fe70:6f1',
'prefixlen': '64'}],
'up': True},
'lo': {'hwaddr': '00:00:00:00:00:00',
'inet': [{'address': '127.0.0.1',
'broadcast': None,
'label': 'lo',
'netmask': '255.0.0.0'}],
'inet6': [{'address': '::1', 'prefixlen': '128'}],
'up': True}}
:param conn: A connection object to a remote node
"""
ifaces = dict()
ip_path = conn.remote_module.which('ip')
ifconfig_path = None if ip_path else conn.remote_module.which('ifconfig')
if ip_path:
cmd1, _, _ = remoto.process.check(
conn,
[
'{0}'.format(ip_path),
'link',
'show',
],
)
cmd2, _, _ = remoto.process.check(
conn,
[
'{0}'.format(ip_path),
'addr',
'show',
],
)
ifaces = _interfaces_ip('\n'.join(cmd1) + '\n' +
'\n'.join(cmd2))
elif ifconfig_path:
cmd, _, _ = remoto.process.check(
conn,
[
'{0}'.format(ifconfig_path),
'-a',
]
)
ifaces = _interfaces_ifconfig('\n'.join(cmd))
return ifaces
def _interfaces_ip(out):
"""
Uses ip to return a dictionary of interfaces with various information about
each (up/down state, ip address, netmask, and hwaddr)
"""
ret = dict()
def parse_network(value, cols):
"""
Return a tuple of ip, netmask, broadcast
based on the current set of cols
"""
brd = None
if '/' in value: # we have a CIDR in this address
ip, cidr = value.split('/') # pylint: disable=C0103
else:
ip = value # pylint: disable=C0103
cidr = 32
if type_ == 'inet':
mask = cidr_to_ipv4_netmask(int(cidr))
if 'brd' in cols:
brd = cols[cols.index('brd') + 1]
elif type_ == 'inet6':
mask = cidr
return (ip, mask, brd)
groups = re.compile('\r?\n\\d').split(out)
for group in groups:
iface = None
data = dict()
for line in group.splitlines():
if ' ' not in line:
continue
match = re.match(r'^\d*:\s+([\w.\-]+)(?:@)?([\w.\-]+)?:\s+<(.+)>', line)
if match:
iface, parent, attrs = match.groups()
if 'UP' in attrs.split(','):
data['up'] = True
else:
data['up'] = False
if parent:
data['parent'] = parent
continue
cols = line.split()
if len(cols) >= 2:
type_, value = tuple(cols[0:2])
iflabel = cols[-1:][0]
if type_ in ('inet', 'inet6'):
if 'secondary' not in cols:
ipaddr, netmask, broadcast = parse_network(value, cols)
if type_ == 'inet':
if 'inet' not in data:
data['inet'] = list()
addr_obj = dict()
addr_obj['address'] = ipaddr
addr_obj['netmask'] = netmask
addr_obj['broadcast'] = broadcast
addr_obj['label'] = iflabel
data['inet'].append(addr_obj)
elif type_ == 'inet6':
if 'inet6' not in data:
data['inet6'] = list()
addr_obj = dict()
addr_obj['address'] = ipaddr
addr_obj['prefixlen'] = netmask
data['inet6'].append(addr_obj)
else:
if 'secondary' not in data:
data['secondary'] = list()
ip_, mask, brd = parse_network(value, cols)
data['secondary'].append({
'type': type_,
'address': ip_,
'netmask': mask,
'broadcast': brd,
'label': iflabel,
})
del ip_, mask, brd
elif type_.startswith('link'):
data['hwaddr'] = value
if iface:
ret[iface] = data
del iface, data
return ret
def _interfaces_ifconfig(out):
"""
Uses ifconfig to return a dictionary of interfaces with various information
about each (up/down state, ip address, netmask, and hwaddr)
"""
ret = dict()
piface = re.compile(r'^([^\s:]+)')
pmac = re.compile('.*?(?:HWaddr|ether|address:|lladdr) ([0-9a-fA-F:]+)')
pip = re.compile(r'.*?(?:inet addr:|inet )(.*?)\s')
pip6 = re.compile('.*?(?:inet6 addr: (.*?)/|inet6 )([0-9a-fA-F:]+)')
pmask = re.compile(r'.*?(?:Mask:|netmask )(?:((?:0x)?[0-9a-fA-F]{8})|([\d\.]+))')
pmask6 = re.compile(r'.*?(?:inet6 addr: [0-9a-fA-F:]+/(\d+)|prefixlen (\d+)).*')
pupdown = re.compile('UP')
pbcast = re.compile(r'.*?(?:Bcast:|broadcast )([\d\.]+)')
groups = re.compile('\r?\n(?=\\S)').split(out)
for group in groups:
data = dict()
iface = ''
updown = False
for line in group.splitlines():
miface = piface.match(line)
mmac = pmac.match(line)
mip = pip.match(line)
mip6 = pip6.match(line)
mupdown = pupdown.search(line)
if miface:
iface = miface.group(1)
if mmac:
data['hwaddr'] = mmac.group(1)
if mip:
if 'inet' not in data:
data['inet'] = list()
addr_obj = dict()
addr_obj['address'] = mip.group(1)
mmask = pmask.match(line)
if mmask:
if mmask.group(1):
mmask = _number_of_set_bits_to_ipv4_netmask(
int(mmask.group(1), 16))
else:
mmask = mmask.group(2)
addr_obj['netmask'] = mmask
mbcast = pbcast.match(line)
if mbcast:
addr_obj['broadcast'] = mbcast.group(1)
data['inet'].append(addr_obj)
if mupdown:
updown = True
if mip6:
if 'inet6' not in data:
data['inet6'] = list()
addr_obj = dict()
addr_obj['address'] = mip6.group(1) or mip6.group(2)
mmask6 = pmask6.match(line)
if mmask6:
addr_obj['prefixlen'] = mmask6.group(1) or mmask6.group(2)
data['inet6'].append(addr_obj)
data['up'] = updown
ret[iface] = data
del data
return ret
def _number_of_set_bits_to_ipv4_netmask(set_bits): # pylint: disable=C0103
"""
Returns an IPv4 netmask from the integer representation of that mask.
Ex. 0xffffff00 -> '255.255.255.0'
"""
return cidr_to_ipv4_netmask(_number_of_set_bits(set_bits))
def _number_of_set_bits(x):
"""
Returns the number of bits that are set in a 32bit int
"""
# Taken from http://stackoverflow.com/a/4912729. Many thanks!
x -= (x >> 1) & 0x55555555
x = ((x >> 2) & 0x33333333) + (x & 0x33333333)
x = ((x >> 4) + x) & 0x0f0f0f0f
x += x >> 8
x += x >> 16
return x & 0x0000003f
def cidr_to_ipv4_netmask(cidr_bits):
"""
Returns an IPv4 netmask for the given number of CIDR bits
"""
try:
cidr_bits = int(cidr_bits)
if not 1 <= cidr_bits <= 32:
return ''
except ValueError:
return ''
netmask = ''
for idx in range(4):
if idx:
netmask += '.'
if cidr_bits >= 8:
netmask += '255'
cidr_bits -= 8
else:
netmask += '{0:d}'.format(256 - (2 ** (8 - cidr_bits)))
cidr_bits = 0
return netmask
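# Illustrative sketch: the netmask helpers above agree with each other; the
# specific values are assumptions chosen only to demonstrate the conversions.
def _example_netmask_helpers():
    assert cidr_to_ipv4_netmask(24) == '255.255.255.0'
    assert cidr_to_ipv4_netmask(22) == '255.255.252.0'
    assert _number_of_set_bits(0xffffff00) == 24
    assert _number_of_set_bits_to_ipv4_netmask(0xffffff00) == '255.255.255.0'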
def get_request(url):
try:
return urlopen(url)
except HTTPError as err:
LOG.error('repository might not be available yet')
raise RuntimeError('%s, failed to fetch %s' % (err, url))
def get_chacra_repo(shaman_url):
"""
From a Shaman URL, get the chacra url for a repository, read the
contents that point to the repo and return it as a string.
"""
shaman_response = get_request(shaman_url)
chacra_url = shaman_response.geturl()
chacra_response = get_request(chacra_url)
return chacra_response.read()
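# Illustrative usage (assumed URL shape): a Shaman endpoint that redirects to a
# chacra repo file can be fetched in one call, e.g.
#   repo_file = get_chacra_repo('https://shaman.ceph.com/api/repos/ceph/'
#                               'main/latest/ubuntu/focal/repo')
# The exact URL above is an assumption for demonstration only.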
| 7,495 |
2,151 | // Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "components/os_crypt/keyring_util_linux.h"
#include <dlfcn.h>
#include "base/logging.h"
decltype(&::gnome_keyring_is_available)
GnomeKeyringLoader::gnome_keyring_is_available_ptr;
decltype(&::gnome_keyring_store_password)
GnomeKeyringLoader::gnome_keyring_store_password_ptr;
decltype(&::gnome_keyring_delete_password)
GnomeKeyringLoader::gnome_keyring_delete_password_ptr;
decltype(&::gnome_keyring_find_items)
GnomeKeyringLoader::gnome_keyring_find_items_ptr;
decltype(&::gnome_keyring_find_password_sync)
GnomeKeyringLoader::gnome_keyring_find_password_sync_ptr;
decltype(&::gnome_keyring_store_password_sync)
GnomeKeyringLoader::gnome_keyring_store_password_sync_ptr;
decltype(&::gnome_keyring_result_to_message)
GnomeKeyringLoader::gnome_keyring_result_to_message_ptr;
decltype(&::gnome_keyring_attribute_list_free)
GnomeKeyringLoader::gnome_keyring_attribute_list_free_ptr;
decltype(&::gnome_keyring_attribute_list_new)
GnomeKeyringLoader::gnome_keyring_attribute_list_new_ptr;
decltype(&::gnome_keyring_attribute_list_append_string)
GnomeKeyringLoader::gnome_keyring_attribute_list_append_string_ptr;
decltype(&::gnome_keyring_attribute_list_append_uint32)
GnomeKeyringLoader::gnome_keyring_attribute_list_append_uint32_ptr;
decltype(&::gnome_keyring_free_password)
GnomeKeyringLoader::gnome_keyring_free_password_ptr;
bool GnomeKeyringLoader::keyring_loaded = false;
const GnomeKeyringLoader::FunctionInfo GnomeKeyringLoader::functions[] = {
{"gnome_keyring_is_available",
reinterpret_cast<void**>(&gnome_keyring_is_available_ptr)},
{"gnome_keyring_store_password",
reinterpret_cast<void**>(&gnome_keyring_store_password_ptr)},
{"gnome_keyring_delete_password",
reinterpret_cast<void**>(&gnome_keyring_delete_password_ptr)},
{"gnome_keyring_find_items",
reinterpret_cast<void**>(&gnome_keyring_find_items_ptr)},
{"gnome_keyring_find_password_sync",
reinterpret_cast<void**>(&gnome_keyring_find_password_sync_ptr)},
{"gnome_keyring_store_password_sync",
reinterpret_cast<void**>(&gnome_keyring_store_password_sync_ptr)},
{"gnome_keyring_result_to_message",
reinterpret_cast<void**>(&gnome_keyring_result_to_message_ptr)},
{"gnome_keyring_attribute_list_free",
reinterpret_cast<void**>(&gnome_keyring_attribute_list_free_ptr)},
{"gnome_keyring_attribute_list_new",
reinterpret_cast<void**>(&gnome_keyring_attribute_list_new_ptr)},
{"gnome_keyring_attribute_list_append_string",
reinterpret_cast<void**>(&gnome_keyring_attribute_list_append_string_ptr)},
{"gnome_keyring_attribute_list_append_uint32",
reinterpret_cast<void**>(&gnome_keyring_attribute_list_append_uint32_ptr)},
{"gnome_keyring_free_password",
reinterpret_cast<void**>(&gnome_keyring_free_password_ptr)}};
/* Load the library and initialize the function pointers. */
bool GnomeKeyringLoader::LoadGnomeKeyring() {
if (keyring_loaded)
return true;
void* handle = dlopen("libgnome-keyring.so.0", RTLD_NOW | RTLD_GLOBAL);
if (!handle) {
// We wanted to use GNOME Keyring, but we couldn't load it. Warn, because
// either the user asked for this, or we autodetected it incorrectly. (Or
// the system has broken libraries, which is also good to warn about.)
LOG(WARNING) << "Could not load libgnome-keyring.so.0: " << dlerror();
return false;
}
for (size_t i = 0; i < arraysize(functions); ++i) {
dlerror();
*functions[i].pointer = dlsym(handle, functions[i].name);
const char* error = dlerror();
if (error) {
LOG(ERROR) << "Unable to load symbol " << functions[i].name << ": "
<< error;
dlclose(handle);
return false;
}
}
keyring_loaded = true;
// We leak the library handle. That's OK: this function is called only once.
return true;
}
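// Illustrative sketch (not part of the original file): callers are expected to
// invoke LoadGnomeKeyring() before dereferencing any of the pointers above, e.g.
//
//   bool CanUseGnomeKeyring() {  // hypothetical helper name
//     if (!GnomeKeyringLoader::LoadGnomeKeyring())
//       return false;
//     return GnomeKeyringLoader::gnome_keyring_is_available_ptr() != FALSE;
//   }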
| 1,530 |
369 | <filename>FreeRTOSv10.4.1/FreeRTOS/Demo/CORTEX_LM3S811_IAR/LuminaryCode/flash.c
//*****************************************************************************
//
// flash.c - Driver for programming the on-chip flash.
//
// Copyright (c) 2005,2006 Luminary Micro, Inc. All rights reserved.
//
// Software License Agreement
//
// Luminary Micro, Inc. (LMI) is supplying this software for use solely and
// exclusively on LMI's Stellaris Family of microcontroller products.
//
// The software is owned by LMI and/or its suppliers, and is protected under
// applicable copyright laws. All rights are reserved. Any use in violation
// of the foregoing restrictions may subject the user to criminal sanctions
// under applicable laws, as well as to civil liability for the breach of the
// terms and conditions of this license.
//
// THIS SOFTWARE IS PROVIDED "AS IS". NO WARRANTIES, WHETHER EXPRESS, IMPLIED
// OR STATUTORY, INCLUDING, BUT NOT LIMITED TO, IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE APPLY TO THIS SOFTWARE.
// LMI SHALL NOT, IN ANY CIRCUMSTANCES, BE LIABLE FOR SPECIAL, INCIDENTAL, OR
// CONSEQUENTIAL DAMAGES, FOR ANY REASON WHATSOEVER.
//
// This is part of revision 991 of the Stellaris Driver Library.
//
//*****************************************************************************
//*****************************************************************************
//
//! \addtogroup flash_api
//! @{
//
//*****************************************************************************
#include "../hw_flash.h"
#include "../hw_ints.h"
#include "../hw_memmap.h"
#include "../hw_sysctl.h"
#include "../hw_types.h"
#include "debug.h"
#include "flash.h"
#include "interrupt.h"
//*****************************************************************************
//
//! Gets the number of processor clocks per micro-second.
//!
//! This function returns the number of clocks per micro-second, as presently
//! known by the flash controller.
//!
//! \return Returns the number of processor clocks per micro-second.
//
//*****************************************************************************
#if defined(GROUP_usecget) || defined(BUILD_ALL) || defined(DOXYGEN)
unsigned long
FlashUsecGet(void)
{
//
// Return the number of clocks per micro-second.
//
return(HWREG(FLASH_USECRL) + 1);
}
#endif
//*****************************************************************************
//
//! Sets the number of processor clocks per micro-second.
//!
//! \param ulClocks is the number of processor clocks per micro-second.
//!
//! This function is used to tell the flash controller the number of processor
//! clocks per micro-second. This value must be programmed correctly or the
//! flash most likely will not program correctly; it has no effect on reading
//! flash.
//!
//! \return None.
//
//*****************************************************************************
#if defined(GROUP_usecset) || defined(BUILD_ALL) || defined(DOXYGEN)
void
FlashUsecSet(unsigned long ulClocks)
{
//
// Set the number of clocks per micro-second.
//
HWREG(FLASH_USECRL) = ulClocks - 1;
}
#endif
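//*****************************************************************************
//
// Illustrative usage (not part of the original driver): the value passed to
// FlashUsecSet() is simply the processor clock rate in MHz.  For example, a
// part running from a 6 MHz clock would be configured as shown below; the
// clock rate is an assumption for the example only.
//
//     FlashUsecSet(6);                // 6 processor clocks per micro-second
//     ASSERT(FlashUsecGet() == 6);
//
//*****************************************************************************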
//*****************************************************************************
//
//! Erases a block of flash.
//!
//! \param ulAddress is the start address of the flash block to be erased.
//!
//! This function will erase a 1 kB block of the on-chip flash. After erasing,
//! the block will be filled with 0xFF bytes. Read-only and execute-only
//! blocks cannot be erased.
//!
//! This function will not return until the block has been erased.
//!
//! \return Returns 0 on success, or -1 if an invalid block address was
//! specified or the block is write-protected.
//
//*****************************************************************************
#if defined(GROUP_erase) || defined(BUILD_ALL) || defined(DOXYGEN)
long
FlashErase(unsigned long ulAddress)
{
//
// Check the arguments.
//
ASSERT(!(ulAddress & (FLASH_ERASE_SIZE - 1)));
//
// Clear the flash access interrupt.
//
HWREG(FLASH_FCMISC) = FLASH_FCMISC_ACCESS;
//
// Erase the block.
//
HWREG(FLASH_FMA) = ulAddress;
HWREG(FLASH_FMC) = FLASH_FMC_WRKEY | FLASH_FMC_ERASE;
//
// Wait until the block has been erased.
//
while(HWREG(FLASH_FMC) & FLASH_FMC_ERASE)
{
}
//
// Return an error if an access violation occurred.
//
if(HWREG(FLASH_FCRIS) & FLASH_FCRIS_ACCESS)
{
return(-1);
}
//
// Success.
//
return(0);
}
#endif
//*****************************************************************************
//
//! Programs flash.
//!
//! \param pulData is a pointer to the data to be programmed.
//! \param ulAddress is the starting address in flash to be programmed. Must
//! be a multiple of four.
//! \param ulCount is the number of bytes to be programmed. Must be a multiple
//! of four.
//!
//! This function will program a sequence of words into the on-chip flash.
//! Programming each location ANDs the new data with the existing data; in
//! other words, bits that contain 1 can remain 1 or be changed to 0, but
//! bits that are 0 cannot be changed to 1. Therefore, a word can be
//! programmed multiple times as long as these
//! rules are followed; if a program operation attempts to change a 0 bit to
//! a 1 bit, that bit will not have its value changed.
//!
//! Since the flash is programmed one word at a time, the starting address and
//! byte count must both be multiples of four. It is up to the caller to
//! verify the programmed contents, if such verification is required.
//!
//! This function will not return until the data has been programmed.
//!
//! \return Returns 0 on success, or -1 if a programming error is encountered.
//
//*****************************************************************************
#if defined(GROUP_program) || defined(BUILD_ALL) || defined(DOXYGEN)
long
FlashProgram(unsigned long *pulData, unsigned long ulAddress,
unsigned long ulCount)
{
//
// Check the arguments.
//
ASSERT(!(ulAddress & 3));
ASSERT(!(ulCount & 3));
//
// Clear the flash access interrupt.
//
HWREG(FLASH_FCMISC) = FLASH_FCMISC_ACCESS;
//
// Loop over the words to be programmed.
//
while(ulCount)
{
//
// Program the next word.
//
HWREG(FLASH_FMA) = ulAddress;
HWREG(FLASH_FMD) = *pulData;
HWREG(FLASH_FMC) = FLASH_FMC_WRKEY | FLASH_FMC_WRITE;
//
// Wait until the word has been programmed.
//
while(HWREG(FLASH_FMC) & FLASH_FMC_WRITE)
{
}
//
// Increment to the next word.
//
pulData++;
ulAddress += 4;
ulCount -= 4;
}
//
// Return an error if an access violation occurred.
//
if(HWREG(FLASH_FCRIS) & FLASH_FCRIS_ACCESS)
{
return(-1);
}
//
// Success.
//
return(0);
}
#endif
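//*****************************************************************************
//
// Illustrative usage (not part of the original driver): a typical sequence is
// to erase a 1 kB block and then program word-aligned data into it.  The
// target address and buffer contents below are assumptions for the example.
//
//     unsigned long pulData[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
//
//     if(FlashErase(0x7000) == 0)
//     {
//         FlashProgram(pulData, 0x7000, sizeof(pulData));
//     }
//
//*****************************************************************************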
//*****************************************************************************
//
//! Gets the protection setting for a block of flash.
//!
//! \param ulAddress is the start address of the flash block to be queried.
//!
//! This function will get the current protection for the specified 2 kB block
//! of flash. Each block can be read/write, read-only, or execute-only.
//! Read/write blocks can be read, executed, erased, and programmed. Read-only
//! blocks can be read and executed. Execute-only blocks can only be executed;
//! processor and debugger data reads are not allowed.
//!
//! \return Returns the protection setting for this block. See
//! FlashProtectSet() for possible values.
//
//*****************************************************************************
#if defined(GROUP_protectget) || defined(BUILD_ALL) || defined(DOXYGEN)
tFlashProtection
FlashProtectGet(unsigned long ulAddress)
{
unsigned long ulFMPRE, ulFMPPE;
//
// Check the argument.
//
ASSERT(!(ulAddress & (FLASH_PROTECT_SIZE - 1)));
//
// Read the flash protection register and get the bits that apply to the
// specified block.
//
ulFMPRE = HWREG(FLASH_FMPRE);
ulFMPPE = HWREG(FLASH_FMPPE);
switch((((ulFMPRE >> (ulAddress / FLASH_PROTECT_SIZE)) &
FLASH_FMP_BLOCK_0) << 1) |
((ulFMPPE >> (ulAddress / FLASH_PROTECT_SIZE)) & FLASH_FMP_BLOCK_0))
{
//
// This block is marked as execute only (i.e. it can not be erased or
// programmed, and the only reads allowed are via the instruction fetch
// interface).
//
case 0:
case 1:
{
return(FlashExecuteOnly);
}
//
// This block is marked as read only (i.e. it can not be erased or
// programmed).
//
case 2:
{
return(FlashReadOnly);
}
//
// This block is read/write; it can be read, erased, and programmed.
//
case 3:
default:
{
return(FlashReadWrite);
}
}
}
#endif
//*****************************************************************************
//
//! Sets the protection setting for a block of flash.
//!
//! \param ulAddress is the start address of the flash block to be protected.
//! \param eProtect is the protection to be applied to the block. Can be one
//! of \b FlashReadWrite, \b FlashReadOnly, or \b FlashExecuteOnly.
//!
//! This function will set the protection for the specified 2 kB block of
//! flash. Blocks which are read/write can be made read-only or execute-only.
//! Blocks which are read-only can be made execute-only. Blocks which are
//! execute-only cannot have their protection modified. Attempts to make the
//! block protection less stringent (i.e. read-only to read/write) will result
//! in a failure (and be prevented by the hardware).
//!
//! Changes to the flash protection are maintained only until the next reset.
//! This allows the application to be executed in the desired flash protection
//! environment to check for inappropriate flash access (via the flash
//! interrupt). To make the flash protection permanent, use the
//! FlashProtectSave() function.
//!
//! \return Returns 0 on success, or -1 if an invalid address or an invalid
//! protection was specified.
//
//*****************************************************************************
#if defined(GROUP_protectset) || defined(BUILD_ALL) || defined(DOXYGEN)
long
FlashProtectSet(unsigned long ulAddress, tFlashProtection eProtect)
{
unsigned long ulProtectRE, ulProtectPE;
//
// Check the argument.
//
ASSERT(!(ulAddress & (FLASH_PROTECT_SIZE - 1)));
ASSERT((eProtect == FlashReadWrite) || (eProtect == FlashReadOnly) ||
(eProtect == FlashExecuteOnly));
//
// Convert the address into a block number.
//
ulAddress /= FLASH_PROTECT_SIZE;
//
// Get the current protection.
//
ulProtectRE = HWREG(FLASH_FMPRE);
ulProtectPE = HWREG(FLASH_FMPPE);
//
// Set the protection based on the requested protection.
//
switch(eProtect)
{
//
// Make this block execute only.
//
case FlashExecuteOnly:
{
//
// Turn off the read and program bits for this block.
//
ulProtectRE &= ~(FLASH_FMP_BLOCK_0 << ulAddress);
ulProtectPE &= ~(FLASH_FMP_BLOCK_0 << ulAddress);
//
// We're done handling this protection.
//
break;
}
//
// Make this block read only.
//
case FlashReadOnly:
{
//
// The block can not be made read only if it is execute only.
//
if(((ulProtectRE >> ulAddress) & FLASH_FMP_BLOCK_0) !=
FLASH_FMP_BLOCK_0)
{
return(-1);
}
//
// Make this block read only.
//
ulProtectPE &= ~(FLASH_FMP_BLOCK_0 << ulAddress);
//
// We're done handling this protection.
//
break;
}
//
// Make this block read/write.
//
case FlashReadWrite:
default:
{
//
// The block can not be made read/write if it is not already
// read/write.
//
if((((ulProtectRE >> ulAddress) & FLASH_FMP_BLOCK_0) !=
FLASH_FMP_BLOCK_0) ||
(((ulProtectPE >> ulAddress) & FLASH_FMP_BLOCK_0) !=
FLASH_FMP_BLOCK_0))
{
return(-1);
}
//
// The block is already read/write, so there is nothing to do.
//
return(0);
}
}
//
// Set the new protection.
//
HWREG(FLASH_FMPRE) = ulProtectRE;
HWREG(FLASH_FMPPE) = ulProtectPE;
//
// Success.
//
return(0);
}
#endif
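//*****************************************************************************
//
// Illustrative usage (not part of the original driver): protection can only be
// tightened, and it remains temporary until FlashProtectSave() commits it.
// The block address below is an assumption for the example.
//
//     if(FlashProtectSet(0x0800, FlashReadOnly) == 0)
//     {
//         // Optionally make the new setting permanent (non-reversible).
//         // FlashProtectSave();
//     }
//
//*****************************************************************************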
//*****************************************************************************
//
//! Saves the flash protection settings.
//!
//! This function will make the currently programmed flash protection settings
//! permanent. This is a non-reversible operation; a chip reset or power cycle
//! will not change the flash protection.
//!
//! This function will not return until the protection has been saved.
//!
//! \return Returns 0 on success, or -1 if a hardware error is encountered.
//
//*****************************************************************************
#if defined(GROUP_protectsave) || defined(BUILD_ALL) || defined(DOXYGEN)
long
FlashProtectSave(void)
{
//
// Tell the flash controller to write the flash read protection register.
//
HWREG(FLASH_FMA) = 0;
HWREG(FLASH_FMC) = FLASH_FMC_WRKEY | FLASH_FMC_COMT;
//
// Wait until the write has completed.
//
while(HWREG(FLASH_FMC) & FLASH_FMC_COMT)
{
}
//
// Tell the flash controller to write the flash program protection
// register.
//
HWREG(FLASH_FMA) = 1;
HWREG(FLASH_FMC) = FLASH_FMC_WRKEY | FLASH_FMC_COMT;
//
// Wait until the write has completed.
//
while(HWREG(FLASH_FMC) & FLASH_FMC_COMT)
{
}
//
// Success.
//
return(0);
}
#endif
//*****************************************************************************
//
//! Registers an interrupt handler for the flash interrupt.
//!
//! \param pfnHandler is a pointer to the function to be called when the flash
//! interrupt occurs.
//!
//! This sets the handler to be called when the flash interrupt occurs. The
//! flash controller can generate an interrupt when an invalid flash access
//! occurs, such as trying to program or erase a read-only block, or trying to
//! read from an execute-only block. It can also generate an interrupt when a
//! program or erase operation has completed. The interrupt will be
//! automatically enabled when the handler is registered.
//!
//! \sa IntRegister() for important information about registering interrupt
//! handlers.
//!
//! \return None.
//
//*****************************************************************************
#if defined(GROUP_intregister) || defined(BUILD_ALL) || defined(DOXYGEN)
void
FlashIntRegister(void (*pfnHandler)(void))
{
//
// Register the interrupt handler, returning an error if an error occurs.
//
IntRegister(INT_FLASH, pfnHandler);
//
// Enable the flash interrupt.
//
IntEnable(INT_FLASH);
}
#endif
//*****************************************************************************
//
//! Unregisters the interrupt handler for the flash interrupt.
//!
//! This function will clear the handler to be called when the flash interrupt
//! occurs. This will also mask off the interrupt in the interrupt controller
//! so that the interrupt handler is no longer called.
//!
//! \sa IntRegister() for important information about registering interrupt
//! handlers.
//!
//! \return None.
//
//*****************************************************************************
#if defined(GROUP_intunregister) || defined(BUILD_ALL) || defined(DOXYGEN)
void
FlashIntUnregister(void)
{
//
// Disable the interrupt.
//
IntDisable(INT_FLASH);
//
// Unregister the interrupt handler.
//
IntUnregister(INT_FLASH);
}
#endif
//*****************************************************************************
//
//! Enables individual flash controller interrupt sources.
//!
//! \param ulIntFlags is a bit mask of the interrupt sources to be enabled.
//! Can be any of the \b FLASH_FCIM_PROGRAM or \b FLASH_FCIM_ACCESS values.
//!
//! Enables the indicated flash controller interrupt sources. Only the sources
//! that are enabled can be reflected to the processor interrupt; disabled
//! sources have no effect on the processor.
//!
//! \return None.
//
//*****************************************************************************
#if defined(GROUP_intenable) || defined(BUILD_ALL) || defined(DOXYGEN)
void
FlashIntEnable(unsigned long ulIntFlags)
{
//
// Enable the specified interrupts.
//
HWREG(FLASH_FCIM) |= ulIntFlags;
}
#endif
//*****************************************************************************
//
//! Disables individual flash controller interrupt sources.
//!
//! \param ulIntFlags is a bit mask of the interrupt sources to be disabled.
//! Can be any of the \b FLASH_FCIM_PROGRAM or \b FLASH_FCIM_ACCESS values.
//!
//! Disables the indicated flash controller interrupt sources. Only the
//! sources that are enabled can be reflected to the processor interrupt;
//! disabled sources have no effect on the processor.
//!
//! \return None.
//
//*****************************************************************************
#if defined(GROUP_intdisable) || defined(BUILD_ALL) || defined(DOXYGEN)
void
FlashIntDisable(unsigned long ulIntFlags)
{
//
// Disable the specified interrupts.
//
HWREG(FLASH_FCIM) &= ~(ulIntFlags);
}
#endif
//*****************************************************************************
//
//! Gets the current interrupt status.
//!
//! \param bMasked is false if the raw interrupt status is required and true if
//! the masked interrupt status is required.
//!
//! This returns the interrupt status for the flash controller. Either the raw
//! interrupt status or the status of interrupts that are allowed to reflect to
//! the processor can be returned.
//!
//! \return The current interrupt status, enumerated as a bit field of
//! \b FLASH_FCMISC_PROGRAM and \b FLASH_FCMISC_ACCESS.
//
//*****************************************************************************
#if defined(GROUP_intgetstatus) || defined(BUILD_ALL) || defined(DOXYGEN)
unsigned long
FlashIntGetStatus(tBoolean bMasked)
{
//
// Return either the interrupt status or the raw interrupt status as
// requested.
//
if(bMasked)
{
return(HWREG(FLASH_FCMISC));
}
else
{
return(HWREG(FLASH_FCRIS));
}
}
#endif
//*****************************************************************************
//
//! Clears flash controller interrupt sources.
//!
//! \param ulIntFlags is the bit mask of the interrupt sources to be cleared.
//! Can be any of the \b FLASH_FCMISC_PROGRAM or \b FLASH_FCMISC_ACCESS
//! values.
//!
//! The specified flash controller interrupt sources are cleared, so that they
//! no longer assert. This must be done in the interrupt handler to keep it
//! from being called again immediately upon exit.
//!
//! \return None.
//
//*****************************************************************************
#if defined(GROUP_intclear) || defined(BUILD_ALL) || defined(DOXYGEN)
void
FlashIntClear(unsigned long ulIntFlags)
{
//
// Clear the flash interrupt.
//
HWREG(FLASH_FCMISC) = ulIntFlags;
}
#endif
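//*****************************************************************************
//
// Illustrative usage (not part of the original driver): a minimal access-
// violation handler registers itself, enables the access interrupt, and clears
// the source inside the handler.  The handler name is hypothetical.
//
//     void FlashAccessHandler(void)
//     {
//         FlashIntClear(FLASH_FCMISC_ACCESS);
//     }
//
//     FlashIntRegister(FlashAccessHandler);
//     FlashIntEnable(FLASH_FCIM_ACCESS);
//
//*****************************************************************************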
//*****************************************************************************
//
// Close the Doxygen group.
//! @}
//
//*****************************************************************************
| 6,756 |
803 | <reponame>ScriptBox99/Extensible-Storage-Engine
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
#include "std.hxx"
// default values are set based on small pages (2/4/8kiB)
WORD TAGFLD::fExtendedInfo = 0x4000;
//WORD TAGFLD::fNull = 0x2000;
WORD TAGFLD::maskIb = 0x1fff;
INLINE VOID DeleteEntryAndData(
BYTE * const pbEntry,
const ULONG cbEntry,
BYTE * const pbData,
const ULONG cbData,
BYTE * const pbMax )
{
const BYTE * const pbNextEntry = pbEntry + cbEntry;
UtilMemMove(
pbEntry,
pbNextEntry,
pbMax - pbNextEntry );
if( 0 != cbData )
{
// we have already shifted the data down by cbEntry so pbNextEntry has changes
const BYTE * const pbMaxNew = pbMax - cbEntry;
BYTE * const pbDataNew = pbData - cbEntry;
const BYTE * const pbNextData = pbData + cbData - cbEntry;
UtilMemMove(
pbDataNew,
pbNextData,
pbMaxNew - pbNextData );
}
}
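// Illustrative note (added): DeleteEntryAndData() operates on the layout
//      [ fixed-size entry array | variable-size data region ) ending at pbMax.
// The first UtilMemMove() closes the cbEntry-byte hole in the entry array,
// which also slides the data region down by cbEntry; the second UtilMemMove()
// therefore rebases its pointers by cbEntry before closing the cbData-byte
// hole left by the deleted entry's data.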
VOID MULTIVALUES::AddInstance(
const DATA * const pdataToSet,
const JET_COLTYP coltyp,
const BOOL fSeparatedLV )
{
const ULONG imvAdd = CMultiValues();
const ULONG cMultiValuesCurr = CMultiValues();
const ULONG cbMultiValuesCurr = CbMultiValues();
BYTE * const pbMaxCurr = PbMax();
Assert( cMultiValuesCurr >= 2 );
// shift to make room for new MVOFFSET
UtilMemMove(
PbStartOfMVData() + sizeof(MVOFFSET),
PbStartOfMVData(),
PbMax() - PbStartOfMVData() );
// fix up offsets
for ( ULONG imv = 0; imv < cMultiValuesCurr; imv++ )
{
DeltaIb( imv, sizeof(MVOFFSET) );
}
MVOFFSET * const pmvoffAdd = Rgmvoffs() + imvAdd;
*pmvoffAdd = USHORT( sizeof(MVOFFSET) + cbMultiValuesCurr ); // implicitly clears high bit
if ( fSeparatedLV )
{
Assert( Pheader()->FColumnCanBeSeparated() );
Assert( !fSeparatedLV || REC::FValidLidRef( *pdataToSet ) );
SetFSeparatedInstance( imvAdd );
UtilMemCpy(
pbMaxCurr + sizeof(MVOFFSET),
pdataToSet->Pv(),
pdataToSet->Cb() );
}
else
{
RECCopyData(
pbMaxCurr + sizeof(MVOFFSET),
pdataToSet,
coltyp );
}
m_cMultiValues++;
m_cbMultiValues += sizeof(MVOFFSET) + pdataToSet->Cb();
}
VOID MULTIVALUES::RemoveInstance( const ULONG itagSequence )
{
Assert( itagSequence > 0 );
Assert( itagSequence <= CMultiValues() );
Assert( CMultiValues() > 2 );
const ULONG imvDelete = itagSequence - 1;
const ULONG cbDataDelete = CbData( imvDelete );
Assert( imvDelete < CMultiValues() );
DeleteEntryAndData(
(BYTE *)( Rgmvoffs() + imvDelete ),
sizeof(MVOFFSET),
cbDataDelete > 0 ? PbData( imvDelete ) : NULL,
cbDataDelete,
PbMax() );
// update MULTIVALUES members to reflect deleted instance
m_cbMultiValues -= sizeof(MVOFFSET) + cbDataDelete;
m_cMultiValues--;
// update offsets
ULONG imv;
for ( imv = 0;
imv < imvDelete;
imv++ )
{
const INT cbMVOffset = sizeof(MVOFFSET);
DeltaIb( imv, -cbMVOffset );
}
Assert( imvDelete == imv );
for ( ;
imv < CMultiValues();
imv++ )
{
const SHORT cbMVOffsetAndData = SHORT( sizeof(MVOFFSET) + cbDataDelete );
DeltaIb( imv, SHORT( -cbMVOffsetAndData ) );
}
// only the first entry is compressed. if it is removed then the compression flag
// should be reset
if( 0 == imvDelete )
{
Pheader()->ResetFCompressed();
}
}
VOID MULTIVALUES::UpdateInstance(
const ULONG itagSequence,
const DATA * const pdataToSet,
const JET_COLTYP coltyp,
const BOOL fSeparatedLV,
const BOOL fCompressedLV )
{
const ULONG imvReplace = itagSequence - 1;
BYTE * const pbDataReplace = PbData( imvReplace );
const ULONG cbDataReplace = CbData( imvReplace );
const INT delta = pdataToSet->Cb() - cbDataReplace;
BYTE * const pbDataNext = pbDataReplace + cbDataReplace;
Assert( itagSequence > 0 );
Assert( itagSequence <= CMultiValues() );
if ( 0 != delta )
{
// shift data to accommodate updated instance
UtilMemMove(
pbDataReplace + pdataToSet->Cb(),
pbDataNext,
PbMax() - pbDataNext );
// update offsets to reflect shifted data
for ( ULONG imv = imvReplace + 1; imv < CMultiValues(); imv++ )
{
DeltaIb( imv, (SHORT)delta );
}
}
m_cbMultiValues += delta;
if( 0 == imvReplace )
{
if ( fCompressedLV )
{
Pheader()->SetFCompressed();
}
else
{
Pheader()->ResetFCompressed();
}
}
if ( fSeparatedLV )
{
Assert( Pheader()->FColumnCanBeSeparated() );
Assert( REC::FValidLidRef( *pdataToSet ) );
SetFSeparatedInstance( imvReplace );
UtilMemCpy(
pbDataReplace,
pdataToSet->Pv(),
pdataToSet->Cb() );
}
else
{
ResetFSeparatedInstance( imvReplace );
RECCopyData(
pbDataReplace,
pdataToSet,
coltyp );
}
}
VOID TWOVALUES::UpdateInstance(
const ULONG itagSequence,
const DATA * const pdataToSet,
const JET_COLTYP coltyp )
{
Assert( !Pheader()->FColumnCanBeSeparated() );
Assert( !Pheader()->FSeparated() );
Assert( 1 == itagSequence || 2 == itagSequence );
if ( 1 == itagSequence )
{
// shift second value accordingly
UtilMemMove(
PbData() + pdataToSet->Cb(),
PbData() + CbFirstValue(),
CbSecondValue() );
// copy in updated value
RECCopyData(
PbData(),
pdataToSet,
coltyp );
// update length
*m_pcbFirstValue = TVLENGTH( pdataToSet->Cb() );
}
else
{
RECCopyData(
PbData() + CbFirstValue(),
pdataToSet,
coltyp );
m_cbSecondValue = TVLENGTH( pdataToSet->Cb() );
}
}
VOID TAGFIELDS::ConvertTwoValuesToMultiValues(
TWOVALUES * const ptv,
const DATA * const pdataToSet,
const JET_COLTYP coltyp )
{
TAGFLD_HEADER * const pheader = ptv->Pheader();
const ULONG cbFirstValue = ptv->CbFirstValue();
const ULONG cbSecondValue = ptv->CbSecondValue();
BYTE * const pbData = ptv->PbData();
Assert( !FRECLongValue( coltyp ) );
Assert( !pheader->FSeparated() );
Assert( !pheader->FColumnCanBeSeparated() );
// must be adding a 3rd instance, so make room for the
// appropriate offsets array
UtilMemMove(
pbData + ( 3 * sizeof(MULTIVALUES::MVOFFSET) ) - sizeof(TWOVALUES::TVLENGTH),
pbData,
cbFirstValue + cbSecondValue );
MULTIVALUES::MVOFFSET * const rgmvoffs = (MULTIVALUES::MVOFFSET *)ptv->PcbFirstValue();
rgmvoffs[0] = 3 * sizeof(MULTIVALUES::MVOFFSET);
rgmvoffs[1] = USHORT( rgmvoffs[0] + cbFirstValue );
rgmvoffs[2] = USHORT( rgmvoffs[1] + cbSecondValue );
// copy in new instance
RECCopyData(
(BYTE *)rgmvoffs + rgmvoffs[2],
pdataToSet,
coltyp );
Assert( pheader->FMultiValues() );
pheader->ResetFTwoValues();
}
VOID TAGFIELDS::InsertTagfld(
const ULONG itagfldInsert,
TAGFLD * const ptagfldInsert,
const DATA * const pdataToInsert,
const JET_COLTYP coltyp,
const BOOL fSeparatedLV,
const BOOL fCompressedLV,
const BOOL fEncryptedLV )
{
// if pdataToInsert is NULL, we must be setting a column explicitly
// to NULL to override a default value
Assert( ( ptagfldInsert->FNull( NULL ) && NULL == pdataToInsert )
|| ( !ptagfldInsert->FNull( NULL ) && NULL != pdataToInsert ) );
Assert( itagfldInsert <= CTaggedColumns() );
ULONG cbDataToInsert = pdataToInsert ? pdataToInsert->Cb() : 0;
cbDataToInsert += ptagfldInsert->FExtendedInfo() ? sizeof( TAGFLD_HEADER ) : 0;
ULONG ibTagFld = sizeof( TAGFLD );
if ( itagfldInsert < CTaggedColumns() )
{
const ULONG ibMoveDataFrom = Ptagfld( itagfldInsert )->Ib();
// make room for the new tagged data
BYTE* pbMoveFrom = PbData( itagfldInsert );
UtilMemMove(
pbMoveFrom + sizeof(TAGFLD) + cbDataToInsert,
pbMoveFrom,
PbMax() - pbMoveFrom );
// make room for the new TAGFLD entry
pbMoveFrom = (BYTE *)Ptagfld( itagfldInsert );
UtilMemMove(
pbMoveFrom + sizeof(TAGFLD),
pbMoveFrom,
ibMoveDataFrom - ( itagfldInsert * sizeof(TAGFLD) ) );
// insert at the point we vacated, after making room for the new TAGFLD entry
ibTagFld += ibMoveDataFrom;
}
else if ( CTaggedColumns() > 0 )
{
// append to the end, after making room for the new TAGFLD entry
BYTE* pbMoveFrom = PbStartOfTaggedData();
UtilMemMove(
pbMoveFrom + sizeof(TAGFLD),
pbMoveFrom,
CbTaggedData() );
ibTagFld += CbTaggedColumns();
}
// IMPORTANT!!!!
// ptagfldInsert is in small page format, so read the fNull flag before SetIb() trashes it
//
BOOL fNullVal = ptagfldInsert->FNull( NULL );
Assert( ibTagFld == USHORT( ibTagFld ));
ptagfldInsert->SetIb( USHORT( ibTagFld ) );
// update TAGFIELD members to reflect new TAGFLD
m_cbTaggedColumns += sizeof(TAGFLD) + cbDataToInsert;
m_cTaggedColumns++;
// insert new data
if ( cbDataToInsert > 0 )
{
BYTE * pbInsert = PbTaggedColumns() + ptagfldInsert->Ib();
Assert( pdataToInsert || !FIsSmallPage() );
Assert( !fNullVal || !FIsSmallPage() );
if ( ptagfldInsert->FExtendedInfo() )
{
// reserve byte for extended info
new( (TAGFLD_HEADER *)pbInsert ) TAGFLD_HEADER( coltyp, fSeparatedLV, fNullVal, fCompressedLV, fEncryptedLV );
pbInsert += sizeof( TAGFLD_HEADER );
}
if ( ( ptagfldInsert->FExtendedInfo() ? sizeof( TAGFLD_HEADER ) : 0 ) < cbDataToInsert )
{
RECCopyData(
pbInsert,
pdataToInsert,
coltyp );
}
}
// insert new TAGFLD
UtilMemCpy(
Ptagfld( itagfldInsert ),
ptagfldInsert,
sizeof(TAGFLD) );
Assert( Ptagfld( itagfldInsert )->Ib() >= ( sizeof(TAGFLD) * CTaggedColumns() ) );
Assert( Ptagfld( itagfldInsert )->Ib() <= CbTaggedColumns() );
// just need to update tag array
ULONG itagfld;
for ( itagfld = 0;
itagfld < itagfldInsert;
itagfld++ )
{
Ptagfld( itagfld )->DeltaIb( sizeof(TAGFLD) );
}
for ( itagfld++; // skip inserted TAGFLD
itagfld < CTaggedColumns();
itagfld++ )
{
Ptagfld( itagfld )->DeltaIb( SHORT( sizeof(TAGFLD) + cbDataToInsert ) );
}
}
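// Illustrative note (added): a TAGFIELDS column section is an array of
// fixed-size TAGFLD entries followed by the per-column data; each entry's
// Ib() is an offset from the start of that section, so it is always at least
// sizeof(TAGFLD) * CTaggedColumns().  Inserting or deleting an entry therefore
// shifts both the data and every stored offset by sizeof(TAGFLD) (plus the
// data size for the columns that follow the change).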
VOID TAGFIELDS::ResizeTagfld(
const ULONG itagfldResize,
const INT delta )
{
Assert( itagfldResize < CTaggedColumns() );
Assert( 0 != delta );
// shift data after this column as needed
// to collapse space or make room
if ( itagfldResize < CTaggedColumns() - 1 )
{
BYTE * const pbMoveFrom = PbData( itagfldResize+1 );
UtilMemMove(
pbMoveFrom + delta,
pbMoveFrom,
CbTaggedColumns() - Ptagfld( itagfldResize+1 )->Ib() );
}
// update TAGFIELDS members to reflect new data
m_cbTaggedColumns += delta;
// update offsets
for ( ULONG itagfld = itagfldResize+1;
itagfld < CTaggedColumns();
itagfld++ )
{
Ptagfld( itagfld )->DeltaIb( (SHORT)delta );
}
}
VOID TAGFIELDS::ReplaceTagfldData(
const ULONG itagfldReplace,
const DATA * const pdataNew,
const JET_COLTYP coltyp,
const BOOL fSeparatedLV,
const BOOL fCompressedLV,
const BOOL fEncryptedLV )
{
TAGFLD * const ptagfld = Ptagfld( itagfldReplace );
TAGFLD_HEADER * pheader = Pheader( itagfldReplace );
BYTE * pbData = PbData( itagfldReplace );
ptagfld->ResetFNull( this );
if ( NULL != pheader )
{
Assert( CbData( itagfldReplace ) == pdataNew->Cb() + sizeof(TAGFLD_HEADER) );
pbData += sizeof(TAGFLD_HEADER);
if ( fCompressedLV )
{
Assert( pheader->FLongValue() );
pheader->SetFCompressed();
}
else
{
pheader->ResetFCompressed();
}
if ( fEncryptedLV )
{
Assert( pheader->FLongValue() );
pheader->SetFEncrypted();
}
else
{
pheader->ResetFEncrypted();
}
if ( fSeparatedLV )
{
// force fSeparated flag to be set
Assert( pheader->FColumnCanBeSeparated() );
pheader->SetFSeparated();
}
else
{
// reset fSeparated flag
pheader->ResetFSeparated();
}
Assert( ! ( pheader->FSeparated() && pheader->FCompressed() ) );
}
else
{
const BOOL fNeedHeader = ( FRECLongValue( coltyp ) );
if ( fNeedHeader )
{
Assert( CbData( itagfldReplace ) == pdataNew->Cb() + sizeof(TAGFLD_HEADER) );
pheader = (TAGFLD_HEADER *)pbData;
new( pheader ) TAGFLD_HEADER( coltyp, fSeparatedLV, fFalse, fCompressedLV, fEncryptedLV );
ptagfld->SetFExtendedInfo();
pbData += sizeof(TAGFLD_HEADER);
}
else
{
Assert( CbData( itagfldReplace ) == (ULONG)pdataNew->Cb() );
#ifdef UNLIMITED_MULTIVALUES
UNDONE: convert from intrinsic to separated
#else
Assert( !FRECLongValue( coltyp ) );
Assert( !fSeparatedLV );
#endif
}
}
RECCopyData( pbData, pdataNew, coltyp );
}
VOID TAGFIELDS::DeleteTagfld(
const ULONG itagfldDelete )
{
const ULONG cbDataDelete = CbData( itagfldDelete );
Assert( itagfldDelete < CTaggedColumns() );
DeleteEntryAndData(
(BYTE *)Ptagfld( itagfldDelete ),
sizeof(TAGFLD),
cbDataDelete > 0 ? PbData( itagfldDelete ) : NULL,
cbDataDelete,
PbMax() );
// update TAGFLD members to reflect deleted TAGFLD
m_cbTaggedColumns -= sizeof(TAGFLD) + cbDataDelete;
m_cTaggedColumns--;
// update offsets
ULONG itagfld;
for ( itagfld = 0;
itagfld < itagfldDelete;
itagfld++ )
{
const INT cbTagfld = sizeof(TAGFLD);
Ptagfld( itagfld )->DeltaIb( -cbTagfld );
}
Assert( itagfldDelete == itagfld );
for ( ;
itagfld < CTaggedColumns();
itagfld++ )
{
const SHORT cbTagfldAndData = SHORT( sizeof(TAGFLD) + cbDataDelete );
Ptagfld( itagfld )->DeltaIb( SHORT( -cbTagfldAndData ) );
}
}
VOID TAGFIELDS::ConvertToTwoValues(
const ULONG itagfld,
const DATA * const pdataToSet,
const JET_COLTYP coltyp )
{
Assert( itagfld < CTaggedColumns() );
BYTE * const pbData = PbData( itagfld );
const ULONG cbDataCurr = CbData( itagfld );
Assert( cbDataCurr <= JET_cbColumnMost ); // otherwise, it would have been LongText/Binary
Assert( NULL != pdataToSet );
ResizeTagfld(
itagfld,
sizeof(TAGFLD_HEADER) + sizeof(TWOVALUES::TVLENGTH) + pdataToSet->Cb() );
// make room for the header and cbFirstValue
UtilMemMove(
pbData + sizeof(TAGFLD_HEADER) + sizeof(TWOVALUES::TVLENGTH),
pbData,
cbDataCurr );
TAGFLD_HEADER * const pheader = (TAGFLD_HEADER *)pbData;
TWOVALUES::TVLENGTH * const pcbFirstValue = (TWOVALUES::TVLENGTH *)( pbData + sizeof(TAGFLD_HEADER) );
Assert( !FRECLongValue( coltyp ) );
new( pheader ) TAGFLD_HEADER( coltyp, fFalse, fFalse, fFalse, fFalse );
pheader->SetFMultiValues();
pheader->SetFTwoValues();
*pcbFirstValue = (TWOVALUES::TVLENGTH)cbDataCurr;
RECCopyData(
(BYTE *)pcbFirstValue + sizeof(TWOVALUES::TVLENGTH) + cbDataCurr,
pdataToSet,
coltyp );
Assert( CbData( itagfld ) ==
sizeof(TAGFLD_HEADER)
+ sizeof(TWOVALUES::TVLENGTH)
+ cbDataCurr
+ pdataToSet->Cb() );
}
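// Illustrative note (added): after the conversion above, the column's data is
// laid out as a TAGFLD_HEADER, then a TVLENGTH holding the first value's size,
// then the first value's bytes, then the second value's bytes (the second
// length is implied by the total column size), which is exactly the layout
// that TWOVALUES parses.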
VOID TAGFIELDS::ConvertToMultiValues(
const ULONG itagfld,
const DATA * const pdataToSet,
const BOOL fDataToSetIsSeparated )
{
Assert( itagfld < CTaggedColumns() );
BYTE * const pbData = PbData( itagfld );
const ULONG cbDataCurr = CbData( itagfld );
TAGFLD_HEADER * const pheader = (TAGFLD_HEADER *)pbData;
Assert( cbDataCurr >= sizeof(TAGFLD_HEADER) );
const ULONG cbDataCurrWithoutHeader = cbDataCurr - sizeof(TAGFLD_HEADER);
// must already have a header (ie. LongValue or !g_fSmallPage),
// just upgrade it to MultiValues
Assert( NULL != Pheader( itagfld ) );
Assert( pheader == Pheader( itagfld ) );
Assert( pheader->FColumnCanBeSeparated() || !FIsSmallPage() );
const BOOL fDataCurrIsSeparated = pheader->FSeparated();
pheader->ResetFSeparated();
Assert( !pheader->FMultiValues() );
Assert( !pheader->FTwoValues() );
pheader->SetFMultiValues();
Assert( NULL != pdataToSet );
ResizeTagfld(
itagfld,
( 2 * sizeof(MULTIVALUES::MVOFFSET) ) + pdataToSet->Cb() );
// make room for the offset info
UtilMemMove(
pbData + sizeof(TAGFLD_HEADER) + ( 2 * sizeof(MULTIVALUES::MVOFFSET) ),
pbData + sizeof(TAGFLD_HEADER),
cbDataCurrWithoutHeader );
MULTIVALUES::MVOFFSET * const rgmvoffs = (MULTIVALUES::MVOFFSET *)( pbData + sizeof(TAGFLD_HEADER) );
rgmvoffs[0] = ( 2 * sizeof(MULTIVALUES::MVOFFSET) );
rgmvoffs[1] = USHORT( ( 2 * sizeof(MULTIVALUES::MVOFFSET) )
+ cbDataCurrWithoutHeader );
// must be long values, so no need to do endian conversion
// RECCopyData( (BYTE *)rgmvoffs + rgmvoffs[1], pdataToSet, coltyp );
Assert( !( rgmvoffs[0] & MULTIVALUES::maskFlags ) );
Assert( !( rgmvoffs[1] & MULTIVALUES::maskFlags ) );
UtilMemCpy(
(BYTE *)rgmvoffs + rgmvoffs[1],
pdataToSet->Pv(),
pdataToSet->Cb() );
if ( fDataCurrIsSeparated )
{
Assert( sizeof( _LID64 ) == cbDataCurrWithoutHeader || sizeof( _LID32 ) == cbDataCurrWithoutHeader );
rgmvoffs[0] = USHORT( rgmvoffs[0] | MULTIVALUES::fSeparatedInstance );
}
if ( fDataToSetIsSeparated )
{
Assert( REC::FValidLidRef( *pdataToSet ) );
rgmvoffs[1] = USHORT( rgmvoffs[1] | MULTIVALUES::fSeparatedInstance );
}
Assert( CbData( itagfld ) ==
sizeof(TAGFLD_HEADER)
+ ( 2 * sizeof(MULTIVALUES::MVOFFSET) )
+ cbDataCurrWithoutHeader
+ pdataToSet->Cb() );
}
ULONG TAGFIELDS::CbConvertTwoValuesToSingleValue(
const ULONG itagfld,
const ULONG itagSequence )
{
ULONG cbShrink = 0;
#ifdef DEBUG
Assert( itagfld < CTaggedColumns() );
const TAGFLD_HEADER * const pheader = Pheader( itagfld );
Assert( NULL != pheader );
Assert( pheader->FMultiValues() );
Assert( pheader->FTwoValues() );
Assert( !pheader->FLongValue() );
#ifdef UNLIMITED_MULTIVALUES
#else
Assert( !pheader->FSeparated() );
#endif
#endif
if ( 1 == itagSequence || 2 == itagSequence )
{
BYTE * const pbDataCurr = PbData( itagfld );
const ULONG cbDataCurr = CbData( itagfld );
const BYTE * pbDataRemaining;
ULONG cbDataRemaining;
TWOVALUES tv( pbDataCurr, cbDataCurr );
if ( 1 == itagSequence )
{
// remove first value, retain second value
pbDataRemaining = tv.PbData() + tv.CbFirstValue();
cbDataRemaining = tv.CbSecondValue();
}
else
{
// remove second value, retain first value
pbDataRemaining = tv.PbData();
cbDataRemaining = tv.CbFirstValue();
}
Assert( cbDataCurr > cbDataRemaining );
cbShrink = cbDataCurr - cbDataRemaining;
// move remaining data to beginning of this TAGFLD,
// since we have no more need for the TAGFLD_HEADER
UtilMemMove(
pbDataCurr,
pbDataRemaining,
cbDataRemaining );
// shift rest of columns, if necessary
if ( itagfld < CTaggedColumns() - 1 )
{
const BYTE * const pbDataNextColumn = PbData( itagfld+1 );
UtilMemMove(
pbDataCurr + cbDataRemaining,
pbDataNextColumn,
PbMax() - pbDataNextColumn );
}
// clear flags
Assert( Ptagfld( itagfld )->FExtendedInfo() );
Ptagfld( itagfld )->ResetFExtendedInfo();
// update offsets
for ( ULONG itagfldT = itagfld+1;
itagfldT < CTaggedColumns();
itagfldT++ )
{
const SHORT cbT = (SHORT)cbShrink;
Ptagfld( itagfldT )->DeltaIb( SHORT( -cbT ) );
}
}
else
{
// appending NULL, which is a NOP
}
// update size
m_cbTaggedColumns -= cbShrink;
return cbShrink;
}
ULONG TAGFIELDS::CbDeleteMultiValue(
const ULONG itagfld,
const ULONG itagSequence )
{
ULONG cbShrink = 0;
MULTIVALUES mv( PbData( itagfld ), CbData( itagfld ) );
TAGFLD_HEADER * pheader;
#ifdef DEBUG
Assert( itagfld < CTaggedColumns() );
pheader = Pheader( itagfld );
Assert( NULL != pheader );
Assert( pheader->FMultiValues() );
Assert( !pheader->FTwoValues() );
#endif
if ( itagSequence > 0 && itagSequence <= mv.CMultiValues() )
{
pheader = mv.Pheader();
ULONG cbDataRemaining;
Assert( NULL != pheader );
Assert( sizeof(TAGFLD_HEADER) + mv.CbMultiValues() == CbData( itagfld ) );
if ( mv.CMultiValues() > 2 )
{
mv.RemoveInstance( itagSequence );
cbDataRemaining = sizeof(TAGFLD_HEADER) + mv.CbMultiValues();
}
else
{
// only one value will be left, so convert to non-multivalue
Assert( 1 == itagSequence || 2 == itagSequence );
const ULONG imvRemaining = 2 - itagSequence; // calculate remaining itagSequence and convert to imv
const BOOL fSeparatedInstance = mv.FSeparatedInstance( imvRemaining );
BYTE * const pbMoveFrom = mv.PbData( imvRemaining );
const ULONG cbMove = mv.CbData( imvRemaining );
BYTE * pbMoveTo;
cbDataRemaining = cbMove;
#ifdef UNLIMITED_MULTIVALUES
#else
Assert( !pheader->FSeparated() );
#endif
Assert( !pheader->FTwoValues() );
pheader->ResetFMultiValues();
if ( pheader->FNeedExtendedInfo() )
{
if( 1 == itagSequence )
{
// itagSequence 1 is the only instance that can be compressed in an MV
// if that is being removed, reset the compression flag
pheader->ResetFCompressed();
}
#ifdef UNLIMITED_MULTIVALUES
UNDONE: how to deal with one remaining
multi-value (may or may not be separated)
when the MULTIVALUES structure itself has
been separated?
#else
// if remaining instance is separated,
// must now flag it as such in the
// extended info
Assert( pheader->FColumnCanBeSeparated() || !FIsSmallPage() );
if ( fSeparatedInstance )
pheader->SetFSeparated();
else
{
Assert( !pheader->FSeparated() ); // should already be unset, but better safe than sorry
pheader->ResetFSeparated();
}
#endif
pbMoveTo = (BYTE *)pheader + sizeof(TAGFLD_HEADER);
cbDataRemaining++;
}
else
{
// if no other flags set, then can get rid of header byte
Assert( !fSeparatedInstance );
Ptagfld( itagfld )->ResetFExtendedInfo();
pbMoveTo = (BYTE *)pheader;
}
Assert( pbMoveFrom > pbMoveTo );
UtilMemMove(
pbMoveTo,
pbMoveFrom,
cbMove );
}
Assert( cbDataRemaining < CbData( itagfld ) );
cbShrink = CbData( itagfld ) - cbDataRemaining;
// shift rest of columns, if necessary
if ( itagfld < CTaggedColumns() - 1 )
{
const BYTE * const pbDataNextColumn = PbData( itagfld+1 );
UtilMemMove(
(BYTE *)pheader + cbDataRemaining,
pbDataNextColumn,
PbMax() - pbDataNextColumn );
}
// update size
m_cbTaggedColumns -= cbShrink;
// update offsets
for ( ULONG itagfldT = itagfld+1;
itagfldT < CTaggedColumns();
itagfldT++ )
{
const SHORT cbT = (SHORT)cbShrink;
Ptagfld( itagfldT )->DeltaIb( SHORT( -cbT ) );
}
}
else
{
// appending NULL, which is a NOP
}
return cbShrink;
}
ERR TWOVALUES::ErrCheckUnique(
_In_ const FIELD * const pfield,
_In_ const DATA& dataToSet,
_In_ const ULONG itagSequence,
_In_ const NORM_LOCALE_VER* const pnlv,
_In_ const BOOL fNormalizedDataToSetIsTruncated )
{
ERR err;
const BOOL fNormalize = ( pfieldNil != pfield );
DATA dataRec;
Assert( !fNormalizedDataToSetIsTruncated || fNormalize );
#ifdef UNLIMITED_MULTIVALUES
#else
Assert( !Pheader()->FSeparated() );
#endif
Assert( !Pheader()->FLongValue() );
if ( 1 != itagSequence )
{
dataRec.SetPv( PbData() );
dataRec.SetCb( CbFirstValue() );
CallR( ErrRECICheckUnique(
pfield,
dataToSet,
dataRec,
pnlv,
fNormalizedDataToSetIsTruncated ) );
}
if ( 2 != itagSequence )
{
dataRec.SetPv( PbData() + CbFirstValue() );
dataRec.SetCb( CbSecondValue() );
CallR( ErrRECICheckUnique(
pfield,
dataToSet,
dataRec,
pnlv,
fNormalizedDataToSetIsTruncated ) );
}
return JET_errSuccess;
}
ERR MULTIVALUES::ErrCheckUnique(
_In_ const FIELD * const pfield,
_In_ const DATA& dataToSet,
_In_ const ULONG itagSequence,
_In_ const NORM_LOCALE_VER* pnlv,
_In_ const BOOL fNormalizedDataToSetIsTruncated )
{
ERR err;
const BOOL fNormalize = ( pfieldNil != pfield );
DATA dataRec;
ULONG imv;
Assert( !fNormalizedDataToSetIsTruncated || fNormalize );
#ifdef UNLIMITED_MULTIVALUES
#else
Assert( !Pheader()->FSeparated() );
#endif
Assert( !Pheader()->FLongValue() );
for ( imv = 0; imv < CMultiValues(); imv++ )
{
Assert( !FSeparatedInstance( imv ) );
if ( itagSequence != imv+1 )
{
dataRec.SetPv( PbData( imv ) );
dataRec.SetCb( CbData( imv ) );
CallR( ErrRECICheckUnique(
pfield,
dataToSet,
dataRec,
pnlv,
fNormalizedDataToSetIsTruncated ) );
}
}
return JET_errSuccess;
}
ERR TAGFIELDS::ErrCheckUniqueMultiValues(
_In_ const FIELD * const pfield,
_In_ const DATA& dataToSet,
_In_ const ULONG itagfld,
_In_ const ULONG itagSequence,
_In_ const NORM_LOCALE_VER* const pnlv,
_In_ const BOOL fNormalizedDataToSetIsTruncated )
{
ERR err = JET_errSuccess;
Assert( !fNormalizedDataToSetIsTruncated || pfieldNil != pfield );
Assert( !Ptagfld( itagfld )->FNull( this ) );
const TAGFLD_HEADER * const pheader = Pheader( itagfld );
if ( NULL != pheader
&& pheader->FMultiValues() )
{
Assert( !pheader->FSeparated() );
Assert( !pheader->FLongValue() ); // long values are handled in ErrFLDSetOneColumn()
if ( pheader->FTwoValues() )
{
TWOVALUES tv( PbData( itagfld ), CbData( itagfld ) );
err = tv.ErrCheckUnique(
pfield,
dataToSet,
itagSequence,
pnlv,
fNormalizedDataToSetIsTruncated );
}
else
{
MULTIVALUES mv( PbData( itagfld ), CbData( itagfld ) );
err = mv.ErrCheckUnique(
pfield,
dataToSet,
itagSequence,
pnlv,
fNormalizedDataToSetIsTruncated );
}
}
else if ( 1 != itagSequence )
{
DATA dataRec;
dataRec.SetPv( PbData( itagfld ) );
dataRec.SetCb( CbData( itagfld ) );
err = ErrRECICheckUnique(
pfield,
dataToSet,
dataRec,
pnlv,
fNormalizedDataToSetIsTruncated );
}
else
{
// overwriting the only instance, so no need to check anything
}
return err;
}
ERR TAGFIELDS::ErrCheckUniqueNormalizedMultiValues(
_In_ const FIELD * const pfield,
_In_ const DATA& dataToSet,
_In_ const ULONG itagfld,
_In_ const ULONG itagSequence,
_In_ const NORM_LOCALE_VER* const pnlv )
{
ERR err;
DATA dataToSetNorm;
BYTE rgbNorm[JET_cbKeyMost_OLD];
BOOL fNormalizedDataToSetIsTruncated;
Assert( pfieldNil != pfield );
dataToSetNorm.SetPv( rgbNorm );
CallR( ErrFLDNormalizeTaggedData(
pfield,
dataToSet,
dataToSetNorm,
pnlv,
fFalse, /* GUID collation does not affect uniqueness */
&fNormalizedDataToSetIsTruncated ) );
CallR( ErrCheckUniqueMultiValues(
pfield,
dataToSetNorm,
itagfld,
itagSequence,
pnlv,
fNormalizedDataToSetIsTruncated ) );
return JET_errSuccess;
}
ERR TAGFIELDS::ErrSetColumn(
FUCB * const pfucb,
const FIELD * const pfield,
const COLUMNID columnid,
const ULONG itagSequence,
const DATA * const pdataToSet,
const JET_GRBIT grbit )
{
const FID fid = FidOfColumnid( columnid );
const ULONG cbRec = pfucb->dataWorkBuf.Cb();
const BOOL fUseDerivedBit = ( grbit & grbitSetColumnUseDerivedBit );
const BOOL fEnforceUniqueMultiValues = ( ( grbit & ( JET_bitSetUniqueMultiValues|JET_bitSetUniqueNormalizedMultiValues ) )
&& NULL != pdataToSet
&& !FRECLongValue( pfield->coltyp ) ); // long value uniqueness is checked in ErrFLDSetOneColumn()
const bool fLargePage = !FIsSmallPage();
const ULONG cbTagHeaderDefault = fLargePage ? sizeof( TAGFLD_HEADER ) : 0; // default tag header size for given page size
#ifdef DEBUG
ULONG flagCodePath = 0; // bit field to track code path
#endif
Assert( ptdbNil != pfucb->u.pfcb->Ptdb() );
AssertValid( pfucb->u.pfcb->Ptdb() );
TAGFLD tagfldNew( fid, fUseDerivedBit );
const ULONG itagfld = ItagfldFind( tagfldNew );
Assert( itagfld <= CTaggedColumns() );
Assert( itagfld == CTaggedColumns()
|| !Ptagfld( itagfld )->FNull( this )
|| cbTagHeaderDefault == CbData( itagfld ) ); // If null, only the default tag header is present.
const BOOL fExists = ( itagfld < CTaggedColumns()
&& Ptagfld( itagfld )->FIsEqual( fid, fUseDerivedBit ) );
Assert( fExists
|| itagfld == CTaggedColumns()
|| Ptagfld( itagfld )->FIsGreaterThan( fid, fUseDerivedBit ) );
//================================================
// Specified column not found, so insert or append new
//================================================
if ( !fExists )
{
Assert( flagCodePath |= 0x1 );
ULONG cbField = tagfldNew.FExtendedInfo() ? sizeof( TAGFLD_HEADER ) : 0;
// Adding NULL: In most cases, we do nothing. However, there
// is one specialised case where we have to insert a null entry.
// This is the case where there are currently no instances of this
// column in the record, and where there is also a default value
// for this column.
//
if ( pdataToSet == NULL )
{
Assert( flagCodePath |= 0x10 );
if ( FFIELDDefault( pfield->ffield )
&& 1 == itagSequence
&& !( grbit & JET_bitSetRevertToDefaultValue ) )
{
Assert( flagCodePath |= 0x100 );
// INTENTIONALLY treat tagfldNew as small page format so that there is an
// easy place to put the fNull flag; later, inside InsertTagfld(), we grab
// the fNull flag back out before SetIb() trashes it.
tagfldNew.SetFNull( NULL );
}
else
return JET_errSuccess;
}
// add a non-null value (in JET terms, null != empty; null is the absence of a value, not just a zero-length one)
else
{
Assert( flagCodePath |= 0x20 );
cbField += pdataToSet->Cb();
if ( !fLargePage )
{
if ( !tagfldNew.FExtendedInfo() && FRECLongValue( pfield->coltyp ) )
{
cbField += sizeof( TAGFLD_HEADER );
tagfldNew.SetFExtendedInfo();
}
else
{
Assert( cbField <= JET_cbColumnMost );
Assert( !( grbit & grbitSetColumnSeparated ) );
}
}
}
// will column fit?
//
if ( cbRec + sizeof(TAGFLD) + cbField > (SIZE_T)REC::CbRecordMost( pfucb ) )
return ErrERRCheck( JET_errRecordTooBig );
InsertTagfld(
itagfld,
&tagfldNew,
pdataToSet,
pfield->coltyp,
grbit & grbitSetColumnSeparated,
grbit & grbitSetColumnCompressed,
grbit & grbitSetColumnEncrypted );
pfucb->dataWorkBuf.DeltaCb( sizeof(TAGFLD) + cbField );
}
//================================================
// Overwrite with a non-null value.
//================================================
else if ( pdataToSet )
{
#ifdef DEBUG
Assert( flagCodePath |= 0x2 );
Assert( itagfld < CTaggedColumns() );
Assert( Ptagfld( itagfld )->FIsEqual( fid, fUseDerivedBit ) );
if ( Ptagfld( itagfld )->FNull( this ) )
{
Assert( FFIELDDefault( pfield->ffield ) );
Assert( !Ptagfld( itagfld )->FExtendedInfo() || fLargePage );
Assert( cbTagHeaderDefault == CbData( itagfld ) );
}
#endif
if ( fEnforceUniqueMultiValues && !Ptagfld( itagfld )->FNull( this ) )
{
ERR errT;
Assert( !FRECLongValue( pfield->coltyp ) ); // long values are handled in ErrFLDSetOneColumn()
NORM_LOCALE_VER nlv =
{
SORTIDNil, // Sort GUID
PinstFromPfucb( pfucb )->m_dwLCMapFlagsDefault,
0, // NLS Version
0, // NLS Defined Version
L'\0', // Locale name
};
OSStrCbCopyW( &nlv.m_wszLocaleName[0], sizeof(nlv.m_wszLocaleName), PinstFromPfucb( pfucb )->m_wszLocaleNameDefault );
if ( grbit & JET_bitSetUniqueNormalizedMultiValues )
{
errT = ErrCheckUniqueNormalizedMultiValues(
pfield,
*pdataToSet,
itagfld,
itagSequence,
&nlv );
}
else
{
errT = ErrCheckUniqueMultiValues(
pfieldNil, // not normalising, so don't need pfield
*pdataToSet,
itagfld,
itagSequence,
&nlv,
fFalse );
}
if ( errT < 0 )
return errT;
}
Assert( FRECLongValue( pfield->coltyp )
|| pdataToSet->Cb() <= JET_cbColumnMost );
const TAGFLD_HEADER * const pheader = Pheader( itagfld );
//--------------------------------
// overwrite multivalue with non-null
//--------------------------------
if ( NULL != pheader
&& pheader->FMultiValues() )
{
Assert( flagCodePath |= 0x10 );
INT delta = pdataToSet->Cb();
Assert( !pheader->FSeparated() );
//================
// two values
if ( pheader->FTwoValues() )
{
Assert( flagCodePath |= 0x100 );
Assert( !(grbit & grbitSetColumnCompressed) );
Assert( !(grbit & grbitSetColumnEncrypted) );
TWOVALUES tv( PbData( itagfld ), CbData( itagfld ) );
// large page format doesn't use two values
Assert( !fLargePage );
Assert( !pheader->FColumnCanBeSeparated() );
Assert( !( grbit & grbitSetColumnSeparated ) );
if ( 1 == itagSequence || 2 == itagSequence )
{
Assert( flagCodePath |= 0x1000 );
// updating an existing instance
delta -= ( 1 == itagSequence ? tv.CbFirstValue() : tv.CbSecondValue() );
if ( cbRec + delta > (SIZE_T)REC::CbRecordMost( pfucb ) )
return ErrERRCheck( JET_errRecordTooBig );
// if value is growing, must make room for it
// if shrinking, cannot resize until after update is done
if ( delta > 0 )
ResizeTagfld( itagfld, delta );
tv.UpdateInstance(
itagSequence,
pdataToSet,
pfield->coltyp );
if ( delta < 0 )
ResizeTagfld( itagfld, delta );
Assert( sizeof(TAGFLD_HEADER)
+ sizeof(TWOVALUES::TVLENGTH)
+ tv.CbFirstValue()
+ tv.CbSecondValue()
== CbData( itagfld ) );
}
else
{
Assert( flagCodePath |= 0x2000 );
// adding a new instance, so must convert to MULTIVALUES
delta += ( ( 3 * sizeof(MULTIVALUES::MVOFFSET) ) - sizeof(TWOVALUES::TVLENGTH) );
Assert( delta > 0 );
if ( cbRec + delta > (SIZE_T)REC::CbRecordMost( pfucb ) )
return ErrERRCheck( JET_errRecordTooBig );
// first make room for the new data
ResizeTagfld( itagfld, delta );
ConvertTwoValuesToMultiValues(
&tv,
pdataToSet,
pfield->coltyp );
}
}
//================
// multivalues
else
{
Assert( flagCodePath |= 0x200 );
MULTIVALUES mv( PbData( itagfld ), CbData( itagfld ) );
Assert( !(grbit & grbitSetColumnEncrypted) );
if ( 1 != itagSequence && ( grbit & grbitSetColumnCompressed ) )
{
// only itag 1 can be compressed
return ErrERRCheck( errRECCompressionNotPossible );
}
if ( 0 == itagSequence || itagSequence > mv.CMultiValues() )
{
Assert( flagCodePath |= 0x1000 );
// adding a new instance
delta += sizeof(MULTIVALUES::MVOFFSET);
Assert( delta > 0 );
if ( cbRec + delta > (SIZE_T)REC::CbRecordMost( pfucb ) )
return ErrERRCheck( JET_errRecordTooBig );
ResizeTagfld( itagfld, delta );
mv.AddInstance(
pdataToSet,
pfield->coltyp,
grbit & grbitSetColumnSeparated );
}
else
{
Assert( flagCodePath |= 0x2000 );
// updating an existing instance
delta -= mv.CbData( itagSequence-1 );
if ( cbRec + delta > (SIZE_T)REC::CbRecordMost( pfucb ) )
return ErrERRCheck( JET_errRecordTooBig );
// if value is growing, must make room for it
// if shrinking, cannot resize until after update is done
if ( delta > 0 )
ResizeTagfld( itagfld, delta );
mv.UpdateInstance(
itagSequence,
pdataToSet,
pfield->coltyp,
grbit & grbitSetColumnSeparated,
grbit & grbitSetColumnCompressed );
if ( delta < 0 )
ResizeTagfld( itagfld, delta );
}
Assert( sizeof(TAGFLD_HEADER) + mv.CbMultiValues() == CbData( itagfld ) );
}
pfucb->dataWorkBuf.DeltaCb( delta );
}
//--------------------------------
// overwrite single value or turn null to non-null
//--------------------------------
else if ( 1 == itagSequence || Ptagfld( itagfld )->FNull( this ) )
{
Assert( flagCodePath |= 0x20 );
// overwrite with non-NULL value: have to shift record data
// Compute change in column size.
//
const ULONG cbTagField = CbData( itagfld );
INT dbFieldData = pdataToSet->Cb() + cbTagHeaderDefault - cbTagField;
if ( !fLargePage )
{
if ( FRECLongValue( pfield->coltyp ) )
{
// need header byte
dbFieldData += sizeof(TAGFLD_HEADER);
}
else
{
Assert( !Ptagfld( itagfld )->FExtendedInfo() );
Assert( NULL == pheader );
}
}
if ( cbRec + dbFieldData > (SIZE_T)REC::CbRecordMost( pfucb ) )
return ErrERRCheck( JET_errRecordTooBig );
// this column will no longer be NULL
Ptagfld( itagfld )->ResetFNull( this );
if ( 0 != dbFieldData )
ResizeTagfld( itagfld, dbFieldData );
ReplaceTagfldData(
itagfld,
pdataToSet,
pfield->coltyp,
grbit & grbitSetColumnSeparated,
grbit & grbitSetColumnCompressed,
grbit & grbitSetColumnEncrypted );
pfucb->dataWorkBuf.DeltaCb( dbFieldData );
}
//--------------------------------
// add a 2nd value
//--------------------------------
else
{
Assert( !(grbit & grbitSetColumnEncrypted) );
if ( grbit & grbitSetColumnCompressed )
{
// only itag 1 can be compressed
return ErrERRCheck( errRECCompressionNotPossible );
}
Assert( flagCodePath |= 0x30 );
ULONG cbGrow;
// adding a second instance, so must convert to TWOVALUES/MULTIVALUES
// tag column in large page ALWAYS turns into multivalues
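// (Added summary of the two conversion paths below: when a TAGFLD_HEADER already
//  exists it is reused, so the column grows by two MVOFFSET slots plus the new value
//  (cbGrow = 2 * sizeof(MULTIVALUES::MVOFFSET) + pdataToSet->Cb()); otherwise the
//  column gains a header byte, a TVLENGTH, and the new value
//  (cbGrow = sizeof(TAGFLD_HEADER) + sizeof(TWOVALUES::TVLENGTH) + pdataToSet->Cb()).)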
if ( NULL != pheader )
{
Assert( flagCodePath |= 0x100 );
Assert( FRECLongValue( pfield->coltyp ) || fLargePage );
Assert( pheader->FColumnCanBeSeparated() || fLargePage );
cbGrow = ( 2 * sizeof(MULTIVALUES::MVOFFSET) )
+ pdataToSet->Cb();
if ( cbRec + cbGrow > (SIZE_T)REC::CbRecordMost( pfucb ) )
return ErrERRCheck( JET_errRecordTooBig );
// first make room for new data
ConvertToMultiValues(
itagfld,
pdataToSet,
grbit & grbitSetColumnSeparated );
}
else
{
Assert( flagCodePath |= 0x200 );
Assert ( !fLargePage );
Assert( !FRECLongValue( pfield->coltyp ) );
cbGrow = sizeof(TAGFLD_HEADER)
+ sizeof(TWOVALUES::TVLENGTH)
+ pdataToSet->Cb();
if ( cbRec + cbGrow > (SIZE_T)REC::CbRecordMost( pfucb ) )
return ErrERRCheck( JET_errRecordTooBig );
// first make room for new data
ConvertToTwoValues(
itagfld,
pdataToSet,
pfield->coltyp );
Assert( !Ptagfld( itagfld )->FExtendedInfo() );
Ptagfld( itagfld )->SetFExtendedInfo();
}
pfucb->dataWorkBuf.DeltaCb( cbGrow );
}
}
//================================================
// Overwriting non-null with null
//================================================
else if ( !Ptagfld( itagfld )->FNull( this ) )
{
Assert( flagCodePath |= 0x3 );
// Ensure that we've found a field
//
Assert( itagfld < CTaggedColumns() );
Assert( Ptagfld( itagfld )->FIsEqual( fid, fUseDerivedBit ) );
TAGFLD_HEADER* const pheader = Pheader( itagfld );
//--------------------------------
// Delete one from multivalues
//--------------------------------
if ( NULL != pheader
&& pheader->FMultiValues() )
{
Assert( flagCodePath |= 0x10 );
const ULONG cbDataOld = CbData( itagfld );
const ULONG cbShrink = ( pheader->FTwoValues() ?
CbConvertTwoValuesToSingleValue( itagfld, itagSequence ) :
CbDeleteMultiValue( itagfld, itagSequence ) );
Assert( cbShrink < cbDataOld );
pfucb->dataWorkBuf.DeltaCb( 0 - cbShrink );
}
//--------------------------------
// Overwrite single value with NULL
//--------------------------------
else if ( 1 == itagSequence )
{
Assert( flagCodePath |= 0x20 );
// Overwrite with NULL: In most cases, just delete the occurrence from
// the record. However, there is one rare case where we have to
// leave behind a null entry. This is the case where there are no
// other instances of this column for this record, and where this
// column has a default value.
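// (Example added for clarity: if the column's default value is "abc" and the caller
//  nullifies its only instance without JET_bitSetRevertToDefaultValue, deleting the
//  TAGFLD outright would make retrieval fall back to "abc"; keeping an explicit null
//  TAGFLD records that the instance is intentionally NULL.)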
const ULONG cbTagField = CbData( itagfld );
if ( FFIELDDefault( pfield->ffield )
&& !( grbit & JET_bitSetRevertToDefaultValue ) )
{
Assert( flagCodePath |= 0x100 );
// leave a null entry
const ULONG cbTagFieldDiff = cbTagField - cbTagHeaderDefault;
if ( cbTagFieldDiff > 0 )
ResizeTagfld( itagfld, 0 - cbTagFieldDiff );
if ( fLargePage )
{
pheader->ResetFSeparated();
}
else
{
Ptagfld( itagfld )->ResetFExtendedInfo();
}
Ptagfld( itagfld )->SetFNull( this );
pfucb->dataWorkBuf.DeltaCb( 0 - cbTagFieldDiff );
}
else
{
Assert( flagCodePath |= 0x200 );
// delete everything
DeleteTagfld( itagfld );
pfucb->dataWorkBuf.DeltaCb( 0 - ( sizeof(TAGFLD) + cbTagField ) );
}
}
//--------------------------------
// appending NULL, which is a NOP
//--------------------------------
else
{
Assert( flagCodePath |= 0x30 );
}
}
//================================================
// Overwriting null with null. Either revert to default or do nothing.
//================================================
else
{
Assert( flagCodePath |= 0x4 );
Assert( itagfld < CTaggedColumns() );
Assert( Ptagfld( itagfld )->FIsEqual( fid, fUseDerivedBit ) );
Assert( FFIELDDefault( pfield->ffield ) );
Assert( !Ptagfld( itagfld )->FExtendedInfo() || fLargePage );
Assert( cbTagHeaderDefault == CbData( itagfld ) );
if ( grbit & JET_bitSetRevertToDefaultValue )
{
Assert( flagCodePath |= 0x10 );
DeleteTagfld( itagfld );
pfucb->dataWorkBuf.DeltaCb( INT( 0 - sizeof(TAGFLD) - cbTagHeaderDefault ) );
}
}
#ifdef DEBUG
//================================
// validate this column when it is not deleted
if ( 0x14 != flagCodePath && 0x223 != flagCodePath )
{
const TAGFLD* const ptagfld = Ptagfld( itagfld );
const TAGFLD_HEADER* const pheader = Pheader( itagfld );
// tag column in large page MUST have TAGFLD_HEADER
Assert( FIsSmallPage() || pheader );
if ( pheader )
{
// column type MUST be consistent with fLongValue flag
Assert( !!FRECLongValue( pfield->coltyp ) == !!pheader->FLongValue() );
}
else
{
// without header, long value column must be NULL
Assert( !FRECLongValue( pfield->coltyp ) || ptagfld->FNull( NULL ) );
}
}
//================================
// validate whole record
const REC * prec = (REC *)pfucb->dataWorkBuf.Pv();
const BYTE * pbRecMax = (BYTE *)prec + pfucb->dataWorkBuf.Cb();
const BYTE * pbStartOfTaggedColumns = prec->PbTaggedData();
Assert( pbStartOfTaggedColumns <= pbRecMax );
Assert( (BYTE *)Rgtagfld() == pbStartOfTaggedColumns );
Assert( pbStartOfTaggedColumns + CbTaggedColumns() == pbRecMax );
AssertValid( pfucb->u.pfcb->Ptdb() );
AssertValidTagColumns( pfucb );
#endif
return JET_errSuccess;
}
ERR TAGFIELDS::ErrRetrieveColumn(
FCB * const pfcb,
const COLUMNID columnid,
const ULONG itagSequence,
const DATA& dataRec,
DATA * const pdataRetrieveBuffer,
const ULONG grbit )
{
const TDB * const ptdb = ( pfcbNil == pfcb ? ptdbNil : pfcb->Ptdb() );
const FID fid = FidOfColumnid( columnid );
const BOOL fUseDerivedBit = ( grbit & grbitRetrieveColumnUseDerivedBit );
ULONG itagfld;
#ifdef DEBUG
const BOOL fUseDMLLatchDBG = ( fid > ptdb->FidTaggedLastInitial()
&& ( grbit & grbitRetrieveColumnDDLNotLocked ) );
if ( pfcbNil == pfcb )
{
// don't need any meta data info if we're not retrieving default values
Assert( grbit & JET_bitRetrieveIgnoreDefault );
}
else
{
Assert( ptdbNil != ptdb );
Assert( fid >= ptdb->FidTaggedFirst() );
Assert( fid <= ptdb->FidTaggedLast() );
AssertValid( ptdb );
// RECIAccessColumn() should have already been called to verify FID.
if ( fUseDMLLatchDBG )
pfcb->EnterDML();
Assert( fid <= ptdb->FidTaggedLast() );
Assert( JET_coltypNil != ptdb->PfieldTagged( columnid )->coltyp );
if ( fUseDMLLatchDBG )
pfcb->LeaveDML();
}
#endif
itagfld = ItagfldFind( TAGFLD( fid, fUseDerivedBit ) );
Assert( itagfld <= CTaggedColumns() );
Assert( itagfld == CTaggedColumns()
|| !Ptagfld( itagfld )->FNull( this )
|| ( FIsSmallPage() ? 0 : sizeof( TAGFLD_HEADER ) ) == CbData( itagfld ) ); // If null, length is 0.
if ( itagfld < CTaggedColumns()
&& Ptagfld( itagfld )->FIsEqual( fid, fUseDerivedBit ) )
{
const TAGFLD_HEADER * const pheader = Pheader( itagfld );
if ( Ptagfld( itagfld )->FNull( this ) )
{
if ( FIsSmallPage() )
{
Assert( !Ptagfld( itagfld )->FExtendedInfo() );
Assert( NULL == pheader );
Assert( 0 == CbData( itagfld ) );
}
#ifdef DEBUG
if ( pfcbNil != pfcb )
{
if ( fUseDMLLatchDBG )
pfcb->EnterDML();
Assert( FFIELDDefault( ptdb->PfieldTagged( columnid )->ffield ) );
if ( fUseDMLLatchDBG )
pfcb->LeaveDML();
}
#endif
}
else if ( NULL != pheader
&& pheader->FMultiValues() )
{
Assert( Ptagfld( itagfld )->FExtendedInfo() );
Assert( itagSequence > 0 );
if ( pheader->FTwoValues() )
{
if ( itagSequence <= 2 )
{
TWOVALUES tv( PbData( itagfld ), CbData( itagfld ) );
tv.RetrieveInstance( itagSequence, pdataRetrieveBuffer );
return JET_errSuccess;
}
}
else
{
MULTIVALUES mv( PbData( itagfld ), CbData( itagfld ) );
if ( itagSequence <= mv.CMultiValues() )
{
return mv.ErrRetrieveInstance( itagSequence, pdataRetrieveBuffer );
}
}
// If we reached here, our desired occurrence is not in the
// record. Fall through to NullField.
}
else if ( 1 == itagSequence )
{
pdataRetrieveBuffer->SetPv( PbData( itagfld ) );
pdataRetrieveBuffer->SetCb( CbData( itagfld ) );
if ( NULL != pheader )
{
Assert( Ptagfld( itagfld )->FExtendedInfo() );
const INT iDelta = sizeof(TAGFLD_HEADER);
pdataRetrieveBuffer->DeltaPv( iDelta );
pdataRetrieveBuffer->DeltaCb( -iDelta );
return pheader->ErrRetrievalResult();
}
else
{
return JET_errSuccess;
}
}
else
{
// non-existent itagSequence, so return NULL
}
}
else if ( !( grbit & JET_bitRetrieveIgnoreDefault ) && 1 == itagSequence && ptdb->FTableHasNonEscrowDefault() )
{
Assert( pfcbNil != pfcb );
Assert( ptdbNil != ptdb );
const BOOL fUseDMLLatch = ( FidOfColumnid( columnid ) > ptdb->FidTaggedLastInitial()
&& ( grbit & grbitRetrieveColumnDDLNotLocked ) );
if ( fUseDMLLatch )
pfcb->EnterDML();
// assert no infinite recursion
Assert( dataRec.Pv() != ptdb->PdataDefaultRecord() );
const FIELDFLAG ffield = ptdb->PfieldTagged( columnid )->ffield;
if ( FFIELDUserDefinedDefault( ffield ) )
{
Assert( FFIELDDefault( ffield ) );
// no occurrences found, but a user-defined default value
// exists and we are retrieving the first occurrence.
if ( fUseDMLLatch )
pfcb->LeaveDML();
pdataRetrieveBuffer->Nullify();
return ErrERRCheck( wrnRECUserDefinedDefault );
}
else if ( FFIELDDefault( ffield ) )
{
// no occurrences found, but a default value exists and
// we are retrieving the first occurrence.
const ERR errT = ErrRECIRetrieveTaggedDefaultValue(
pfcb,
columnid,
pdataRetrieveBuffer );
Assert( wrnRECCompressed != errT );
if ( fUseDMLLatch )
pfcb->LeaveDML();
return errT;
}
if ( fUseDMLLatch )
pfcb->LeaveDML();
}
// null column common exit point
//
pdataRetrieveBuffer->Nullify();
return ErrERRCheck( JET_wrnColumnNull );
}
ULONG TAGFIELDS::UlColumnInstances(
FCB * const pfcb,
const COLUMNID columnid,
const BOOL fUseDerivedBit )
{
const FID fid = FidOfColumnid( columnid );
ULONG itagfld;
Assert( pfcbNil != pfcb );
const TDB * const ptdb = pfcb->Ptdb();
Assert( ptdbNil != ptdb );
#ifdef DEBUG
const BOOL fUseDMLLatchDBG = ( fid > ptdb->FidTaggedLastInitial() );
if ( fUseDMLLatchDBG )
pfcb->EnterDML();
// RECIAccessColumn() should have already been called to verify FID.
Assert( fid >= ptdb->FidTaggedFirst() );
Assert( fid <= ptdb->FidTaggedLast() );
AssertValid( ptdb );
const FIELD *pfield = ptdb->PfieldTagged( columnid );
Assert( pfieldNil != pfield );
Assert( JET_coltypNil != pfield->coltyp );
const BOOL fDefaultValue = FFIELDDefault( pfield->ffield );
if ( fUseDMLLatchDBG )
pfcb->LeaveDML();
#endif
ULONG ulInstances = 0;
itagfld = ItagfldFind( TAGFLD( fid, fUseDerivedBit ) );
Assert( itagfld <= CTaggedColumns() );
Assert( itagfld == CTaggedColumns()
|| !Ptagfld( itagfld )->FNull( this )
|| ( FIsSmallPage() ? 0 : sizeof( TAGFLD_HEADER ) ) == CbData( itagfld ) ); // If null, length is 0.
if ( itagfld < CTaggedColumns()
&& Ptagfld( itagfld )->FIsEqual( fid, fUseDerivedBit ) )
{
const TAGFLD_HEADER * const pheader = Pheader( itagfld );
if ( Ptagfld( itagfld )->FNull( this ) )
{
if ( FIsSmallPage() )
{
Assert( !Ptagfld( itagfld )->FExtendedInfo() );
Assert( NULL == pheader );
Assert( 0 == CbData( itagfld ) );
}
Assert( 0 == ulInstances );
Assert( fDefaultValue );
}
else if ( NULL != pheader
&& pheader->FMultiValues() )
{
Assert( Ptagfld( itagfld )->FExtendedInfo() );
if ( pheader->FTwoValues() )
{
Assert( !pheader->FColumnCanBeSeparated() );
ulInstances = 2;
}
else
{
MULTIVALUES mv( PbData( itagfld ), CbData( itagfld ) );
ulInstances = mv.CMultiValues();
}
}
else
{
ulInstances = 1;
}
}
else
{
const BOOL fUseDMLLatch = ( fid > ptdb->FidTaggedLastInitial() );
if ( fUseDMLLatch )
pfcb->EnterDML();
if ( FFIELDDefault( pfcb->Ptdb()->PfieldTagged( columnid )->ffield ) )
{
// no occurrences found, but a default value exists
//
ulInstances = 1;
Assert( fDefaultValue );
}
else
{
Assert( 0 == ulInstances );
Assert( !fDefaultValue );
}
if ( fUseDMLLatch )
pfcb->LeaveDML();
}
return ulInstances;
}
ERR TAGFIELDS::ErrScan(
FUCB * const pfucb,
const ULONG itagSequence,
const DATA& dataRec,
DATA * const pdataField,
COLUMNID * const pcolumnidRetrieved,
ULONG * const pitagSequenceRetrieved,
BOOL * const pfEncrypted,
const JET_GRBIT grbit )
{
ERR err;
FCB * const pfcb = pfucb->u.pfcb;
const BOOL fRetrieveNulls = ( grbit & JET_bitRetrieveNull );
const BOOL fRefreshNeeded = ( dataRec.Pv() == pfucb->kdfCurr.data.Pv() );
if ( fRefreshNeeded )
{
Assert( dataRec.Cb() == pfucb->kdfCurr.data.Cb() );
// Only need to refresh ptagfld if we're accessing the database,
// in which case we must have the page latched. Note that we
// may have a page latched, but we may not be accessing
// the database (i.e. no refresh needed).
Assert( Pcsr( pfucb )->FLatched() );
}
// Verify we're in a transaction in order to ensure read-consistency
// in case we have a page latch and need to release it while scanning.
Assert( pfucb->ppib->Level() > 0 );
Assert( pfcb != pfcbNil );
Assert( pdataField != NULL );
Assert( pcolumnidRetrieved != NULL );
// If itagSequence == 0, then we're counting the number of tagged columns
// in the record and we will output the result in pitagSequenceRetrieved.
Assert( itagSequence != 0 || pitagSequenceRetrieved != NULL );
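// (Usage sketch added for clarity; the caller-side variable names are illustrative:
//      DATA     dataField;
//      COLUMNID columnidT;
//      ULONG    citagInRecord;
//      BOOL     fEncryptedT;
//      TAGFIELDS tagfields( pfucb->kdfCurr.data );
//      const ERR errScan = tagfields.ErrScan( pfucb, 0, pfucb->kdfCurr.data, &dataField,
//                                             &columnidT, &citagInRecord, &fEncryptedT, 0 );
//  On return, citagInRecord holds the number of tagged instances present in the record.)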
Assert( pfcb->Ptdb() != ptdbNil );
const TDB * const ptdb = pfcb->Ptdb();
AssertValid( ptdb );
const BOOL fRetrieveDefaults = ( !( grbit & JET_bitRetrieveIgnoreDefault )
&& ptdb->FTableHasNonEscrowDefault() );
ULONG itagfld;
ULONG ulNumOccurrences = 0;
COLUMNID columnidCurr = ColumnidRECFirstTaggedForScan( ptdb );
for ( itagfld = 0; itagfld < CTaggedColumns(); itagfld++ )
{
Assert( FCOLUMNIDTagged( columnidCurr ) );
#ifdef DEBUG
#else
if ( g_fRepair )
#endif
{
const TAGFLD * ptagfldT = Ptagfld( itagfld );
BOOL fBadColumn;
if ( ptagfldT->FDerived() )
{
const FCB * const pfcbTemplate = ptdb->PfcbTemplateTable();
fBadColumn = ( pfcbNil == pfcbTemplate
|| ptagfldT->Fid() > pfcbTemplate->Ptdb()->FidTaggedLast()
|| ptagfldT->Fid() < pfcbTemplate->Ptdb()->FidTaggedFirst() );
}
else
{
fBadColumn = ( ptagfldT->Fid() > ptdb->FidTaggedLast()
|| ptagfldT->Fid() < ptdb->FidTaggedFirst() );
}
if ( fBadColumn )
{
// log event
//
FireWall( "BadTaggedFieldId" );
UtilReportEvent( eventWarning, REPAIR_CATEGORY, REPAIR_BAD_COLUMN_ID, 0, NULL );
break;
}
}
// Check for any "gaps" caused by default values (if we
// want default values retrieved).
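// (Example of such a gap, added for clarity: if the record holds explicit values for
//  tagged FIDs 258 and 260 only, and FID 259 has a default value, this inner loop
//  surfaces FID 259's default between them so the caller still sees columns in
//  FID order.)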
if ( fRetrieveDefaults )
{
// make copy of tagfld so we don't have to worry about losing the page
// latch on column access check
const TAGFLD * ptagfldT = Ptagfld( itagfld );
TAGFLD tagfldT( ptagfldT->Fid(), ptagfldT->FDerived() );
for( ;
tagfldT.FIsGreaterThan( columnidCurr, ptdb );
columnidCurr = ColumnidRECNextTaggedForScan( ptdb, columnidCurr ) )
{
FCB *pfcbT = pfcb;
Assert( ulNumOccurrences < itagSequence || 0 == itagSequence );
if ( FCOLUMNIDTemplateColumn( columnidCurr ) )
{
if ( pfcbNil != ptdb->PfcbTemplateTable() )
{
ptdb->AssertValidDerivedTable();
pfcbT = ptdb->PfcbTemplateTable();
}
else
{
ptdb->AssertValidTemplateTable();
}
}
else
{
err = ErrRECIAccessColumn( pfucb, columnidCurr, NULL, pfEncrypted );
if ( err < 0 )
{
if ( JET_errColumnNotFound == err )
continue;
return err;
}
}
const TDB * const ptdbT = pfcbT->Ptdb();
const BOOL fUseDMLLatch = ( FidOfColumnid( columnidCurr ) > ptdbT->FidTaggedLastInitial() );
if ( fUseDMLLatch )
pfcbT->EnterDML();
Assert( JET_coltypNil != ptdbT->PfieldTagged( columnidCurr )->coltyp );
const FIELDFLAG ffield = ptdbT->PfieldTagged( columnidCurr )->ffield;
if ( FFIELDUserDefinedDefault( ffield ) )
{
if ( ++ulNumOccurrences == itagSequence )
{
Assert( itagSequence != 0 );
*pcolumnidRetrieved = columnidCurr;
if ( pitagSequenceRetrieved != NULL )
*pitagSequenceRetrieved = 1;
// assert no infinite recursion
Assert( dataRec.Pv() != ptdbT->PdataDefaultRecord() );
if ( fUseDMLLatch )
pfcbT->LeaveDML();
pdataField->Nullify();
return ErrERRCheck( wrnRECUserDefinedDefault );
}
}
else if ( FFIELDDefault( ffield ) )
{
if ( ++ulNumOccurrences == itagSequence )
{
Assert( itagSequence != 0 );
*pcolumnidRetrieved = columnidCurr;
if ( pitagSequenceRetrieved != NULL )
*pitagSequenceRetrieved = 1;
// assert no infinite recursion
Assert( dataRec.Pv() != ptdbT->PdataDefaultRecord() );
err = ErrRECIRetrieveTaggedDefaultValue( pfcbT, columnidCurr, pdataField );
Assert( wrnRECCompressed != err );
if ( fUseDMLLatch )
pfcbT->LeaveDML();
return err;
}
}
if ( fUseDMLLatch )
pfcbT->LeaveDML();
}
Assert( tagfldT.FIsEqual( columnidCurr, ptdb ) );
}
else
{
columnidCurr = Ptagfld( itagfld )->Columnid( ptdb );
}
if ( FCOLUMNIDTemplateColumn( columnidCurr ) )
{
#ifdef DEBUG
DATA dataSav;
if ( fRefreshNeeded )
{
dataSav.SetPv( pfucb->kdfCurr.data.Pv() );
dataSav.SetCb( pfucb->kdfCurr.data.Cb() );
}
CallS( ErrRECIAccessColumn( pfucb, columnidCurr, NULL, pfEncrypted ) );
if ( fRefreshNeeded )
{
// verify pointers didn't change - we should not lose latch
// because we shouldn't have to consult catalog
Assert( pfucb->kdfCurr.data == dataSav );
}
#endif
// template column obtained from either TDB or from record,
// so it must exist
err = JET_errSuccess;
}
else
{
err = ErrRECIAccessColumn( pfucb, columnidCurr, NULL, pfEncrypted );
if ( err < 0 && JET_errColumnNotFound != err )
return err;
if ( fRefreshNeeded )
{
// We may have invalidated our pointer if we had to give
// up the latch. Force refresh.
Refresh( pfucb->kdfCurr.data );
}
}
Assert( Ptagfld( itagfld )->FIsEqual( columnidCurr, ptdb ) );
CallSx( err, JET_errColumnNotFound );
if ( JET_errColumnNotFound == err )
{
// Column not visible to this session. Skip to next one.
}
else if ( Ptagfld( itagfld )->FNull( this ) )
{
// If there's an explicit null entry, it should be the only
// occurrence of this fid. Also the only reason for an explicit
// null entry is to override a default value.
Assert( !Ptagfld( itagfld )->FExtendedInfo() || !FIsSmallPage() );
Assert( ( FIsSmallPage() ? 0 : sizeof( TAGFLD_HEADER ) ) == CbData( itagfld ) );
Assert( ulNumOccurrences < itagSequence || 0 == itagSequence );
#ifdef DEBUG
pfcb->EnterDML();
Assert( FFIELDDefault( ptdb->PfieldTagged( columnidCurr )->ffield ) );
pfcb->LeaveDML();
#endif
// Only count columns explicitly set to null if the RetrieveNulls
// flag is passed. Otherwise, just skip it.
if ( fRetrieveNulls && ++ulNumOccurrences == itagSequence )
{
Assert( itagSequence != 0 );
*pcolumnidRetrieved = columnidCurr;
if ( pitagSequenceRetrieved != NULL )
*pitagSequenceRetrieved = 1;
pdataField->Nullify();
return ErrERRCheck( JET_wrnColumnSetNull );
}
}
else
{
Assert( ulNumOccurrences < itagSequence || 0 == itagSequence );
const TAGFLD_HEADER * const pheader = Pheader( itagfld );
if ( NULL != pheader
&& pheader->FMultiValues() )
{
const ULONG itagSequenceToRetrieve = ( 0 == itagSequence ? 0 : itagSequence - ulNumOccurrences );
if ( pheader->FTwoValues() )
{
ulNumOccurrences += 2;
if ( 1 == itagSequenceToRetrieve
|| 2 == itagSequenceToRetrieve )
{
*pcolumnidRetrieved = columnidCurr;
if ( NULL != pitagSequenceRetrieved )
*pitagSequenceRetrieved = itagSequenceToRetrieve;
TWOVALUES tv( PbData( itagfld ), CbData( itagfld ) );
tv.RetrieveInstance( itagSequenceToRetrieve, pdataField );
return JET_errSuccess;
}
}
else
{
MULTIVALUES mv( PbData( itagfld ), CbData( itagfld ) );
ulNumOccurrences += mv.CMultiValues();
if ( itagSequenceToRetrieve > 0
&& itagSequenceToRetrieve <= mv.CMultiValues() )
{
*pcolumnidRetrieved = columnidCurr;
if ( NULL != pitagSequenceRetrieved )
*pitagSequenceRetrieved = itagSequenceToRetrieve;
return mv.ErrRetrieveInstance( itagSequenceToRetrieve, pdataField );
}
}
}
else if ( ++ulNumOccurrences == itagSequence )
{
Assert( 0 != itagSequence );
pdataField->SetCb( CbData( itagfld ) );
pdataField->SetPv( PbData( itagfld ) );
if ( NULL != pheader )
{
Assert( Ptagfld( itagfld )->FExtendedInfo() );
const INT iDelta = sizeof(TAGFLD_HEADER);
pdataField->DeltaPv( iDelta );
pdataField->DeltaCb( -iDelta );
}
*pcolumnidRetrieved = columnidCurr;
if ( pitagSequenceRetrieved != NULL )
*pitagSequenceRetrieved = 1;
return ( NULL == pheader ? JET_errSuccess : pheader->ErrRetrievalResult() );
}
}
// if we got here, we haven't found the instance we're looking for
Assert( ulNumOccurrences < itagSequence || 0 == itagSequence );
columnidCurr = ColumnidRECNextTaggedForScan( ptdb, columnidCurr );
} // for ( itagfld = 0; itagfld < CTaggedColumns(); itagfld++ )
if ( fRetrieveDefaults )
{
// Take snapshot of FidTaggedLast. Even if someone's adding
// columns, we don't have access to it anyways.
FID fidTaggedLast = ptdb->FidTaggedLast();
for( ;
( FCOLUMNIDTemplateColumn( columnidCurr ) && !ptdb->FTemplateTable() ) || FidOfColumnid( columnidCurr ) <= fidTaggedLast;
columnidCurr = ColumnidRECNextTaggedForScan( ptdb, columnidCurr ) )
{
FCB *pfcbT = pfcb;
Assert( ulNumOccurrences < itagSequence || 0 == itagSequence );
if ( FCOLUMNIDTemplateColumn( columnidCurr ) )
{
if ( pfcbNil != ptdb->PfcbTemplateTable() )
{
ptdb->AssertValidDerivedTable();
pfcbT = ptdb->PfcbTemplateTable();
}
else
{
ptdb->AssertValidTemplateTable();
}
}
else
{
err = ErrRECIAccessColumn( pfucb, columnidCurr, NULL, pfEncrypted );
if ( err < 0 )
{
if ( JET_errColumnNotFound == err )
continue;
return err;
}
}
const TDB * const ptdbT = pfcbT->Ptdb();
const BOOL fUseDMLLatch = ( FidOfColumnid( columnidCurr ) > ptdbT->FidTaggedLastInitial() );
if ( fUseDMLLatch )
pfcbT->EnterDML();
Assert( JET_coltypNil != ptdbT->PfieldTagged( columnidCurr )->coltyp );
const FIELDFLAG ffield = ptdbT->PfieldTagged( columnidCurr )->ffield;
if ( FFIELDUserDefinedDefault( ffield ) )
{
if ( ++ulNumOccurrences == itagSequence )
{
Assert( itagSequence != 0 );
*pcolumnidRetrieved = columnidCurr;
if ( pitagSequenceRetrieved != NULL )
*pitagSequenceRetrieved = 1;
// assert no infinite recursion
Assert( dataRec.Pv() != ptdbT->PdataDefaultRecord() );
if ( fUseDMLLatch )
pfcbT->LeaveDML();
pdataField->Nullify();
return ErrERRCheck( wrnRECUserDefinedDefault );
}
}
else if ( FFIELDDefault( ffield ) )
{
if ( ++ulNumOccurrences == itagSequence )
{
Assert( itagSequence != 0 );
*pcolumnidRetrieved = columnidCurr;
if ( pitagSequenceRetrieved != NULL )
*pitagSequenceRetrieved = 1;
// assert no infinite recursion
Assert( dataRec.Pv() != ptdbT->PdataDefaultRecord() );
err = ErrRECIRetrieveTaggedDefaultValue( pfcbT, columnidCurr, pdataField );
Assert( wrnRECCompressed != err );
if ( fUseDMLLatch )
pfcbT->LeaveDML();
return err;
}
}
if ( fUseDMLLatch )
pfcbT->LeaveDML();
}
}
// If we reached here, no more tagged columns.
*pcolumnidRetrieved = 0;
if ( pitagSequenceRetrieved != NULL )
*pitagSequenceRetrieved = ( itagSequence == 0 ? ulNumOccurrences : 0 );
// null column common exit point
//
pdataField->Nullify();
return ErrERRCheck( JET_wrnColumnNull );
}
ERR TAGFIELDS::ErrAffectLongValuesInWorkBuf(
FUCB * const pfucb,
const LVAFFECT lvaffect,
const ULONG cbThreshold )
{
ERR err = JET_errSuccess;
TDB * const ptdb = pfucb->u.pfcb->Ptdb();
ULONG itagfld = 0;
BYTE * pbDataDecrypted = NULL;
#ifdef DEBUG
const ULONG cTaggedColumns = CTaggedColumns(); // snapshot original count for debugging
const REC * prec = (REC *)( pfucb->dataWorkBuf.Pv() );
Unused( cTaggedColumns );
Unused( prec );
Assert( prec->PbTaggedData() == (BYTE *)m_rgtagfld );
#endif
Assert( ptdbNil != ptdb );
AssertValid( ptdb );
Assert( !Pcsr( pfucb )->FLatched() );
Assert( cbThreshold < (ULONG)g_cbPage );
Assert( cbThreshold >= (ULONG)LvId::CbLidFromCurrFormat( pfucb ) );
// WARNING: This function performs LV updates and also modifies
// the copy buffer, so if this function returns an error and
// the LV updates are rolled back, it is up to the caller to
// ensure that either the copy buffer is discarded or the
// original copy buffer is re-instated
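// (Caller-side sketch of the required error handling, added as an illustration only;
//  the cleanup helper named here is hypothetical:
//      const ERR errAffect = tagfields.ErrAffectLongValuesInWorkBuf( pfucb, lvaffectSeparateAll, cbThreshold );
//      if ( errAffect < JET_errSuccess )
//      {
//          DiscardOrRestoreCopyBuffer( pfucb );    // hypothetical cleanup by the caller
//      }
//  i.e. the copy buffer must not be trusted as-is after a failure from this function.)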
Assert( pfucb->ppib->Level() > 0 );
Assert( lvaffectSeparateAll == lvaffect
|| ( lvaffectReferenceAll == lvaffect && FFUCBInsertCopyPrepared( pfucb ) ) );
while ( itagfld < CTaggedColumns() )
{
const COLUMNID columnidCurr = Ptagfld( itagfld )->Columnid( ptdb );
TAGFLD_HEADER * const pheader = Pheader( itagfld );
BOOL fEncrypted = fFalse;
BOOL fRemoveColumn = false;
Assert( !Pcsr( pfucb )->FLatched() );
err = ErrRECIAccessColumn( pfucb, columnidCurr, NULL, &fEncrypted );
if ( err < 0 )
{
if ( JET_errColumnNotFound != err )
goto HandleError;
err = JET_errSuccess;
fRemoveColumn = fTrue;
}
else
{
CallS( err );
}
if ( fRemoveColumn )
{
const ULONG cbColumnToRemove = CbData( itagfld );
Assert( !FCOLUMNIDTemplateColumn( columnidCurr ) );
// Case where we must remove the column:
// column not visible to this session. Since the
// column exists in this session's record, the column
// could not have been version-added by someone else.
// Therefore, it must have been deleted by this session,
// or deleted and committed before this transaction began.
#ifdef DEBUG
pfucb->u.pfcb->EnterDML();
Assert( FFIELDDeleted( ptdb->PfieldTagged( columnidCurr )->ffield ) );
pfucb->u.pfcb->LeaveDML();
#endif
// if SeparateAll, must first deref all LVs before removing
// them from the record
// if ReferenceAll, don't deref because we're coming
// from InsertCopy and the only ref belongs to the
// original record
if ( lvaffectSeparateAll == lvaffect
&& NULL != pheader
&& pheader->FColumnCanBeSeparated() )
{
Assert( !pheader->FTwoValues() );
if ( pheader->FMultiValues() )
{
MULTIVALUES mv( PbData( itagfld ), CbData( itagfld ) );
ULONG imv;
#ifdef UNLIMITED_MULTIVALUES
#else
Assert( !pheader->FSeparated() );
#endif
for ( imv = 0; imv < mv.CMultiValues(); imv++ )
{
if ( mv.FSeparatedInstance( imv ) )
{
// set flag so that on prepCancel, delta RCE
// will be properly rolled back
FUCBSetUpdateSeparateLV( pfucb );
LvId lidT = LidOfSeparatedLV( mv.PbData( imv ), mv.CbData( imv ) );
Call( ErrRECAffectSeparateLV( pfucb, &lidT, fLVDereference ) );
Assert( JET_wrnCopyLongValue != err );
}
}
}
else if ( pheader->FSeparated() )
{
FUCBSetUpdateSeparateLV( pfucb );
ULONG cbLid = CbData( itagfld ) - sizeof( TAGFLD_HEADER );
LvId lidT = LidOfSeparatedLV( PbData( itagfld ) + sizeof(TAGFLD_HEADER), cbLid );
Call( ErrRECAffectSeparateLV( pfucb, &lidT, fLVDereference ) );
Assert( JET_wrnCopyLongValue != err );
}
}
DeleteTagfld( itagfld );
pfucb->dataWorkBuf.DeltaCb( 0 - ( sizeof(TAGFLD) + cbColumnToRemove ) );
// don't increment itagfld, so we will retrieve whatever
// tagged column now occupies the space vacated by
// the deleted column
continue;
}
if ( NULL != pheader
&& pheader->FColumnCanBeSeparated()
&& !Ptagfld( itagfld )->FNull( this ) )
{
Assert( !pheader->FTwoValues() );
Assert( CbData( itagfld ) >= sizeof(TAGFLD_HEADER) );
switch ( lvaffect )
{
case lvaffectSeparateAll:
// note that we do not separate those long values that are
// so short that they take even less space in a record
// than a LID for separated long value would.
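// (Worked example, added for clarity: separating a value replaces its bytes with a LID
//  reference of LvId::CbLidFromCurrFormat( pfucb ) bytes (for instance 8 bytes for a
//  64-bit LID), and cbThreshold is asserted above to be at least that size, so a value
//  of, say, 6 bytes is never separated here because doing so could not shrink the
//  record. Hence the "> cbThreshold" tests below.)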
if ( pheader->FMultiValues() )
{
Assert( !fEncrypted );
#ifdef UNLIMITED_MULTIVALUES
#else
Assert( !pheader->FSeparated() );
#endif
MULTIVALUES mv( PbData( itagfld ), CbData( itagfld ) );
ULONG imv;
ULONG cbColumnShrink = 0;
for ( imv = 0; imv < mv.CMultiValues(); imv++ )
{
Assert( cbThreshold >= (ULONG) LvId::CbLidFromCurrFormat( pfucb ) );
if ( !mv.FSeparatedInstance( imv )
&& mv.CbData( imv ) > cbThreshold )
{
DATA dataField;
LvId lid;
const ULONG cbData = mv.CbData( imv );
BYTE rgbT[ sizeof( LvId ) ];
// set flag so that on prepCancel, insert RCE for
// this new separated LV will be properly rolled back
FUCBSetUpdateSeparateLV( pfucb );
dataField.SetPv( mv.PbData( imv ) );
dataField.SetCb( cbData );
if( pheader->FCompressed() && 0 == imv )
{
BYTE * pbDecompressed = NULL;
INT cbActual = 0;
Call( ErrPKAllocAndDecompressData(
dataField,
pfucb,
&pbDecompressed,
&cbActual ) );
DATA dataDecompressed;
dataDecompressed.SetPv( pbDecompressed );
dataDecompressed.SetCb( cbActual );
// we are decompressing and then recompressing the data.
// to optimize this we could just pass the compressed data to
// ErrRECSeparateLV, along with the logical size
err = ErrRECSeparateLV(
pfucb,
&dataDecompressed,
pheader->FCompressed( ) ? CompressFlags( compress7Bit | compressXpress ) : compressNone,
fFalse,
&lid,
NULL );
delete[] pbDecompressed;
Call( err );
pheader->ResetFCompressed();
}
else
{
Call( ErrRECSeparateLV(
pfucb,
&dataField,
pheader->FCompressed() ? CompressFlags( compress7Bit | compressXpress ) : compressNone,
fFalse,
&lid,
NULL ) );
}
Assert( JET_wrnCopyLongValue == err );
// The newly generated LID should obey the current format
Assert( lid.FLidObeysCurrFormat( pfucb ) );
const INT cbLid = CbLVSetLidInRecord( rgbT, sizeof( rgbT ), lid );
const ULONG cbShrink = cbData - cbLid;
dataField.SetPv( rgbT );
dataField.SetCb( cbLid );
mv.UpdateInstance(
imv + 1,
&dataField,
JET_coltypNil,
fTrue,
fFalse );
cbColumnShrink += cbShrink;
}
}
if ( cbColumnShrink > 0 )
{
ResizeTagfld( itagfld, 0 - cbColumnShrink );
// update record size
pfucb->dataWorkBuf.DeltaCb( 0 - cbColumnShrink );
}
}
else if ( !pheader->FSeparated()
&& CbData( itagfld ) > sizeof(TAGFLD_HEADER) + cbThreshold )
{
DATA dataField;
LvId lid;
const ULONG cbData = CbData( itagfld ) - sizeof(TAGFLD_HEADER);
Assert( !fEncrypted == !pheader->FEncrypted() );
// set flag so that on prepCancel, insert RCE for
// this new separated LV will be properly rolled back
FUCBSetUpdateSeparateLV( pfucb );
dataField.SetPv( PbData( itagfld ) + sizeof(TAGFLD_HEADER) );
dataField.SetCb( cbData );
if ( fEncrypted )
{
Assert( pbDataDecrypted == NULL );
Alloc( pbDataDecrypted = new BYTE[ dataField.Cb() ] );
ULONG cbDataDecryptedActual = dataField.Cb();
Call( ErrOSUDecrypt(
(BYTE*)dataField.Pv(),
pbDataDecrypted,
&cbDataDecryptedActual,
pfucb->pbEncryptionKey,
pfucb->cbEncryptionKey,
PinstFromPfucb( pfucb )->m_iInstance,
pfucb->u.pfcb->TCE() ) );
dataField.SetPv( pbDataDecrypted );
dataField.SetCb( cbDataDecryptedActual );
}
if( pheader->FCompressed() )
{
BYTE * pbDecompressed = NULL;
INT cbActual = 0;
Call( ErrPKAllocAndDecompressData(
dataField,
pfucb,
&pbDecompressed,
&cbActual ) );
DATA dataDecompressed;
dataDecompressed.SetPv( pbDecompressed );
dataDecompressed.SetCb( cbActual );
err = ErrRECSeparateLV(
pfucb,
&dataDecompressed,
pheader->FCompressed() ? CompressFlags( compress7Bit | compressXpress ) : compressNone,
fEncrypted,
&lid,
NULL );
delete [] pbDecompressed;
Call( err );
}
else
{
Call( ErrRECSeparateLV(
pfucb,
&dataField,
pheader->FCompressed() ? CompressFlags( compress7Bit | compressXpress ) : compressNone,
fEncrypted,
&lid,
NULL ) );
}
Assert( JET_wrnCopyLongValue == err );
// The newly generated LID should obey the current engine format version (EFV)
Assert( lid.FLidObeysCurrFormat( pfucb ) );
if ( pbDataDecrypted )
{
delete[] pbDataDecrypted;
pbDataDecrypted = NULL;
}
const INT cbLid = CbLVSetLidInRecord( PbData( itagfld ) + sizeof( TAGFLD_HEADER ), cbData, lid );
const ULONG cbShrink = cbData - cbLid;
ResizeTagfld( itagfld, 0 - cbShrink );
pheader->ResetFCompressed();
pheader->ResetFEncrypted();
pheader->SetFSeparated();
// update record size
pfucb->dataWorkBuf.DeltaCb( 0 - cbShrink );
}
break;
case lvaffectReferenceAll:
if ( pheader->FMultiValues() )
{
MULTIVALUES mv( PbData( itagfld ), CbData( itagfld ) );
ULONG imv;
for ( imv = 0; imv < mv.CMultiValues(); imv++ )
{
if ( mv.FSeparatedInstance( imv ) )
{
// set flag so that on prepCancel, delta RCE's will
// be properly rolled back
FUCBSetUpdateSeparateLV( pfucb );
const LvId lidOld = LidOfSeparatedLV( mv.PbData( imv ), mv.CbData( imv ) );
LvId lidNew = lidOld;
Call( ErrRECAffectSeparateLV( pfucb, &lidNew, fLVReference ) );
if ( JET_wrnCopyLongValue == err )
{
// long value got burst, update LID
Assert( lidNew > lidOld );
Assert( lidNew.FLidObeysCurrFormat( pfucb ) );
if ( lidNew.Cb() == lidOld.Cb() )
{
CbLVSetLidInRecord( mv.PbData( imv ), mv.CbData( imv ), lidNew );
}
else
{
// This case is an esoteric artifact of upgrading LIDs from 32-bit to 64-bit.
// While incrementing refcount on LVs, the LV can potentially be burst returning a new LID.
// This could cause us to go from a LID32 to a LID64, potentially increasing record size.
// So we need to move data around and resize the tag field to handle this properly.
// There is also a possibility now that this code path may return JET_errRecordTooBig.
// Since we are coming in here from InsertCopy, that would be really weird for clients to
// suddenly get JET_errRecordTooBig for records that they were able to store previously.
// But this is the best we can do in this scenario.
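// (Worked example, added for clarity: if lidOld was stored as a 4-byte LID32 and the
//  burst LV came back as an 8-byte LID64, then cbLid == 8 and cbColumnExpand == 4, so
//  the record must grow by 4 bytes to hold the wider LID, which is why this path can
//  now fail with JET_errRecordTooBig.)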
BYTE rgbT[ sizeof( LvId ) ];
const INT cbLid = CbLVSetLidInRecord( rgbT, sizeof( rgbT ), lidNew );
const INT cbColumnExpand = cbLid - lidOld.Cb();
DATA dataField;
if ( pfucb->dataWorkBuf.Cb() + cbColumnExpand > REC::CbRecordMost( pfucb ) )
{
Error( ErrERRCheck( JET_errRecordTooBig ) );
}
ResizeTagfld( itagfld, cbColumnExpand );
// update record size
pfucb->dataWorkBuf.DeltaCb( cbColumnExpand );
dataField.SetPv( rgbT );
dataField.SetCb( cbLid );
mv.UpdateInstance(imv + 1, &dataField, JET_coltypNil, fTrue, fFalse );
}
}
}
}
}
else if ( pheader->FSeparated() )
{
// set flag so that on prepCancel, delta RCE's will
// be properly rolled back
FUCBSetUpdateSeparateLV( pfucb );
const INT cbData = CbData( itagfld ) - sizeof( TAGFLD_HEADER );
const LvId lidOld = LidOfSeparatedLV( PbData( itagfld ) + sizeof(TAGFLD_HEADER), cbData );
LvId lidNew = lidOld;
Call( ErrRECAffectSeparateLV( pfucb, &lidNew, fLVReference ) );
if ( JET_wrnCopyLongValue == err )
{
// long value got burst, update LID
Assert( lidNew > lidOld );
Assert( lidNew.FLidObeysCurrFormat( pfucb ) );
if ( lidNew.Cb() == lidOld.Cb() )
{
CbLVSetLidInRecord( PbData( itagfld ) + sizeof( TAGFLD_HEADER ), cbData, lidNew );
}
else
{
// This case is an esoteric artifact of upgrading LIDs from 32-bit to 64-bit.
// While incrementing refcount on LVs, the LV can potentially be burst returning a new LID.
// This could cause us to go from a LID32 to a LID64, potentially increasing record size.
// So we need to move data around and resize the tag field to handle this properly.
// There is also a possibility now that this code path may return JET_errRecordTooBig.
// Since we are coming in here from InsertCopy, that would be really weird for clients to
// suddenly get JET_errRecordTooBig for records that they were able to store previously.
// But this is the best we can do in this scenario.
const INT cbColumnExpand = lidNew.Cb() - lidOld.Cb();
if ( pfucb->dataWorkBuf.Cb() + cbColumnExpand > REC::CbRecordMost( pfucb ) )
{
Error( ErrERRCheck( JET_errRecordTooBig ) );
}
ResizeTagfld( itagfld, cbColumnExpand );
// update record size
pfucb->dataWorkBuf.DeltaCb( cbColumnExpand );
CbLVSetLidInRecord( PbData( itagfld ) + sizeof( TAGFLD_HEADER ), cbData + cbColumnExpand, lidNew );
}
}
}
break;
default:
Assert( fFalse );
break;
}
}
itagfld++;
} // while ( itagfld < CTaggedColumns() )
HandleError:
// this function should never increase the size of the record
// unless we are referencing LVs and an LV was burst
// (in fact, we typically call this function to free up record space)
Assert( JET_errRecordTooBig != err || lvaffectReferenceAll == lvaffect );
if ( pbDataDecrypted )
{
delete[] pbDataDecrypted;
pbDataDecrypted = NULL;
}
Assert( !Pcsr( pfucb )->FLatched() );
return err;
}
ERR TAGFIELDS::ErrDereferenceLongValuesInRecord(
FUCB * const pfucb )
{
ERR err;
TDB * const ptdb = pfucb->u.pfcb->Ptdb();
ULONG itagfld;
Assert( ptdbNil != ptdb );
AssertValid( ptdb );
Assert( Pcsr( pfucb )->FLatched() );
for ( itagfld = 0; itagfld < CTaggedColumns(); itagfld++ )
{
Assert( Pcsr( pfucb )->FLatched() );
const TAGFLD_HEADER * const pheader = Pheader( itagfld );
if ( NULL != pheader
&& pheader->FColumnCanBeSeparated() )
{
Assert( !pheader->FTwoValues() );
if ( pheader->FMultiValues() )
{
#ifdef UNLIMITED_MULTIVALUES
#else
Assert( !pheader->FSeparated() );
#endif
Assert( Pcsr( pfucb )->FLatched() );
MULTIVALUES mv( PbData( itagfld ), CbData( itagfld ) );
ULONG imv;
for ( imv = 0; imv < mv.CMultiValues(); imv++ )
{
Assert( Pcsr( pfucb )->FLatched() );
const BOOL fSeparatedLV = mv.FSeparatedInstance( imv );
if ( fSeparatedLV )
{
LvId lidToDeref = LidOfSeparatedLV( mv.PbData( imv ), mv.CbData( imv ) );
CallR( ErrDIRRelease( pfucb ) );
CallR( ErrRECAffectSeparateLV( pfucb, &lidToDeref, fLVDereference ) );
Assert( JET_wrnCopyLongValue != err );
// re-latch for next iteration
CallR( ErrDIRGet( pfucb ) );
Refresh( pfucb->kdfCurr.data );
mv.Refresh( PbData( itagfld ), CbData( itagfld ) );
}
}
}
else
{
Assert( Pcsr( pfucb )->FLatched() );
const BOOL fSeparatedLV = pheader->FSeparated();
if ( fSeparatedLV )
{
INT cbLID = CbData( itagfld ) - sizeof( TAGFLD_HEADER );
Assert( !pheader->FSeparated() || sizeof( _LID64 ) == cbLID || sizeof( _LID32 ) == cbLID );
LvId lidToDeref = ( fSeparatedLV ?
LidOfSeparatedLV( PbData( itagfld ) + sizeof(TAGFLD_HEADER), cbLID ) :
0 );
CallR( ErrDIRRelease( pfucb ) );
if ( fSeparatedLV )
{
CallR( ErrRECAffectSeparateLV( pfucb, &lidToDeref, fLVDereference ) );
Assert( JET_wrnCopyLongValue != err );
}
// re-latch for next iteration
CallR( ErrDIRGet( pfucb ) );
Refresh( pfucb->kdfCurr.data );
}
}
}
}
Assert( Pcsr( pfucb )->FLatched() );
return JET_errSuccess;
}
VOID TAGFIELDS::CopyTaggedColumns(
FUCB * const pfucbSrc,
FUCB * const pfucbDest,
JET_COLUMNID * const mpcolumnidcolumnidTagged )
{
const TDB * const ptdbSrc = pfucbSrc->u.pfcb->Ptdb();
BOOL fESE97DerivedColumnsExist = fFalse;
BOOL fESE98DerivedColumnsExist = fFalse;
ULONG cColumnsToCopy = 0;
ULONG itagfldToCopy = 0;
ULONG itagfld;
for ( itagfld = 0; itagfld < CTaggedColumns(); itagfld++ )
{
const TAGFLD * const ptagfld = Ptagfld( itagfld );
const COLUMNID columnid = ptagfld->Columnid( ptdbSrc );
const FIELD * const pfieldTagged = ptdbSrc->PfieldTagged( columnid );
Assert( JET_coltypNil != pfieldTagged->coltyp
|| !FCOLUMNIDTemplateColumn( columnid ) );
if ( JET_coltypNil != pfieldTagged->coltyp )
{
cColumnsToCopy++;
if ( FCOLUMNIDTemplateColumn( columnid )
&& !ptdbSrc->FTemplateTable() )
{
if ( ptagfld->FDerived() )
{
// should not yet have seen any derived columns with the derived bit not set
Assert( !fESE97DerivedColumnsExist );
fESE98DerivedColumnsExist = fTrue;
}
else
{
Assert( ptdbSrc->FESE97DerivedTable() );
fESE97DerivedColumnsExist = fTrue;
}
}
}
}
if ( 0 == cColumnsToCopy )
return;
USHORT ibDataDest = USHORT( cColumnsToCopy * sizeof(TAGFLD) );
TAGFLD * const rgtagfldDest = (TAGFLD *)(
(BYTE *)pfucbDest->dataWorkBuf.Pv()
+ pfucbDest->dataWorkBuf.Cb() );
// verify currently no tagged data
Assert( (BYTE *)rgtagfldDest
== ( (REC *)pfucbDest->dataWorkBuf.Pv() )->PbTaggedData() );
// if both ESE97 and ESE98 derived columns exist, must copy ESE97 derived columns first
const BOOL fNeedSeparatePassForESE97DerivedColumns = ( fESE97DerivedColumnsExist
&& fESE98DerivedColumnsExist );
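// (Added note on why an extra pass may be needed, partly an inference from the code
//  below: ESE97-style derived columns are template-table FIDs stored in the source
//  without the derived bit, so they sit among the non-derived columns, yet the
//  destination re-flags them as derived. The first pass below copies only those
//  columns, and the main loop then skips them, presumably so that every
//  derived-flagged column still precedes the non-derived ones in the destination,
//  as TAGFIELDS::FValidate expects.)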
if ( fNeedSeparatePassForESE97DerivedColumns )
{
Assert( !ptdbSrc->FTemplateTable() );
Assert( ptdbSrc->FESE97DerivedTable() );
ptdbSrc->AssertValidDerivedTable();
for ( itagfld = 0; itagfld < CTaggedColumns(); itagfld++ )
{
const TAGFLD * const ptagfld = Ptagfld( itagfld );
const COLUMNID columnid = ptagfld->Columnid( ptdbSrc );
const FIELD * const pfieldTagged = ptdbSrc->PfieldTagged( columnid );
Assert( JET_coltypNil != pfieldTagged->coltyp
|| !FCOLUMNIDTemplateColumn( columnid ) );
if ( JET_coltypNil != pfieldTagged->coltyp )
{
const FID fidSrc = ptagfld->Fid();
Assert( itagfldToCopy < cColumnsToCopy );
if ( !FCOLUMNIDTemplateColumn( columnid ) )
{
Assert( FCOLUMNIDTagged( mpcolumnidcolumnidTagged[fidSrc-fidTaggedLeast] ) );
Assert( !FCOLUMNIDTemplateColumn( mpcolumnidcolumnidTagged[fidSrc-fidTaggedLeast] ) );
Assert( mpcolumnidcolumnidTagged[fidSrc-fidTaggedLeast] <= pfucbDest->u.pfcb->Ptdb()->FidTaggedLast() );
Assert( mpcolumnidcolumnidTagged[fidSrc-fidTaggedLeast] <= fidSrc );
Assert( !ptagfld->FDerived() );
// hit the non-derived columns, so should be no more derived columns left
break;
}
Assert( pfucbSrc->u.pfcb->Ptdb()->PfcbTemplateTable()->Ptdb()->FidTaggedLast()
== pfucbDest->u.pfcb->Ptdb()->PfcbTemplateTable()->Ptdb()->FidTaggedLast() );
Assert( fidSrc <= pfucbDest->u.pfcb->Ptdb()->PfcbTemplateTable()->Ptdb()->FidTaggedLast() );
// ignore ESE98 derived columns
if ( !ptagfld->FDerived() )
{
// If column belongs to base table, then FID will not have changed,
// since base table's DDL is fixed. Thus, we don't have to bother
// updating the FID in the destination record.
new( rgtagfldDest + itagfldToCopy ) TAGFLD( fidSrc, fTrue );
Assert( rgtagfldDest[itagfldToCopy].FDerived() );
rgtagfldDest[itagfldToCopy].SetIb( ibDataDest );
if ( ptagfld->FNull( this ) )
{
rgtagfldDest[itagfldToCopy].SetFNull( this );
}
else
{
if ( ptagfld->FExtendedInfo() )
{
rgtagfldDest[itagfldToCopy].SetFExtendedInfo();
}
const ULONG cbData = CbData( itagfld );
UtilMemCpy(
(BYTE *)rgtagfldDest + ibDataDest,
PbData( itagfld ),
cbData );
ibDataDest = USHORT( ibDataDest + cbData );
}
itagfldToCopy++;
}
}
}
Assert( itagfldToCopy <= cColumnsToCopy );
}
for ( itagfld = 0; itagfld < CTaggedColumns(); itagfld++ )
{
const TAGFLD * const ptagfld = Ptagfld( itagfld );
const COLUMNID columnid = ptagfld->Columnid( ptdbSrc );
const FIELD * const pfieldTagged = ptdbSrc->PfieldTagged( columnid );
Assert( JET_coltypNil != pfieldTagged->coltyp
|| !FCOLUMNIDTemplateColumn( columnid ) );
if ( JET_coltypNil != pfieldTagged->coltyp )
{
const FID fidSrc = ptagfld->Fid();
FID fidDest;
BOOL fDerivedDest = fFalse;
Assert( itagfldToCopy <= cColumnsToCopy );
if ( !FCOLUMNIDTemplateColumn( columnid ) )
{
Assert( FCOLUMNIDTagged( mpcolumnidcolumnidTagged[fidSrc-fidTaggedLeast] ) );
Assert( !FCOLUMNIDTemplateColumn( mpcolumnidcolumnidTagged[fidSrc-fidTaggedLeast] ) );
Assert( mpcolumnidcolumnidTagged[fidSrc-fidTaggedLeast] <= pfucbDest->u.pfcb->Ptdb()->FidTaggedLast() );
Assert( mpcolumnidcolumnidTagged[fidSrc-fidTaggedLeast] <= fidSrc );
fidDest = FidOfColumnid( mpcolumnidcolumnidTagged[fidSrc-fidTaggedLeast] );
Assert( !ptagfld->FDerived() );
}
else
{
if ( ptdbSrc->FTemplateTable() )
{
ptdbSrc->AssertValidTemplateTable();
Assert( !ptagfld->FDerived() );
Assert( !ptdbSrc->FESE97DerivedTable() );
}
else
{
ptdbSrc->AssertValidDerivedTable();
Assert( pfucbSrc->u.pfcb->Ptdb()->PfcbTemplateTable()->Ptdb()->FidTaggedLast()
== pfucbDest->u.pfcb->Ptdb()->PfcbTemplateTable()->Ptdb()->FidTaggedLast() );
Assert( ptagfld->FDerived() || ptdbSrc->FESE97DerivedTable() );
Assert( fidSrc <= pfucbDest->u.pfcb->Ptdb()->PfcbTemplateTable()->Ptdb()->FidTaggedLast() );
if ( !ptagfld->FDerived() )
{
Assert( ptdbSrc->FESE97DerivedTable() );
if ( fNeedSeparatePassForESE97DerivedColumns )
{
// ESE97 derived columns were copied in the previous pass
continue;
}
}
fDerivedDest = fTrue;
}
// If column belongs to base table, then FID will not have changed,
// since base table's DDL is fixed. Thus, we don't have to bother
// updating the FID in the destination record.
fidDest = fidSrc;
}
new( rgtagfldDest + itagfldToCopy ) TAGFLD( fidDest, fDerivedDest );
rgtagfldDest[itagfldToCopy].SetIb( ibDataDest );
if ( ptagfld->FNull( this ) )
{
rgtagfldDest[itagfldToCopy].SetFNull( this );
}
if ( !ptagfld->FNull( this ) || !FIsSmallPage() )
{
if ( ptagfld->FExtendedInfo() )
{
rgtagfldDest[itagfldToCopy].SetFExtendedInfo();
}
const ULONG cbData = CbData( itagfld );
UtilMemCpy(
(BYTE *)rgtagfldDest + ibDataDest,
PbData( itagfld ),
cbData );
ibDataDest = USHORT( ibDataDest + cbData );
}
Assert( itagfldToCopy < cColumnsToCopy );
itagfldToCopy++;
}
}
Assert( itagfldToCopy == cColumnsToCopy );
pfucbDest->dataWorkBuf.DeltaCb( ibDataDest );
Assert( pfucbDest->dataWorkBuf.Cb() >= ibRECStartFixedColumns );
Assert( pfucbDest->dataWorkBuf.Cb() <= pfucbSrc->kdfCurr.data.Cb() );
}
ERR TAGFIELDS::ErrUpdateSeparatedLongValuesAfterCopy(
FUCB * const pfucbSrc,
FUCB * const pfucbDest,
JET_COLUMNID* const mpcolumnidcolumnidTagged,
STATUSINFO * const pstatus )
{
ERR err;
ULONG itagfld;
TDB * const ptdbDest = pfucbDest->u.pfcb->Ptdb();
Assert( ptdbNil != ptdbDest );
AssertValid( ptdbDest );
Assert( !Pcsr( pfucbSrc )->FLatched() );
Assert( !Pcsr( pfucbDest )->FLatched() );
for ( itagfld = 0; itagfld < CTaggedColumns(); itagfld++ )
{
TAGFLD_HEADER * const pheader = Pheader( itagfld );
if ( NULL != pheader
&& pheader->FColumnCanBeSeparated() )
{
const COLUMNID columnidCurr = Ptagfld( itagfld )->Columnid( ptdbDest );
Assert( FRECLongValue( ptdbDest->PfieldTagged( columnidCurr )->coltyp ) );
Assert( !pheader->FTwoValues() );
if ( pheader->FMultiValues() )
{
#ifdef UNLIMITED_MULTIVALUES
#else
Assert( !pheader->FSeparated() );
#endif
MULTIVALUES mv( PbData( itagfld ), CbData( itagfld ) );
ULONG imv;
for ( imv = 0; imv < mv.CMultiValues(); imv++ )
{
const BOOL fSeparatedLV = mv.FSeparatedInstance( imv );
if ( fSeparatedLV )
{
BYTE* const pbLid = mv.PbData( imv );
const INT cbLid = mv.CbData( imv );
const LvId lidSrc = LidOfSeparatedLV( pbLid, cbLid );
LvId lidDest;
CallR( ErrSORTIncrementLVRefcountDest(
pfucbSrc,
lidSrc,
&lidDest ) );
// During table copy (e.g. defrag), special care is taken to generate an
// LV tree with LIDs that are the same size as the LIDs in the src tree.
// This allows the records to stay the same size and avoid restructuring
// of the tag field.
// Breaking this assumption will cause data corruption !
EnforceSz( lidSrc.Cb() == lidDest.Cb(), "DataCorruptionLidSizeMismatch" );
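// (Example, added for clarity: if lidSrc is a 4-byte LID32, ErrSORTIncrementLVRefcountDest
//  is expected to hand back a 4-byte LID32 as well; a wider lidDest could not be written
//  into the cbLid bytes already reserved in this record, so a size mismatch is treated as
//  corruption rather than handled.)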
CbLVSetLidInRecord( pbLid, cbLid, lidDest );
}
else if ( NULL != pstatus )
{
pstatus->cbRawData += mv.CbData( imv );
}
}
}
else
{
const BOOL fSeparatedLV = pheader->FSeparated();
if ( fSeparatedLV )
{
BYTE* const pbLid = PbData( itagfld ) + sizeof( TAGFLD_HEADER );
const INT cbLid = CbData( itagfld ) - sizeof( TAGFLD_HEADER );
const LvId lidSrc = LidOfSeparatedLV( pbLid, cbLid );
LvId lidDest;
CallR( ErrSORTIncrementLVRefcountDest(
pfucbSrc,
lidSrc,
&lidDest ) );
// During table copy (e.g. defrag), special care is taken to generate an
// LV tree with LIDs that are the same size as the LIDs in the src tree.
// This allows the records to stay the same size and avoid restructuring
// of the tag field.
// Breaking this assumption will cause data corruption !
EnforceSz( lidSrc.Cb() == lidDest.Cb(), "DataCorruptionLidSizeMismatch" );
CbLVSetLidInRecord( pbLid, cbLid, lidDest );
}
else if ( NULL != pstatus )
{
pstatus->cbRawData += CbData( itagfld ) - sizeof(TAGFLD_HEADER);
}
}
}
else if ( NULL != pstatus )
{
pstatus->cbRawData +=
CbData( itagfld )
- ( NULL != pheader ? sizeof(TAGFLD_HEADER) : 0 );
}
}
#ifdef DEBUG
AssertValid( ptdbDest );
#endif
return JET_errSuccess;
}
ERR TAGFIELDS::ErrCheckLongValues(
const KEYDATAFLAGS& kdf,
RECCHECKTABLE * const precchecktable )
{
Assert( NULL != precchecktable );
ERR err;
ULONG itagfld;
for ( itagfld = 0; itagfld < CTaggedColumns(); itagfld++ )
{
const TAGFLD_HEADER * const pheader = Pheader( itagfld );
if ( NULL != pheader
&& pheader->FColumnCanBeSeparated() )
{
const TAGFLD * const ptagfld = Ptagfld( itagfld );
const COLUMNID columnidCurr = ColumnidOfFid(
ptagfld->Fid(),
ptagfld->FDerived() );
DATA dataT;
Assert( !pheader->FTwoValues() );
if ( pheader->FMultiValues() )
{
MULTIVALUES mv( PbData( itagfld ), CbData( itagfld ) );
ULONG imv;
for ( imv = 0; imv < mv.CMultiValues(); imv++ )
{
dataT.SetPv( mv.PbData( imv ) );
dataT.SetCb( mv.CbData( imv ) );
if ( pheader->FLongValue() )
{
CallR( precchecktable->ErrCheckLV(
kdf,
columnidCurr,
imv+1,
dataT,
mv.FSeparatedInstance( imv ) ) );
}
}
}
else if ( pheader->FLongValue() )
{
dataT.SetPv( PbData( itagfld ) + sizeof(TAGFLD_HEADER) );
dataT.SetCb( CbData( itagfld ) - sizeof(TAGFLD_HEADER) );
CallR( precchecktable->ErrCheckLV(
kdf,
columnidCurr,
1,
dataT,
pheader->FSeparated() ) );
}
else
{
// should be LV
Assert( fFalse );
}
}
else
{
#ifdef UNLIMITED_MULTIVALUES
#else
Assert( NULL == pheader
|| !pheader->FSeparated() );
#endif
}
}
return JET_errSuccess;
}
BOOL TAGFIELDS::FIsValidTwoValues(
const ULONG itagfld,
CPRINTF * const pcprintf ) const
{
const TAGFLD_HEADER * const pheader = (TAGFLD_HEADER *)PbData( itagfld );
Assert( NULL != pheader );
Assert( pheader->FMultiValues() );
Assert( pheader->FTwoValues() );
Assert( !pheader->FSeparated() );
Assert( !pheader->FColumnCanBeSeparated() );
if ( CbData( itagfld ) < sizeof(TAGFLD_HEADER) + sizeof(TWOVALUES::TVLENGTH) )
{
(*pcprintf)( "Column is too small to contain TwoValues.\r\n" );
AssertSz( fFalse, "Column is too small to contain TwoValues." );
return fFalse;
}
if ( CbData( itagfld ) > sizeof(TAGFLD_HEADER) + sizeof(TWOVALUES::TVLENGTH) + ( 2 * JET_cbColumnMost ) )
{
(*pcprintf)( "Column is larger than maximum possible size for TWOVALUES.\r\n" );
AssertSz( fFalse, "Column is larger than maximum possible size for TWOVALUES." );
return fFalse;
}
const ULONG cbTwoValues = CbData( itagfld ) - sizeof(TAGFLD_HEADER ) - sizeof(TWOVALUES::TVLENGTH );
const TWOVALUES::TVLENGTH cbFirstValue = *(TWOVALUES::TVLENGTH *)( pheader + 1 );
if ( cbFirstValue > cbTwoValues )
{
(*pcprintf)( "First TWOVALUE is too long.\r\n" );
AssertSz( fFalse, "First TWOVALUE is too long." );
return fFalse;
}
if ( cbTwoValues - cbFirstValue > JET_cbColumnMost )
{
(*pcprintf)( "Column is greater than 255 bytes, but is not a LongValue column.\r\n" );
AssertSz( fFalse, "Column is greater than 255 bytes, but is not a LongValue column." );
return fFalse;
}
return fTrue;
}
BOOL MULTIVALUES::FValidate(
CPRINTF * const pcprintf ) const
{
const BOOL fLongValue = ( Pheader()->FLongValue() );
ULONG imv;
for ( imv = 0; imv < CMultiValues(); imv++ )
{
const ULONG ibCurr = Ib( imv );
const ULONG ibNext = ( imv < CMultiValues() - 1 ) ? Ib( imv + 1 ) : CbMultiValues();
if ( ibCurr > ibNext || ibCurr > CbMultiValues() )
{
(*pcprintf)( "MULTIVALUE either overlaps previous MULTIVALUE or is out of TAGFLD range.\r\n" );
AssertSz( fFalse, "MULTIVALUE either overlaps previous MULTIVALUE or is out of TAGFLD range." );
return fFalse;
}
if ( !fLongValue )
{
if ( ibNext - ibCurr > JET_cbColumnMost )
{
(*pcprintf)( "Column is greater than 255 bytes, but is not a LongValue column.\r\n" );
AssertSz( fFalse, "Column is greater than 255 bytes, but is not a LongValue column." );
return fFalse;
}
}
if ( FSeparatedInstance( imv ) )
{
if ( !Pheader()->FColumnCanBeSeparated() )
{
(*pcprintf)( "Separated column is not a LongValue.\r\n" );
AssertSz( fFalse, "Separated column is not a LongValue." );
return fFalse;
}
// It is safe to access this value now because we have verified ibNext
const BYTE* pbLid = PbData( imv );
const LvId lid = LidOfSeparatedLV( pbLid, ibNext - ibCurr );
if ( lid == lidMin || ibNext - ibCurr != (ULONG) lid.Cb() )
{
( *pcprintf )( "Separated column has invalid LID.\r\n" );
AssertSz( fFalse, "Separated column has invalid LID." );
return fFalse;
}
}
}
return fTrue;
}
BOOL TAGFIELDS::FIsValidMultiValues(
const ULONG itagfld,
CPRINTF * const pcprintf ) const
{
const TAGFLD_HEADER * const pheader = (TAGFLD_HEADER *)PbData( itagfld );
Assert( NULL != pheader );
Assert( pheader->FMultiValues() );
Assert( !pheader->FTwoValues() );
#ifdef UNLIMITED_MULTIVALUES
#else
Assert( !pheader->FSeparated() );
#endif
if ( CbData( itagfld ) < sizeof(TAGFLD_HEADER) + ( 2 * sizeof(MULTIVALUES::MVOFFSET) ) )
{
(*pcprintf)( "Column is too small to contain MultiValues.\r\n" );
AssertSz( fFalse, "Column is too small to contain MultiValues." );
return fFalse;
}
const MULTIVALUES::MVOFFSET * const rgmvoffs = (MULTIVALUES::MVOFFSET *)( pheader + 1 );
const ULONG cbMultiValues = CbData( itagfld ) - sizeof(TAGFLD_HEADER);
const ULONG ibFirstMV = ( rgmvoffs[0] & MULTIVALUES::maskIb );
if ( ibFirstMV < 2 * sizeof(MULTIVALUES::MVOFFSET)
|| ibFirstMV > cbMultiValues
|| ibFirstMV % sizeof(MULTIVALUES::MVOFFSET) != 0 )
{
(*pcprintf)( "First MULTIVALUE has invalid Ib.\r\n" );
AssertSz( fFalse, "First MULTIVALUE has invalid Ib." );
return fFalse;
}
MULTIVALUES mv( PbData( itagfld ), CbData( itagfld ) );
return mv.FValidate( pcprintf );
}
BOOL TAGFIELDS::FValidate(
CPRINTF * const pcprintf ) const
{
BOOL fSawNonDerived = fFalse;
FID fidPrev = 0;
ULONG itagfld;
for ( itagfld = 0; itagfld < CTaggedColumns(); itagfld++ )
{
const TAGFLD * const ptagfld = Ptagfld( itagfld );
if ( !FTaggedFid( ptagfld->Fid() ) )
{
(*pcprintf)( "FID %d is not a tagged column.\r\n", ptagfld->Fid() );
AssertSz( fFalse, "FID is not a tagged column." );
return fFalse;
}
if ( ptagfld->FDerived() )
{
if ( fSawNonDerived )
{
// all derived columns must come first
(*pcprintf)( "Derived/NonDerived columns out of order.\r\n" );
AssertSz( fFalse, "Derived/NonDerived columns out of order." );
return fFalse;
}
if ( ptagfld->Fid() <= fidPrev )
{
// FIDs must be monotonically increasing
(*pcprintf)( "Columns are not in monotonically-increasing FID order (FID %d <= FID %d).\r\n", ptagfld->Fid(), fidPrev );
AssertSz( fFalse, "Columns are not in monotonically-increasing FID order." );
return fFalse;
}
}
else if ( fSawNonDerived )
{
if ( ptagfld->Fid() <= fidPrev )
{
// FIDs must be monotonically increasing
(*pcprintf)( "Columns are not in monotonically-increasing FID order (FID %d <= FID %d).\r\n", ptagfld->Fid(), fidPrev );
AssertSz( fFalse, "Columns are not in monotonically-increasing FID order." );
return fFalse;
}
}
else
{
fSawNonDerived = fTrue;
}
fidPrev = ptagfld->Fid(); // save off FID for next iteration
const ULONG ibNext = ( itagfld < ( CTaggedColumns() - 1 ) ? Ptagfld( itagfld + 1 )->Ib() : CbTaggedColumns() );
if ( ptagfld->Ib() > ibNext )
{
(*pcprintf)( "TAGFLD %d either overlaps previous TAGFLD or is out of record range.\r\n", ptagfld->Fid() );
AssertSz( fFalse, "TAGFLD either overlaps previous TAGFLD or is out of record range." );
return fFalse;
}
// if we needed to extract the length of the current TAGFLD, we can
// now do it because we've validated the ib of this TAGFLD
if ( ptagfld->FNull( this ) )
{
if ( ptagfld->FExtendedInfo() && FIsSmallPage() )
{
// these two bits are mutually exclusive
(*pcprintf)( "TAGFLD %d has both NULL and ExtendedInfo flags set.\r\n", ptagfld->Fid() );
AssertSz( fFalse, "TAGFLD has both NULL and ExtendedInfo flags set." );
return fFalse;
}
if ( ibNext != ptagfld->Ib() + ( FIsSmallPage() ? 0 : sizeof( TAGFLD_HEADER ) ) )
{
if ( itagfld < CTaggedColumns() - 1 )
{
( *pcprintf )( "Current TAGFLD is NULL but not zero-length.\r\n" );
AssertSz( fFalse, "Current TAGFLD is NULL but not zero-length." );
return fFalse;
}
else
{
// if last column is NULL, it must point to the end of the tagged data
( *pcprintf )( "Last TAGFLD is NULL but does not point to the end of the tagged data.\r\n" );
AssertSz( fFalse, "Last TAGFLD is NULL but does not point to the end of the tagged data." );
return fFalse;
}
}
}
if ( ptagfld->FExtendedInfo() )
{
const TAGFLD_HEADER * const pheader = Pheader( itagfld );
// these are already checked, so just assert
Assert( NULL != pheader );
Assert( (BYTE *)pheader >= PbStartOfTaggedData() );
Assert( (BYTE *)pheader <= PbStartOfTaggedData() + CbTaggedData() );
if ( *(BYTE *)pheader & BYTE( ~TAGFLD_HEADER::maskFlags ) )
{
// these bits should be unused
(*pcprintf)( "TAGFLD header (%x) has invalid bits set.\r\n", *(BYTE*)pheader );
AssertSz( fFalse, "TAGFLD header has invalid bits set." );
return fFalse;
}
if ( !pheader->FLongValue()
&& !pheader->FMultiValues()
&& FIsSmallPage() )
{
// if MultiValues is not set, there is no other reason for a non-LV
// column to have a header byte,
// except when JET is in large (16/32 KiB) page mode (the extended-info byte is always present for tagged columns)
(*pcprintf)( "Column %d has inappropriate header byte.\r\n", ptagfld->Fid() );
AssertSz( fFalse, "Column has inappropriate header byte." );
return fFalse;
}
if ( pheader->FTwoValues() )
{
if ( !pheader->FMultiValues() )
{
(*pcprintf)( "TAGFLD %d is marked as TwoValues but not MultiValues.\r\n", ptagfld->Fid() );
AssertSz( fFalse, "TAGFLD is marked as TwoValues but not MultiValues." );
return fFalse;
}
if ( pheader->FLongValue()
|| pheader->FSeparated() ) // even with UNLIMITED_MULTIVALUES, we would make this a true MULTIVALUES before separating it
{
(*pcprintf)( "TAGFLD %d is marked as TwoValues but cannot be a LongValue, or Separated.\r\n", ptagfld->Fid() );
AssertSz( fFalse, "A TAGFLD marked as TwoValues cannot be a LongValue, or Separated." );
return fFalse;
}
if ( !FIsValidTwoValues( itagfld, pcprintf ) )
{
return fFalse;
}
}
else if ( pheader->FMultiValues() )
{
if ( pheader->FSeparated() )
{
#ifdef UNLIMITED_MULTIVALUES
#else
(*pcprintf)( "Separated multi-value list not currently supported.\r\n" );
AssertSz( fFalse, "Separated multi-value list not currently supported." );
return fFalse;
#endif
}
if ( !FIsValidMultiValues( itagfld, pcprintf ) )
{
return fFalse;
}
}
else if ( pheader->FSeparated() )
{
if ( !pheader->FColumnCanBeSeparated() )
{
(*pcprintf)( "Separated column %d is not a LongValue.\r\n", ptagfld->Fid() );
AssertSz( fFalse, "Separated column is not a LongValue." );
return fFalse;
}
const INT cbLid = ibNext - ptagfld->Ib() - sizeof( TAGFLD_HEADER );
const LvId lid = LidOfSeparatedLV( PbData( itagfld ) + sizeof( TAGFLD_HEADER ), cbLid );
if ( lid == lidMin || cbLid != lid.Cb() )
{
(*pcprintf)( "Separated column %d has invalid LID.\r\n", ptagfld->Fid() );
AssertSz( fFalse, "Separated column has invalid LID." );
return fFalse;
}
}
}
else if ( ibNext - ptagfld->Ib() > JET_cbColumnMost )
{
(*pcprintf)( "Column %d is greater than 255 bytes, but is not a LongValue column.\r\n", ptagfld->Fid() );
AssertSz( fFalse, "Column is greater than 255 bytes, but is not a LongValue column." );
return fFalse;
}
}
return fTrue;
}
BOOL TAGFIELDS::FIsValidTagfields(
const LONG cbPage,
const DATA& dataRec,
CPRINTF * const pcprintf )
{
if ( NULL == dataRec.Pv()
|| dataRec.Cb() < REC::cbRecordMin
|| dataRec.Cb() > REC::CbRecordMostCHECK( cbPage ) )
{
(*pcprintf)( "Record is an invalid size.\r\n" );
AssertSz( g_fRepair, "Record is an invalid size." );
if ( !g_fRepair )
{
FireWall( "FIsValidTagfieldsRecTooBig13.1" );
}
return fFalse;
}
const REC * prec = (REC *)dataRec.Pv();
const BYTE * pbRecMax = (BYTE *)prec + dataRec.Cb();
// WARNING: PbTaggedData() could GPF if the record is messed up
const BYTE * pbStartOfTaggedColumns = prec->PbTaggedData();
if ( pbStartOfTaggedColumns < (BYTE *)dataRec.Pv() + REC::cbRecordMin
|| pbStartOfTaggedColumns > pbRecMax )
{
(*pcprintf)( "Start of tagged columns is out of record range.\r\n" );
AssertSz( g_fRepair, "Start of tagged columns is out of record range." );
return fFalse;
}
const SIZE_T cbTaggedColumns = pbRecMax - pbStartOfTaggedColumns;
if ( cbTaggedColumns > 0 )
{
// there's at least some tagged data
const TAGFLD * const ptagfldFirst = (TAGFLD *)pbStartOfTaggedColumns;
if ( ptagfldFirst->Ib() < sizeof(TAGFLD) // must be at least one TAGFLD
|| ptagfldFirst->Ib() > cbTaggedColumns
|| ptagfldFirst->Ib() % sizeof(TAGFLD) != 0 )
{
(*pcprintf)( "First TAGFLD has an invalid Ib.\r\n" );
AssertSz( g_fRepair, "First TAGFLD has an invalid Ib." );
return fFalse;
}
}
// at this point, it should be safe to call the constructor
TAGFIELDS tagfields( dataRec );
return tagfields.FValidate( pcprintf );
}
// ****************************************************************
// TAGFLD_ITERATOR
// ****************************************************************
TAGFLD_ITERATOR::TAGFLD_ITERATOR()
{
}
TAGFLD_ITERATOR::~TAGFLD_ITERATOR()
{
}
INT TAGFLD_ITERATOR::Ctags() const
{
return 0;
}
ERR TAGFLD_ITERATOR::ErrSetItag( const INT itag )
{
return ErrERRCheck( JET_errNoCurrentRecord );
}
VOID TAGFLD_ITERATOR::MoveBeforeFirst()
{
}
VOID TAGFLD_ITERATOR::MoveAfterLast()
{
}
ERR TAGFLD_ITERATOR::ErrMovePrev()
{
return ErrERRCheck( JET_errNoCurrentRecord );
}
ERR TAGFLD_ITERATOR::ErrMoveNext()
{
return ErrERRCheck( JET_errNoCurrentRecord );
}
INT TAGFLD_ITERATOR::Itag() const
{
Assert( fFalse );
return 0;
}
BOOL TAGFLD_ITERATOR::FSeparated() const
{
Assert( fFalse );
return fFalse;
}
BOOL TAGFLD_ITERATOR::FCompressed() const
{
return fFalse;
}
BOOL TAGFLD_ITERATOR::FEncrypted() const
{
return fFalse;
}
INT TAGFLD_ITERATOR::CbData() const
{
Assert( fFalse );
return 0;
}
const BYTE * TAGFLD_ITERATOR::PbData() const
{
Assert( fFalse );
return NULL;
}
// ****************************************************************
// TAGFLD_ITERATOR_INVALID
// ****************************************************************
class TAGFLD_ITERATOR_INVALID : public TAGFLD_ITERATOR
{
public:
TAGFLD_ITERATOR_INVALID() {}
~TAGFLD_ITERATOR_INVALID() {}
};
// ****************************************************************
// TAGFLD_ITERATOR_NULLVALUE
// ****************************************************************
class TAGFLD_ITERATOR_NULLVALUE : public TAGFLD_ITERATOR
{
public:
TAGFLD_ITERATOR_NULLVALUE() {}
~TAGFLD_ITERATOR_NULLVALUE() {}
};
// ****************************************************************
// TAGFLD_ITERATOR_SINGLEVALUE
// ****************************************************************
class TAGFLD_ITERATOR_SINGLEVALUE : public TAGFLD_ITERATOR
{
public:
TAGFLD_ITERATOR_SINGLEVALUE( const DATA& data, const BOOL fSeparated, const BOOL fCompressed, const BOOL fEncrypted );
~TAGFLD_ITERATOR_SINGLEVALUE();
public:
VOID MoveBeforeFirst();
VOID MoveAfterLast();
ERR ErrMovePrev();
ERR ErrMoveNext();
INT Ctags() const;
ERR ErrSetItag( const INT itag );
public:
INT Itag() const;
BOOL FSeparated() const;
BOOL FCompressed() const;
BOOL FEncrypted() const;
INT CbData() const;
const BYTE * PbData() const;
private:
const BOOL m_fSeparated;
const BOOL m_fCompressed;
const BOOL m_fEncrypted;
const INT m_cbData;
const BYTE * const m_pbData;
INT m_itag; // our current location
};
INT TAGFLD_ITERATOR_SINGLEVALUE::Ctags() const { return 1; }
INT TAGFLD_ITERATOR_SINGLEVALUE::Itag() const { return ( 1 == m_itag ) ? 1 : 0; }
BOOL TAGFLD_ITERATOR_SINGLEVALUE::FSeparated() const { return ( 1 == m_itag ) ? m_fSeparated : fFalse; }
BOOL TAGFLD_ITERATOR_SINGLEVALUE::FCompressed() const { return ( 1 == m_itag ) ? m_fCompressed : fFalse; }
BOOL TAGFLD_ITERATOR_SINGLEVALUE::FEncrypted() const { return m_fEncrypted; }
INT TAGFLD_ITERATOR_SINGLEVALUE::CbData() const { return ( 1 == m_itag ) ? m_cbData : 0; }
const BYTE * TAGFLD_ITERATOR_SINGLEVALUE::PbData() const { return m_pbData; }
TAGFLD_ITERATOR_SINGLEVALUE::TAGFLD_ITERATOR_SINGLEVALUE( const DATA& data, const BOOL fSeparated, const BOOL fCompressed, const BOOL fEncrypted ) :
m_fSeparated( fSeparated ),
m_fCompressed( fCompressed ),
m_fEncrypted( fEncrypted ),
m_cbData( data.Cb() ),
m_pbData( reinterpret_cast<BYTE *>( data.Pv() ) ),
m_itag( 0 )
{
}
TAGFLD_ITERATOR_SINGLEVALUE::~TAGFLD_ITERATOR_SINGLEVALUE()
{
}
ERR TAGFLD_ITERATOR_SINGLEVALUE::ErrSetItag( const INT itag )
{
if( 1 == itag )
{
m_itag = 1;
return JET_errSuccess;
}
MoveBeforeFirst();
return ErrERRCheck( JET_errNoCurrentRecord );
}
VOID TAGFLD_ITERATOR_SINGLEVALUE::MoveBeforeFirst()
{
m_itag = 0;
}
VOID TAGFLD_ITERATOR_SINGLEVALUE::MoveAfterLast()
{
m_itag = 2;
}
ERR TAGFLD_ITERATOR_SINGLEVALUE::ErrMovePrev()
{
ERR err;
switch( m_itag )
{
case 2:
m_itag = 1;
err = JET_errSuccess;
break;
case 1:
case 0:
MoveBeforeFirst();
err = ErrERRCheck( JET_errNoCurrentRecord );
break;
default:
Assert( fFalse );
err = ErrERRCheck( JET_errInternalError );
break;
}
return err;
}
ERR TAGFLD_ITERATOR_SINGLEVALUE::ErrMoveNext()
{
ERR err;
switch( m_itag )
{
case 0:
m_itag = 1;
err = JET_errSuccess;
break;
case 1:
case 2:
MoveAfterLast();
err = ErrERRCheck( JET_errNoCurrentRecord );
break;
default:
Assert( fFalse );
err = ErrERRCheck( JET_errInternalError );
break;
}
return err;
}
// ****************************************************************
// TAGFLD_ITERATOR_TWOVALUES
// ****************************************************************
class TAGFLD_ITERATOR_TWOVALUES : public TAGFLD_ITERATOR
{
public:
TAGFLD_ITERATOR_TWOVALUES( const DATA& data );
~TAGFLD_ITERATOR_TWOVALUES();
public:
VOID MoveBeforeFirst();
VOID MoveAfterLast();
ERR ErrMovePrev();
ERR ErrMoveNext();
INT Ctags() const;
ERR ErrSetItag( const INT itag );
public:
INT Itag() const;
BOOL FSeparated() const;
INT CbData() const;
const BYTE * PbData() const;
private:
const TWOVALUES m_twovalues;
INT m_itag; // our current location
};
TAGFLD_ITERATOR_TWOVALUES::TAGFLD_ITERATOR_TWOVALUES( const DATA& data ) :
m_twovalues( reinterpret_cast<BYTE *>( data.Pv() ), data.Cb() ),
m_itag( 0 )
{
}
TAGFLD_ITERATOR_TWOVALUES::~TAGFLD_ITERATOR_TWOVALUES()
{
}
INT TAGFLD_ITERATOR_TWOVALUES::Ctags() const
{
return 2;
}
ERR TAGFLD_ITERATOR_TWOVALUES::ErrSetItag( const INT itag )
{
if( 1 == itag
|| 2 == itag )
{
m_itag = 1;
return JET_errSuccess;
}
MoveBeforeFirst();
return ErrERRCheck( JET_errNoCurrentRecord );
}
VOID TAGFLD_ITERATOR_TWOVALUES::MoveBeforeFirst()
{
m_itag = 0;
}
VOID TAGFLD_ITERATOR_TWOVALUES::MoveAfterLast()
{
m_itag = 3;
}
ERR TAGFLD_ITERATOR_TWOVALUES::ErrMovePrev()
{
ERR err;
if( --m_itag < 1 )
{
MoveBeforeFirst();
err = ErrERRCheck( JET_errNoCurrentRecord );
}
else
{
err = JET_errSuccess;
}
return err;
}
ERR TAGFLD_ITERATOR_TWOVALUES::ErrMoveNext()
{
ERR err;
if( ++m_itag > 2 )
{
MoveAfterLast();
err = ErrERRCheck( JET_errNoCurrentRecord );
}
else
{
err = JET_errSuccess;
}
return err;
}
INT TAGFLD_ITERATOR_TWOVALUES::Itag() const
{
if( 1 == m_itag || 2 == m_itag )
{
return m_itag;
}
return 0;
}
BOOL TAGFLD_ITERATOR_TWOVALUES::FSeparated() const
{
// TWOVALUES are never used for LV columns
return fFalse;
}
INT TAGFLD_ITERATOR_TWOVALUES::CbData() const
{
INT cbData;
switch( m_itag )
{
case 2:
cbData = m_twovalues.CbSecondValue();
break;
case 1:
cbData = m_twovalues.CbFirstValue();
break;
case 0:
Assert( fFalse );
cbData = 0;
break;
default:
Assert( fFalse );
cbData = 0xffffffff;
break;
}
return cbData;
}
const BYTE * TAGFLD_ITERATOR_TWOVALUES::PbData() const
{
const BYTE * pbData;
switch( m_itag )
{
case 2:
pbData = m_twovalues.PbData() + m_twovalues.CbFirstValue();
break;
case 1:
pbData = m_twovalues.PbData();
break;
case 0:
Assert( fFalse );
pbData = 0;
break;
default:
Assert( fFalse );
pbData = (BYTE *)(~0);
break;
}
return pbData;
}
// ****************************************************************
// TAGFLD_ITERATOR_MULTIVALUES
// ****************************************************************
class TAGFLD_ITERATOR_MULTIVALUES : public TAGFLD_ITERATOR
{
public:
TAGFLD_ITERATOR_MULTIVALUES( const DATA& data, const BOOL fCompressed );
~TAGFLD_ITERATOR_MULTIVALUES();
public:
VOID MoveBeforeFirst();
VOID MoveAfterLast();
ERR ErrMovePrev();
ERR ErrMoveNext();
INT Ctags() const;
ERR ErrSetItag( const INT itag );
public:
INT Itag() const;
BOOL FSeparated() const;
BOOL FCompressed() const;
INT CbData() const;
const BYTE * PbData() const;
private:
const MULTIVALUES m_multivalues;
INT m_itag; // our current location
const BOOL m_fCompressed;
};
TAGFLD_ITERATOR_MULTIVALUES::TAGFLD_ITERATOR_MULTIVALUES( const DATA& data, const BOOL fCompressed ) :
m_multivalues( reinterpret_cast<BYTE *>( data.Pv() ), data.Cb() ),
m_itag( 0 ),
m_fCompressed( fCompressed )
{
}
TAGFLD_ITERATOR_MULTIVALUES::~TAGFLD_ITERATOR_MULTIVALUES()
{
}
INT TAGFLD_ITERATOR_MULTIVALUES::Ctags() const
{
return m_multivalues.CMultiValues();
}
ERR TAGFLD_ITERATOR_MULTIVALUES::ErrSetItag( const INT itag )
{
ERR err;
if ( itag > 0 && (ULONG)itag <= m_multivalues.CMultiValues() )
{
m_itag = itag;
err = JET_errSuccess;
}
else
{
MoveBeforeFirst();
err = ErrERRCheck( JET_errNoCurrentRecord );
}
return err;
}
VOID TAGFLD_ITERATOR_MULTIVALUES::MoveBeforeFirst()
{
m_itag = 0;
}
VOID TAGFLD_ITERATOR_MULTIVALUES::MoveAfterLast()
{
m_itag = m_multivalues.CMultiValues() + 1;
}
ERR TAGFLD_ITERATOR_MULTIVALUES::ErrMovePrev()
{
ERR err;
m_itag--;
if( m_itag < 1 )
{
MoveBeforeFirst();
err = ErrERRCheck( JET_errNoCurrentRecord );
}
else
{
err = JET_errSuccess;
}
return err;
}
ERR TAGFLD_ITERATOR_MULTIVALUES::ErrMoveNext()
{
ERR err;
m_itag++;
if ( (ULONG)m_itag > m_multivalues.CMultiValues() )
{
MoveAfterLast();
err = ErrERRCheck( JET_errNoCurrentRecord );
}
else
{
err = JET_errSuccess;
}
return err;
}
INT TAGFLD_ITERATOR_MULTIVALUES::Itag() const
{
return ( m_itag >= 1 && (ULONG)m_itag <= m_multivalues.CMultiValues() ? m_itag : 0 );
}
BOOL TAGFLD_ITERATOR_MULTIVALUES::FSeparated() const
{
return ( m_itag >= 1 && (ULONG)m_itag <= m_multivalues.CMultiValues() ?
m_multivalues.FSeparatedInstance( m_itag - 1 ) :
0 );
}
BOOL TAGFLD_ITERATOR_MULTIVALUES::FCompressed() const
{
return ( m_itag == 1 && m_fCompressed );
}
INT TAGFLD_ITERATOR_MULTIVALUES::CbData() const
{
return ( m_itag >= 1 && (ULONG)m_itag <= m_multivalues.CMultiValues() ?
m_multivalues.CbData( m_itag - 1 ) :
0 );
}
const BYTE * TAGFLD_ITERATOR_MULTIVALUES::PbData() const
{
return ( m_itag >= 1 && (ULONG)m_itag <= m_multivalues.CMultiValues() ?
m_multivalues.PbData( m_itag - 1 ) :
0 );
}
// ****************************************************************
// TAGFIELDS_ITERATOR
// ****************************************************************
TAGFIELDS_ITERATOR::TAGFIELDS_ITERATOR( const DATA& dataRec ) :
m_tagfields( dataRec ),
m_ptagfldMic( m_tagfields.Rgtagfld() - 1 ),
m_ptagfldMax( m_tagfields.Rgtagfld() + m_tagfields.CTaggedColumns() ),
m_ptagfldCurr( m_ptagfldMic ),
m_ptagflditerator( new( m_rgbTagfldIteratorBuf ) TAGFLD_ITERATOR_INVALID )
{
}
TAGFIELDS_ITERATOR::~TAGFIELDS_ITERATOR()
{
}
#ifdef DEBUG
VOID TAGFIELDS_ITERATOR::AssertValid() const
{
Assert( m_ptagflditerator == (TAGFLD_ITERATOR *)m_rgbTagfldIteratorBuf );
Assert( m_ptagfldCurr >= m_ptagfldMic );
Assert( m_ptagfldCurr <= m_ptagfldMax );
}
#endif
VOID TAGFIELDS_ITERATOR::MoveBeforeFirst()
{
m_ptagfldCurr = m_ptagfldMic;
new( m_rgbTagfldIteratorBuf ) TAGFLD_ITERATOR_INVALID;
}
VOID TAGFIELDS_ITERATOR::MoveAfterLast()
{
m_ptagfldCurr = m_ptagfldMax;
new( m_rgbTagfldIteratorBuf ) TAGFLD_ITERATOR_INVALID;
}
ERR TAGFIELDS_ITERATOR::ErrMovePrev()
{
if( --m_ptagfldCurr <= m_ptagfldMic )
{
MoveBeforeFirst();
return ErrERRCheck( JET_errNoCurrentRecord );
}
CreateTagfldIterator_();
return JET_errSuccess;
}
ERR TAGFIELDS_ITERATOR::ErrMoveNext()
{
if( ++m_ptagfldCurr >= m_ptagfldMax )
{
MoveAfterLast();
return ErrERRCheck( JET_errNoCurrentRecord );
}
CreateTagfldIterator_();
return JET_errSuccess;
}
FID TAGFIELDS_ITERATOR::Fid() const
{
if( m_ptagfldCurr >= m_ptagfldMax || m_ptagfldCurr <= m_ptagfldMic )
{
Assert( fFalse );
return 0;
}
return m_ptagfldCurr->Fid();
}
COLUMNID TAGFIELDS_ITERATOR::Columnid( const TDB * const ptdb ) const
{
if( m_ptagfldCurr >= m_ptagfldMax || m_ptagfldCurr <= m_ptagfldMic )
{
Assert( fFalse );
return 0;
}
return m_ptagfldCurr->Columnid( ptdb );
}
BOOL TAGFIELDS_ITERATOR::FTemplateColumn( const TDB * const ptdb ) const
{
if( m_ptagfldCurr >= m_ptagfldMax || m_ptagfldCurr <= m_ptagfldMic )
{
Assert( fFalse );
return 0;
}
return m_ptagfldCurr->FTemplateColumn( ptdb );
}
BOOL TAGFIELDS_ITERATOR::FNull() const
{
if( m_ptagfldCurr >= m_ptagfldMax || m_ptagfldCurr <= m_ptagfldMic )
{
Assert( fFalse );
return fFalse;
}
return m_ptagfldCurr->FNull( &m_tagfields );
}
BOOL TAGFIELDS_ITERATOR::FDerived() const
{
if( m_ptagfldCurr >= m_ptagfldMax || m_ptagfldCurr <= m_ptagfldMic )
{
Assert( fFalse );
return fFalse;
}
return m_ptagfldCurr->FDerived();
}
BOOL TAGFIELDS_ITERATOR::FLV() const
{
if( m_ptagfldCurr >= m_ptagfldMax || m_ptagfldCurr <= m_ptagfldMic )
{
Assert( fFalse );
return fFalse;
}
if( !m_ptagfldCurr->FExtendedInfo() )
{
return fFalse;
}
const BYTE * const pbData = m_tagfields.PbTaggedColumns() + m_ptagfldCurr->Ib();
const BYTE bExtendedInfo = *pbData;
return bExtendedInfo & TAGFLD_HEADER::fLongValue;
}
TAGFLD_ITERATOR& TAGFIELDS_ITERATOR::TagfldIterator()
{
return *m_ptagflditerator;
}
const TAGFLD_ITERATOR& TAGFIELDS_ITERATOR::TagfldIterator() const
{
return *m_ptagflditerator;
}
VOID TAGFIELDS_ITERATOR::CreateTagfldIterator_()
{
Assert( m_ptagfldCurr > m_ptagfldMic );
Assert( m_ptagfldCurr < m_ptagfldMax );
if( m_ptagfldCurr->FNull( &m_tagfields ) )
{
Assert( sizeof( m_rgbTagfldIteratorBuf ) >= sizeof( TAGFLD_ITERATOR_NULLVALUE ) );
new( m_rgbTagfldIteratorBuf ) TAGFLD_ITERATOR_NULLVALUE;
}
else
{
DATA data;
const BYTE * const pbData = m_tagfields.PbTaggedColumns() + m_ptagfldCurr->Ib();
const SIZE_T cbData = m_tagfields.CbData( ULONG( m_ptagfldCurr - m_ptagfldMic - 1 ) );
data.SetPv( const_cast<BYTE *>( pbData ) );
data.SetCb( cbData );
if( !m_ptagfldCurr->FExtendedInfo() )
{
// ordinary value
Assert( sizeof( m_rgbTagfldIteratorBuf ) >= sizeof( TAGFLD_ITERATOR_SINGLEVALUE ) );
new( m_rgbTagfldIteratorBuf ) TAGFLD_ITERATOR_SINGLEVALUE( data, fFalse, fFalse, fFalse );
}
else
{
const BYTE bExtendedInfo = *pbData;
if( bExtendedInfo & TAGFLD_HEADER::fTwoValues )
{
Assert( sizeof( m_rgbTagfldIteratorBuf ) >= sizeof( TAGFLD_ITERATOR_TWOVALUES ) );
new( m_rgbTagfldIteratorBuf ) TAGFLD_ITERATOR_TWOVALUES( data );
}
else if( bExtendedInfo & TAGFLD_HEADER::fMultiValues )
{
Assert( sizeof( m_rgbTagfldIteratorBuf ) >= sizeof( TAGFLD_ITERATOR_MULTIVALUES ) );
new( m_rgbTagfldIteratorBuf ) TAGFLD_ITERATOR_MULTIVALUES( data, bExtendedInfo & TAGFLD_HEADER::fCompressed );
}
else
{
// ordinary column with header byte. skip the header byte
const BOOL fSeparated = bExtendedInfo & TAGFLD_HEADER::fSeparated;
const BOOL fCompressed = bExtendedInfo & TAGFLD_HEADER::fCompressed;
const BOOL fEncrypted = bExtendedInfo & TAGFLD_HEADER::fEncrypted;
data.DeltaPv( sizeof( TAGFLD_HEADER ) );
data.DeltaCb( 0 - INT( sizeof( TAGFLD_HEADER ) ) );
Assert( sizeof( m_rgbTagfldIteratorBuf ) >= sizeof( TAGFLD_ITERATOR_SINGLEVALUE ) );
new( m_rgbTagfldIteratorBuf ) TAGFLD_ITERATOR_SINGLEVALUE( data, fSeparated, fCompressed, fEncrypted );
}
}
}
}
| 84,605 |
713 | package org.infinispan.rest.framework.impl;
/**
* Path item defined by an expression. The expression supports constant chars plus variable names enclosed by
* '{' and '}'.
*
* A path containing {@link VariablePathItem} can match multiple paths at runtime. Examples:
*
* /rest/{variable} can match /rest/a, /rest/b and so on
 * /rest/{var1}_{var2} can match /rest/a_b, /rest/var1_var2 but not /rest/path.
*
* @since 10.0
*/
class VariablePathItem extends PathItem {
private final String expression;
private final String normalized;
VariablePathItem(String expression) {
this.expression = expression;
this.normalized = normalize(expression);
}
String getExpression() {
return expression;
}
private static String normalize(String expression) {
if (expression == null) return null;
StringBuilder builder = new StringBuilder();
boolean variable = false;
for (char c : expression.toCharArray()) {
if (!variable) builder.append(c);
if (c == '{') variable = true;
if (c == '}' && variable) {
variable = false;
builder.append("}");
}
}
return builder.toString();
}
@Override
public String toString() {
return expression;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
VariablePathItem that = (VariablePathItem) o;
return normalized.equals(that.normalized);
}
@Override
public int hashCode() {
return normalized.hashCode();
}
@Override
public String getPath() {
return expression;
}
}
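A minimal sketch (hypothetical, not part of the original file) of the normalization behaviour described in the javadoc: expressions that differ only in variable names compare equal, because both normalize to the same template. It assumes same-package access to the package-private constructor.
// Hypothetical same-package snippet; run with assertions enabled (-ea).
VariablePathItem a = new VariablePathItem("/rest/{cache}");
VariablePathItem b = new VariablePathItem("/rest/{counter}");
assert a.equals(b) && a.hashCode() == b.hashCode();        // both normalize to "/rest/{}"
assert !a.equals(new VariablePathItem("/rest/{c1}_{c2}")); // that one normalizes to "/rest/{}_{}"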
| 606 |
13,663 | /*
* dummy.c, a fake hash algorithm, just to test integration capabilities.
* Part of the xxHash project
* Copyright (C) 2020 <NAME>
*
* GPL v2 License
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
* You can contact the author at:
* - xxHash homepage: https://www.xxhash.com
* - xxHash source repository: https://github.com/Cyan4973/xxHash
*/
#include <dummy.h>
unsigned badsum32(const void* input, size_t len, unsigned seed)
{
unsigned sum = seed;
const unsigned char* in8 = input;
size_t c;
for (c=0; c<len; c++)
sum += in8[c];
return sum;
}
| 374 |
655 | <reponame>linuzri/samourai-wallet-android<filename>app/src/main/java/com/samourai/codescanner/CodeScannerView.java
/*
* MIT License
*
* Copyright (c) 2017 <NAME> [<EMAIL>]
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package com.samourai.codescanner;
import android.annotation.SuppressLint;
import android.content.Context;
import android.content.res.TypedArray;
import android.graphics.Color;
import android.os.Build;
import android.util.AttributeSet;
import android.view.MotionEvent;
import android.view.SurfaceView;
import android.view.View;
import android.view.ViewGroup;
import android.widget.ImageView;
import com.samourai.wallet.R;
import androidx.annotation.AttrRes;
import androidx.annotation.ColorInt;
import androidx.annotation.FloatRange;
import androidx.annotation.NonNull;
import androidx.annotation.Nullable;
import androidx.annotation.Px;
import androidx.annotation.RequiresApi;
import androidx.annotation.StyleRes;
/**
* A view to display code scanner preview
*
* @see CodeScanner
*/
public final class CodeScannerView extends ViewGroup {
private static final boolean DEFAULT_AUTO_FOCUS_BUTTON_VISIBLE = true;
private static final boolean DEFAULT_FLASH_BUTTON_VISIBLE = true;
private static final int DEFAULT_AUTO_FOCUS_BUTTON_VISIBILITY = VISIBLE;
private static final int DEFAULT_FLASH_BUTTON_VISIBILITY = VISIBLE;
private static final int DEFAULT_MASK_COLOR = 0x77000000;
private static final int DEFAULT_FRAME_COLOR = Color.WHITE;
private static final int DEFAULT_AUTO_FOCUS_BUTTON_COLOR = Color.WHITE;
private static final int DEFAULT_FLASH_BUTTON_COLOR = Color.WHITE;
private static final float DEFAULT_FRAME_THICKNESS_DP = 2f;
private static final float DEFAULT_FRAME_ASPECT_RATIO_WIDTH = 1f;
private static final float DEFAULT_FRAME_ASPECT_RATIO_HEIGHT = 1f;
private static final float DEFAULT_FRAME_CORNER_SIZE_DP = 50f;
private static final float DEFAULT_FRAME_CORNERS_RADIUS_DP = 0f;
private static final float DEFAULT_FRAME_SIZE = 0.75f;
private static final float BUTTON_SIZE_DP = 56f;
private static final float FOCUS_AREA_SIZE_DP = 20f;
private SurfaceView mPreviewView;
private ViewFinderView mViewFinderView;
private ImageView mAutoFocusButton;
private ImageView mFlashButton;
private Point mPreviewSize;
private SizeListener mSizeListener;
private CodeScanner mCodeScanner;
private int mButtonSize;
private int mAutoFocusButtonColor;
private int mFlashButtonColor;
private int mFocusAreaSize;
/**
* A view to display code scanner preview
*
* @see CodeScanner
*/
public CodeScannerView(@NonNull final Context context) {
super(context);
initialize(context, null, 0, 0);
}
/**
* A view to display code scanner preview
*
* @see CodeScanner
*/
public CodeScannerView(@NonNull final Context context, @Nullable final AttributeSet attrs) {
super(context, attrs);
initialize(context, attrs, 0, 0);
}
/**
* A view to display code scanner preview
*
* @see CodeScanner
*/
public CodeScannerView(@NonNull final Context context, @Nullable final AttributeSet attrs,
@AttrRes final int defStyleAttr) {
super(context, attrs, defStyleAttr);
initialize(context, attrs, defStyleAttr, 0);
}
/**
* A view to display code scanner preview
*
* @see CodeScanner
*/
@RequiresApi(Build.VERSION_CODES.LOLLIPOP)
public CodeScannerView(final Context context, final AttributeSet attrs,
@AttrRes final int defStyleAttr, @StyleRes final int defStyleRes) {
super(context, attrs, defStyleAttr, defStyleRes);
initialize(context, attrs, defStyleAttr, defStyleRes);
}
private void initialize(@NonNull final Context context, @Nullable final AttributeSet attrs,
@AttrRes final int defStyleAttr, @StyleRes final int defStyleRes) {
mPreviewView = new SurfaceView(context);
mPreviewView.setLayoutParams(
new LayoutParams(LayoutParams.MATCH_PARENT, LayoutParams.MATCH_PARENT));
mViewFinderView = new ViewFinderView(context);
mViewFinderView.setLayoutParams(
new LayoutParams(LayoutParams.MATCH_PARENT, LayoutParams.MATCH_PARENT));
final float density = context.getResources().getDisplayMetrics().density;
mButtonSize = Math.round(density * BUTTON_SIZE_DP);
mFocusAreaSize = Math.round(density * FOCUS_AREA_SIZE_DP);
mAutoFocusButton = new ImageView(context);
mAutoFocusButton.setLayoutParams(new LayoutParams(mButtonSize, mButtonSize));
mAutoFocusButton.setScaleType(ImageView.ScaleType.CENTER);
mAutoFocusButton.setImageResource(R.drawable.ic_code_scanner_auto_focus_on);
mAutoFocusButton.setOnClickListener(new AutoFocusClickListener());
mFlashButton = new ImageView(context);
mFlashButton.setLayoutParams(new LayoutParams(mButtonSize, mButtonSize));
mFlashButton.setScaleType(ImageView.ScaleType.CENTER);
mFlashButton.setImageResource(R.drawable.ic_code_scanner_flash_on);
mFlashButton.setOnClickListener(new FlashClickListener());
if (attrs == null) {
mViewFinderView.setFrameAspectRatio(DEFAULT_FRAME_ASPECT_RATIO_WIDTH,
DEFAULT_FRAME_ASPECT_RATIO_HEIGHT);
mViewFinderView.setMaskColor(DEFAULT_MASK_COLOR);
mViewFinderView.setFrameColor(DEFAULT_FRAME_COLOR);
mViewFinderView.setFrameThickness(Math.round(DEFAULT_FRAME_THICKNESS_DP * density));
mViewFinderView.setFrameCornersSize(Math.round(DEFAULT_FRAME_CORNER_SIZE_DP * density));
mViewFinderView
.setFrameCornersRadius(Math.round(DEFAULT_FRAME_CORNERS_RADIUS_DP * density));
mViewFinderView.setFrameSize(DEFAULT_FRAME_SIZE);
mAutoFocusButton.setColorFilter(DEFAULT_AUTO_FOCUS_BUTTON_COLOR);
mFlashButton.setColorFilter(DEFAULT_FLASH_BUTTON_COLOR);
mAutoFocusButton.setVisibility(DEFAULT_AUTO_FOCUS_BUTTON_VISIBILITY);
mFlashButton.setVisibility(DEFAULT_FLASH_BUTTON_VISIBILITY);
} else {
TypedArray a = null;
try {
a = context.getTheme()
.obtainStyledAttributes(attrs, R.styleable.CodeScannerView, defStyleAttr,
defStyleRes);
setMaskColor(a.getColor(R.styleable.CodeScannerView_maskColor, DEFAULT_MASK_COLOR));
setFrameColor(
a.getColor(R.styleable.CodeScannerView_frameColor, DEFAULT_FRAME_COLOR));
setFrameThickness(
a.getDimensionPixelOffset(R.styleable.CodeScannerView_frameThickness,
Math.round(DEFAULT_FRAME_THICKNESS_DP * density)));
setFrameCornersSize(
a.getDimensionPixelOffset(R.styleable.CodeScannerView_frameCornersSize,
Math.round(DEFAULT_FRAME_CORNER_SIZE_DP * density)));
setFrameCornersRadius(
a.getDimensionPixelOffset(R.styleable.CodeScannerView_frameCornersRadius,
Math.round(DEFAULT_FRAME_CORNERS_RADIUS_DP * density)));
setFrameAspectRatio(a.getFloat(R.styleable.CodeScannerView_frameAspectRatioWidth,
DEFAULT_FRAME_ASPECT_RATIO_WIDTH),
a.getFloat(R.styleable.CodeScannerView_frameAspectRatioHeight,
DEFAULT_FRAME_ASPECT_RATIO_HEIGHT));
setFrameSize(a.getFloat(R.styleable.CodeScannerView_frameSize, DEFAULT_FRAME_SIZE));
setAutoFocusButtonVisible(
a.getBoolean(R.styleable.CodeScannerView_autoFocusButtonVisible,
DEFAULT_AUTO_FOCUS_BUTTON_VISIBLE));
setFlashButtonVisible(a.getBoolean(R.styleable.CodeScannerView_flashButtonVisible,
DEFAULT_FLASH_BUTTON_VISIBLE));
setAutoFocusButtonColor(a.getColor(R.styleable.CodeScannerView_autoFocusButtonColor,
DEFAULT_AUTO_FOCUS_BUTTON_COLOR));
setFlashButtonColor(a.getColor(R.styleable.CodeScannerView_flashButtonColor,
DEFAULT_FLASH_BUTTON_COLOR));
} finally {
if (a != null) {
a.recycle();
}
}
}
addView(mPreviewView);
addView(mViewFinderView);
addView(mAutoFocusButton);
addView(mFlashButton);
// NOTE: mCodeScanner is not yet attached here (it is set later via setCodeScanner()),
// so the block below is effectively a no-op during initialization; with a scanner
// attached it would toggle auto focus, mirroring AutoFocusClickListener.
final CodeScanner scanner = mCodeScanner;
if (scanner == null || !scanner.isAutoFocusSupportedOrUnknown()) {
return;
}
final boolean enabled = !scanner.isAutoFocusEnabled();
scanner.setAutoFocusEnabled(enabled);
setAutoFocusEnabled(enabled);
}
@Override
protected void onLayout(final boolean changed, final int left, final int top, final int right,
final int bottom) {
performLayout(right - left, bottom - top);
}
@Override
protected void onSizeChanged(final int width, final int height, final int oldWidth,
final int oldHeight) {
performLayout(width, height);
final SizeListener listener = mSizeListener;
if (listener != null) {
listener.onSizeChanged(width, height);
}
}
@Override
@SuppressLint("ClickableViewAccessibility")
public boolean onTouchEvent(@NonNull final MotionEvent event) {
final CodeScanner codeScanner = mCodeScanner;
final Rect frameRect = getFrameRect();
final int x = (int) event.getX();
final int y = (int) event.getY();
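        // Touch-to-focus: if the tap lands inside the framing rect and touch focus is
        // enabled, request focus on a small square area around the tap, clamped to the frame.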
if (codeScanner != null && frameRect != null &&
codeScanner.isAutoFocusSupportedOrUnknown() && codeScanner.isTouchFocusEnabled() &&
event.getAction() == MotionEvent.ACTION_DOWN && frameRect.isPointInside(x, y)) {
final int areaSize = mFocusAreaSize;
codeScanner.performTouchFocus(
new Rect(x - areaSize, y - areaSize, x + areaSize, y + areaSize)
.fitIn(frameRect));
}
return super.onTouchEvent(event);
}
/**
* Get current mask color
*
* @see #setMaskColor
*/
@ColorInt
public int getMaskColor() {
return mViewFinderView.getMaskColor();
}
/**
* Set color of the space outside of the framing rect
*
* @param color Mask color
*/
public void setMaskColor(@ColorInt final int color) {
mViewFinderView.setMaskColor(color);
}
/**
* Get current frame color
*
* @see #setFrameColor
*/
@ColorInt
public int getFrameColor() {
return mViewFinderView.getFrameColor();
}
/**
* Set color of the frame
*
* @param color Frame color
*/
public void setFrameColor(@ColorInt final int color) {
mViewFinderView.setFrameColor(color);
}
/**
* Get current frame thickness
*
* @see #setFrameThickness
*/
@Px
public int getFrameThickness() {
return mViewFinderView.getFrameThickness();
}
/**
* Set frame thickness
*
* @param thickness Frame thickness in pixels
*/
public void setFrameThickness(@Px final int thickness) {
if (thickness < 0) {
throw new IllegalArgumentException("Frame thickness can't be negative");
}
mViewFinderView.setFrameThickness(thickness);
}
/**
* Get current frame corners size
*
* @see #setFrameCornersSize
*/
@Px
public int getFrameCornersSize() {
return mViewFinderView.getFrameCornersSize();
}
/**
* Set size of the frame corners
*
* @param size Size in pixels
*/
public void setFrameCornersSize(@Px final int size) {
if (size < 0) {
throw new IllegalArgumentException("Frame corners size can't be negative");
}
mViewFinderView.setFrameCornersSize(size);
}
/**
* Get current frame corners radius
*
* @see #setFrameCornersRadius
*/
@Px
public int getFrameCornersRadius() {
return mViewFinderView.getFrameCornersRadius();
}
/**
* Set current frame corners radius
*
* @param radius Frame corners radius in pixels
*/
public void setFrameCornersRadius(@Px final int radius) {
if (radius < 0) {
throw new IllegalArgumentException("Frame corners radius can't be negative");
}
mViewFinderView.setFrameCornersRadius(radius);
}
/**
* Get current frame size
*
* @see #setFrameSize
*/
@FloatRange(from = 0.1, to = 1.0)
public float getFrameSize() {
return mViewFinderView.getFrameSize();
}
/**
* Set relative frame size where 1.0 means full size
*
* @param size Relative frame size between 0.1 and 1.0
*/
public void setFrameSize(@FloatRange(from = 0.1, to = 1) final float size) {
if (size < 0.1 || size > 1) {
throw new IllegalArgumentException(
"Max frame size value should be between 0.1 and 1, inclusive");
}
mViewFinderView.setFrameSize(size);
}
/**
* Get current frame aspect ratio width
*
* @see #setFrameAspectRatioWidth
* @see #setFrameAspectRatio
*/
@FloatRange(from = 0, fromInclusive = false)
public float getFrameAspectRatioWidth() {
return mViewFinderView.getFrameAspectRatioWidth();
}
/**
* Set frame aspect ratio width
*
* @param ratioWidth Frame aspect ratio width
* @see #setFrameAspectRatio
*/
public void setFrameAspectRatioWidth(
@FloatRange(from = 0, fromInclusive = false) final float ratioWidth) {
if (ratioWidth <= 0) {
throw new IllegalArgumentException(
"Frame aspect ratio values should be greater than zero");
}
mViewFinderView.setFrameAspectRatioWidth(ratioWidth);
}
/**
* Get current frame aspect ratio height
*
* @see #setFrameAspectRatioHeight
* @see #setFrameAspectRatio
*/
@FloatRange(from = 0, fromInclusive = false)
public float getFrameAspectRatioHeight() {
return mViewFinderView.getFrameAspectRatioHeight();
}
/**
* Set frame aspect ratio height
*
 * @param ratioHeight Frame aspect ratio height
* @see #setFrameAspectRatio
*/
public void setFrameAspectRatioHeight(
@FloatRange(from = 0, fromInclusive = false) final float ratioHeight) {
if (ratioHeight <= 0) {
throw new IllegalArgumentException(
"Frame aspect ratio values should be greater than zero");
}
mViewFinderView.setFrameAspectRatioHeight(ratioHeight);
}
/**
* Set frame aspect ratio (ex. 1:1, 15:10, 16:9, 4:3)
*
* @param ratioWidth Frame aspect ratio width
* @param ratioHeight Frame aspect ratio height
*/
public void setFrameAspectRatio(
@FloatRange(from = 0, fromInclusive = false) final float ratioWidth,
@FloatRange(from = 0, fromInclusive = false) final float ratioHeight) {
if (ratioWidth <= 0 || ratioHeight <= 0) {
throw new IllegalArgumentException(
"Frame aspect ratio values should be greater than zero");
}
mViewFinderView.setFrameAspectRatio(ratioWidth, ratioHeight);
}
/**
 * Whether the auto focus button is currently visible
*
* @see #setAutoFocusButtonVisible
*/
public boolean isAutoFocusButtonVisible() {
return mAutoFocusButton.getVisibility() == VISIBLE;
}
/**
* Set whether auto focus button is visible or not
*
* @param visible Visibility
*/
public void setAutoFocusButtonVisible(final boolean visible) {
mAutoFocusButton.setVisibility(visible ? VISIBLE : INVISIBLE);
}
/**
 * Whether the flash button is currently visible
*
* @see #setFlashButtonVisible
*/
public boolean isFlashButtonVisible() {
return mFlashButton.getVisibility() == VISIBLE;
}
/**
* Set whether flash button is visible or not
*
* @param visible Visibility
*/
public void setFlashButtonVisible(final boolean visible) {
mFlashButton.setVisibility(visible ? VISIBLE : INVISIBLE);
}
/**
* Get current auto focus button color
*
* @see #setAutoFocusButtonColor
*/
@ColorInt
public int getAutoFocusButtonColor() {
return mAutoFocusButtonColor;
}
/**
* Set auto focus button color
*
* @param color Color
*/
public void setAutoFocusButtonColor(@ColorInt final int color) {
mAutoFocusButtonColor = color;
mAutoFocusButton.setColorFilter(color);
}
/**
* Get current flash button color
*
* @see #setFlashButtonColor
*/
@ColorInt
public int getFlashButtonColor() {
return mFlashButtonColor;
}
/**
* Set flash button color
*
* @param color Color
*/
public void setFlashButtonColor(@ColorInt final int color) {
mFlashButtonColor = color;
mFlashButton.setColorFilter(color);
}
@NonNull
SurfaceView getPreviewView() {
return mPreviewView;
}
@NonNull
ViewFinderView getViewFinderView() {
return mViewFinderView;
}
@Nullable
Rect getFrameRect() {
return mViewFinderView.getFrameRect();
}
void setPreviewSize(@Nullable final Point previewSize) {
mPreviewSize = previewSize;
requestLayout();
}
void setSizeListener(@Nullable final SizeListener sizeListener) {
mSizeListener = sizeListener;
}
void setCodeScanner(@NonNull final CodeScanner codeScanner) {
if (mCodeScanner != null) {
throw new IllegalStateException("Code scanner has already been set");
}
mCodeScanner = codeScanner;
setAutoFocusEnabled(codeScanner.isAutoFocusEnabled());
setFlashEnabled(codeScanner.isFlashEnabled());
}
void setAutoFocusEnabled(final boolean enabled) {
mAutoFocusButton.setImageResource(enabled ? R.drawable.ic_code_scanner_auto_focus_on :
R.drawable.ic_code_scanner_auto_focus_off);
}
void setFlashEnabled(final boolean enabled) {
mFlashButton.setImageResource(enabled ? R.drawable.ic_code_scanner_flash_on :
R.drawable.ic_code_scanner_flash_off);
}
private void performLayout(final int width, final int height) {
final Point previewSize = mPreviewSize;
if (previewSize == null) {
mPreviewView.layout(0, 0, width, height);
} else {
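            // The preview can be larger than the view in one or both dimensions
            // (center-crop behaviour): extend the layout bounds symmetrically so the
            // oversized preview stays centered behind the view finder.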
int frameLeft = 0;
int frameTop = 0;
int frameRight = width;
int frameBottom = height;
final int previewWidth = previewSize.getX();
if (previewWidth > width) {
final int d = (previewWidth - width) / 2;
frameLeft -= d;
frameRight += d;
}
final int previewHeight = previewSize.getY();
if (previewHeight > height) {
final int d = (previewHeight - height) / 2;
frameTop -= d;
frameBottom += d;
}
mPreviewView.layout(frameLeft, frameTop, frameRight, frameBottom);
}
mViewFinderView.layout(0, 0, width, height);
final int buttonSize = mButtonSize;
mAutoFocusButton.layout(0, 0, buttonSize, buttonSize);
mFlashButton.layout(width - buttonSize, 0, width, buttonSize);
}
interface SizeListener {
void onSizeChanged(int width, int height);
}
private final class AutoFocusClickListener implements OnClickListener {
@Override
public void onClick(final View view) {
final CodeScanner scanner = mCodeScanner;
if (scanner == null || !scanner.isAutoFocusSupportedOrUnknown()) {
return;
}
final boolean enabled = !scanner.isAutoFocusEnabled();
scanner.setAutoFocusEnabled(enabled);
setAutoFocusEnabled(enabled);
}
}
private final class FlashClickListener implements OnClickListener {
@Override
public void onClick(final View view) {
final CodeScanner scanner = mCodeScanner;
if (scanner == null || !scanner.isFlashSupportedOrUnknown()) {
return;
}
final boolean enabled = !scanner.isFlashEnabled();
scanner.setFlashEnabled(enabled);
setFlashEnabled(enabled);
}
}
}
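A minimal usage sketch (hypothetical, not part of the original file): it assumes an Activity whose layout contains a CodeScannerView under the assumed id R.id.scanner_view, and the accompanying CodeScanner class from this package, whose (Context, CodeScannerView) constructor attaches itself to the view via setCodeScanner(), as in the upstream code-scanner library.
// Hypothetical Activity code; the layout id and CodeScanner calls are assumptions.
CodeScannerView scannerView = findViewById(R.id.scanner_view);
scannerView.setMaskColor(0x77000000);
scannerView.setFrameColor(Color.WHITE);
scannerView.setFrameAspectRatio(1f, 1f); // square framing rect
scannerView.setFrameSize(0.75f);         // 75% of the smaller view dimension
scannerView.setAutoFocusButtonVisible(true);
scannerView.setFlashButtonVisible(true);
CodeScanner codeScanner = new CodeScanner(this, scannerView);
codeScanner.startPreview(); // assumed upstream API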
| 9,216 |
1,144 | package de.metas.handlingunits.picking;
import javax.annotation.Nullable;
import de.metas.handlingunits.HuId;
import org.eevolution.api.PPOrderId;
import lombok.Builder;
import lombok.NonNull;
import lombok.Value;
/*
* #%L
* de.metas.handlingunits.base
* %%
* Copyright (C) 2019 metas GmbH
* %%
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as
* published by the Free Software Foundation, either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program. If not, see
* <http://www.gnu.org/licenses/gpl-2.0.html>.
* #L%
*/
@Value
@Builder
public class PickFrom
{
public static PickFrom ofHuId(@NonNull final HuId huId)
{
return builder().huId(huId).build();
}
public static PickFrom ofPickingOrderId(@NonNull final PPOrderId pickingOrderId)
{
return builder().pickingOrderId(pickingOrderId).build();
}
@Nullable
HuId huId;
@Nullable
PPOrderId pickingOrderId;
@Builder
private PickFrom(
@Nullable HuId huId,
@Nullable PPOrderId pickingOrderId)
{
this.pickingOrderId = pickingOrderId;
this.huId = huId;
}
public boolean isPickFromHU()
{
return getHuId() != null;
}
public boolean isPickFromPickingOrder()
{
return getPickingOrderId() != null;
}
}
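A brief usage sketch (hypothetical, not part of the original file) of the two factory methods; huId and ppOrderId stand for identifiers obtained elsewhere.
// Hypothetical snippet.
final PickFrom fromHU = PickFrom.ofHuId(huId);
// fromHU.isPickFromHU() == true, fromHU.isPickFromPickingOrder() == false
final PickFrom fromOrder = PickFrom.ofPickingOrderId(ppOrderId);
// fromOrder.getPickingOrderId() is set, fromOrder.getHuId() is null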
| 549 |
620 | /*
* Copyright (C) 2009-2011 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.parboiled;
/**
* Interface that can be implemented by classes containing action methods.
* If the class containing action methods implements this interface parboiled will use it to inform the
* instance of the current context, immediately before an action call.
*/
public interface ContextAware<V> {
/**
* Called immediately before any parser action method invocation. Informs the object containing the
* action about the context to be used for the coming action call.
*
* @param context the context
*/
void setContext(Context<V> context);
}
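A minimal implementation sketch (hypothetical, not part of the original file): a parser-actions class that stores the context parboiled passes in before each action call. Context#getMatch() is assumed from the parboiled Context API.
public class LoggingActions implements ContextAware<Object> {
    private Context<Object> context;

    @Override
    public void setContext(Context<Object> context) {
        // parboiled invokes this immediately before each action method call
        this.context = context;
    }

    public boolean logMatch() {
        System.out.println("matched: " + context.getMatch());
        return true; // returning true lets the parse continue
    }
}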
| 310 |