#!/usr/bin/python
# Copyright (C) <NAME> 2018
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# Tests the check-has-flag rule
import BoostBuild
t = BoostBuild.Tester(use_test_config=False)
# We need an object file before we can run the actual test.
t.write('input.cpp', 'void f() {}\n')
t.write('Jamroot.jam', 'obj input : input.cpp ;')
t.run_build_system()
linker_input = t.glob_file('bin/$toolset/debug*/input.obj')
# Check every possible result of pass or fail.
t.write('Jamroot.jam', '''
import flags ;
import modules ;
OBJECT_FILE = [ modules.peek : OBJECT_FILE ] ;
obj fail_cpp : test.cpp : [ check-has-flag <cxxflags>--illegal-flag-cpp
: <define>ERROR : <define>OK ] ;
obj pass_cpp : test.cpp : [ check-has-flag <cxxflags>-DMACRO_CPP
: <define>OK : <define>ERROR ] ;
obj fail_c : test.cpp : [ check-has-flag <cflags>--illegal-flag-c
: <define>ERROR : <define>OK ] ;
obj pass_c : test.cpp : [ check-has-flag <cflags>-DMACRO_C
: <define>OK : <define>ERROR ] ;
obj fail_link : test.cpp : [ check-has-flag <linkflags>--illegal-flag-link
: <define>ERROR : <define>OK ] ;
# The only thing that we can be certain the linker
# will accept is the name of an object file.
obj pass_link : test.cpp : [ check-has-flag <linkflags>$(OBJECT_FILE)
: <define>OK : <define>ERROR ] ;
''')
t.write('test.cpp', '''
#ifdef ERROR
#error ERROR defined
#endif
#ifndef OK
#error ERROR not defined
#endif
''')
# Don't check the status immediately, so that we have a chance
# to print config.log. Also, we need a minimum of d2 to make
# sure that we always see the commands and output.
t.run_build_system(['-sOBJECT_FILE=' + linker_input, '-d2'], status=None)
if t.status != 0:
log_file = t.read('bin/config.log')
BoostBuild.annotation("config.log", log_file)
t.fail_test(True)
t.expect_output_lines([' - has --illegal-flag-cpp : no',
' - has -DMACRO_CPP : yes',
' - has --illegal-flag-c : no',
' - has -DMACRO_C : yes',
' - has --illegal-flag-link : no',
' - has *bin*/input.* : yes'])
t.expect_addition('bin/$toolset/debug*/fail_cpp.obj')
t.expect_addition('bin/$toolset/debug*/pass_cpp.obj')
t.expect_addition('bin/$toolset/debug*/fail_c.obj')
t.expect_addition('bin/$toolset/debug*/pass_c.obj')
t.expect_addition('bin/$toolset/debug*/fail_link.obj')
t.expect_addition('bin/$toolset/debug*/pass_link.obj')
t.cleanup()
// From repository: BurAndBY/pkgj
#pragma once
#include <functional>
#include <memory>
#include <stdexcept>
#include <string>
#include <cstdint>
#include "http.hpp"
class FileDownload
{
public:
std::function<void(uint64_t download_offset, uint64_t download_size)>
update_progress_cb;
std::function<bool()> is_canceled;
FileDownload(std::unique_ptr<Http> http);
void download(
const std::string& partition,
const std::string& titleid,
const std::string& url);
private:
std::string root;
std::unique_ptr<Http> _http;
uint64_t download_size;
uint64_t download_offset;
std::string download_url;
void* item_file;
void update_progress();
void start_download();
void download_data(uint32_t size);
void download_file();
};
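// Illustrative usage sketch (not part of the original header). The concrete
// Http subclass name, partition, title id and URL below are placeholder
// assumptions; real call sites live elsewhere in the project.
//
//   auto http = std::make_unique<HttpImpl>();    // some concrete Http implementation
//   FileDownload dl(std::move(http));
//   dl.update_progress_cb = [](uint64_t offset, uint64_t size) {
//       /* refresh a progress indicator */
//   };
//   dl.is_canceled = [] { return false; };       // never cancel in this sketch
//   dl.download("ux0:", "TITLE00000", "http://example.com/file.pkg");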
{
"schema_version": "1.2.0",
"id": "GHSA-g5mc-2cpg-f638",
"modified": "2022-04-05T00:00:45Z",
"published": "2022-03-30T00:00:33Z",
"aliases": [
"CVE-2021-43099"
],
"details": "An Archive Extraction (AKA \"Zip Slip) vulnerability exists in bbs 5.3 in the UpgradeNow function in UpgradeManageAction.java, which unzips the arbitrary upladed zip file without checking filenames. The vulnerability is exploited using a specially crafted archive that holds directory traversal filenames (e.g. ../../evil.exe).",
"severity": [
{
"type": "CVSS_V3",
"score": "CVSS:3.1/AV:N/AC:L/PR:H/UI:N/S:U/C:N/I:H/A:N"
}
],
"affected": [
],
"references": [
{
"type": "ADVISORY",
"url": "https://nvd.nist.gov/vuln/detail/CVE-2021-43099"
},
{
"type": "WEB",
"url": "https://github.com/diyhi/bbs/issues/51"
}
],
"database_specific": {
"cwe_ids": [
"CWE-22"
],
"severity": "MODERATE",
"github_reviewed": false
}
}
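// Illustrative sketch (not from the bbs project): the standard mitigation for
// the "Zip Slip" issue described in the advisory above is to resolve each
// archive entry against the extraction directory and reject entries that
// escape it. Class and method names here are assumptions for illustration only.
import java.io.File;
import java.io.IOException;
import java.util.zip.ZipEntry;

class ZipSlipGuard {
    /** Returns the safe destination file, or throws if the entry escapes destDir. */
    static File resolveEntry(File destDir, ZipEntry entry) throws IOException {
        File target = new File(destDir, entry.getName());
        String destPath = destDir.getCanonicalPath() + File.separator;
        if (!target.getCanonicalPath().startsWith(destPath)) {
            throw new IOException("Blocked Zip Slip entry: " + entry.getName());
        }
        return target;
    }
}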
// From repository: LaudateCorpus1/llvm-project
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// <vector>
// UNSUPPORTED: c++03, c++11, c++14
// template <class InputIterator, class Allocator = allocator<typename iterator_traits<InputIterator>::value_type>>
// vector(InputIterator, InputIterator, Allocator = Allocator())
// -> vector<typename iterator_traits<InputIterator>::value_type, Allocator>;
//
#include <vector>
#include <cassert>
#include <cstddef>
#include <climits> // INT_MAX
#include <iterator>
#include <type_traits>
#include "deduction_guides_sfinae_checks.h"
#include "test_macros.h"
#include "test_iterators.h"
#include "test_allocator.h"
struct A {};
int main(int, char**)
{
// Test the explicit deduction guides
{
const int arr[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
std::vector vec(std::begin(arr), std::end(arr));
static_assert(std::is_same_v<decltype(vec), std::vector<int>>, "");
assert(std::equal(vec.begin(), vec.end(), std::begin(arr), std::end(arr)));
}
{
const long arr[] = {INT_MAX, 1L, 2L, 3L };
std::vector vec(std::begin(arr), std::end(arr), std::allocator<long>());
static_assert(std::is_same_v<decltype(vec)::value_type, long>, "");
assert(vec.size() == 4);
assert(vec[0] == INT_MAX);
assert(vec[1] == 1L);
assert(vec[2] == 2L);
}
// Test the implicit deduction guides
{
// We don't expect this one to work.
// std::vector vec(std::allocator<int>()); // vector (allocator &)
}
{
std::vector vec(1, A{}); // vector (size_type, T)
static_assert(std::is_same_v<decltype(vec)::value_type, A>, "");
static_assert(std::is_same_v<decltype(vec)::allocator_type, std::allocator<A>>, "");
assert(vec.size() == 1);
}
{
std::vector vec(1, A{}, test_allocator<A>()); // vector (size_type, T, allocator)
static_assert(std::is_same_v<decltype(vec)::value_type, A>, "");
static_assert(std::is_same_v<decltype(vec)::allocator_type, test_allocator<A>>, "");
assert(vec.size() == 1);
}
{
std::vector vec{1U, 2U, 3U, 4U, 5U}; // vector(initializer-list)
static_assert(std::is_same_v<decltype(vec)::value_type, unsigned>, "");
assert(vec.size() == 5);
assert(vec[2] == 3U);
}
{
std::vector vec({1.0, 2.0, 3.0, 4.0}, test_allocator<double>()); // vector(initializer-list, allocator)
static_assert(std::is_same_v<decltype(vec)::value_type, double>, "");
static_assert(std::is_same_v<decltype(vec)::allocator_type, test_allocator<double>>, "");
assert(vec.size() == 4);
assert(vec[3] == 4.0);
}
{
std::vector<long double> source;
std::vector vec(source); // vector(vector &)
static_assert(std::is_same_v<decltype(vec)::value_type, long double>, "");
static_assert(std::is_same_v<decltype(vec)::allocator_type, std::allocator<long double>>, "");
assert(vec.size() == 0);
}
// A couple of vector<bool> tests, too!
{
    std::vector vec(3, true); // vector(size_type, T)
static_assert(std::is_same_v<decltype(vec)::value_type, bool>, "");
static_assert(std::is_same_v<decltype(vec)::allocator_type, std::allocator<bool>>, "");
assert(vec.size() == 3);
assert(vec[0] && vec[1] && vec[2]);
}
{
std::vector<bool> source;
std::vector vec(source); // vector(vector &)
static_assert(std::is_same_v<decltype(vec)::value_type, bool>, "");
static_assert(std::is_same_v<decltype(vec)::allocator_type, std::allocator<bool>>, "");
assert(vec.size() == 0);
}
{
typedef test_allocator<short> Alloc;
typedef test_allocator<int> ConvertibleToAlloc;
{
std::vector<short, Alloc> source;
std::vector vec(source, Alloc(2));
static_assert(std::is_same_v<decltype(vec), decltype(source)>);
}
{
std::vector<short, Alloc> source;
std::vector vec(source, ConvertibleToAlloc(2));
static_assert(std::is_same_v<decltype(vec), decltype(source)>);
}
{
std::vector<short, Alloc> source;
std::vector vec(std::move(source), Alloc(2));
static_assert(std::is_same_v<decltype(vec), decltype(source)>);
}
{
std::vector<short, Alloc> source;
std::vector vec(std::move(source), ConvertibleToAlloc(2));
static_assert(std::is_same_v<decltype(vec), decltype(source)>);
}
}
SequenceContainerDeductionGuidesSfinaeAway<std::vector, std::vector<int>>();
return 0;
}
# File: app/apiv2/__init__.py
from flask import Blueprint
import flask_restful
from flask.ext.restful.representations.json import output_json
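# Patch flask-restful's JSON output settings so non-ASCII characters are
# returned as UTF-8 text instead of being escaped (legacy Python 2 style patch).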
output_json.func_globals['settings'] = {
'ensure_ascii': False,
'encoding': 'utf8'
}
apiv2 = Blueprint('apiv2', __name__, template_folder='templates')
apiv2_rest = flask_restful.Api(apiv2)
from . import routes
from __future__ import print_function
import importlib, inspect, os, sys
import numpy as np
from sklearn.datasets import make_classification, make_regression
from sklearn.metrics import accuracy_score, r2_score
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
import h2o
from h2o.sklearn import H2OAutoMLEstimator, H2OAutoMLClassifier, H2OAutoMLRegressor
from h2o.sklearn.wrapper import H2OConnectionMonitorMixin
sys.path.insert(1, os.path.join("..",".."))
from tests import pyunit_utils, Namespace as ns
"""
This test suite creates sklearn pipelines using either a mix of sklearn+H2O components,
or only H2O components.
Then, it feeds them with H2O frames (more efficient, and ensures compatibility with the old API)
or with numpy arrays, which is the simplest approach for users who want to use H2O like any other sklearn estimator.
"""
seed = 2019
init_connection_args = dict(strict_version_check=False, show_progress=True)
max_models = 3
scores = {}
def _get_data(format='numpy', n_classes=2):
generator = make_classification if n_classes > 0 else make_regression
params = dict(n_samples=100, n_features=5, n_informative=n_classes or 2, random_state=seed)
if generator is make_classification:
params.update(n_classes=n_classes)
X, y = generator(**params)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=seed)
data = ns(X_train=X_train, X_test=X_test, y_train=y_train, y_test=y_test)
if format == 'h2o':
for k, v in data.__dict__.items():
setattr(data, k, h2o.H2OFrame(v))
return data
def test_binomial_classification_with_h2o_frames():
pipeline = make_pipeline(H2OAutoMLClassifier(seed=seed))
pipeline.set_params(
h2oautomlclassifier__max_models=max_models,
h2oautomlclassifier__nfolds=3
)
pipeline.named_steps.h2oautomlclassifier.exclude_algos = ['XGBoost']
data = _get_data(format='h2o', n_classes=2)
assert isinstance(data.X_train, h2o.H2OFrame)
pipeline.fit(data.X_train, data.y_train)
assert len(pipeline.named_steps.h2oautomlclassifier.estimator.leaderboard) >= max_models + 1
preds = pipeline.predict(data.X_test)
assert isinstance(preds, h2o.H2OFrame)
assert preds.dim == [len(data.X_test), 1]
probs = pipeline.predict_proba(data.X_test)
assert probs.dim == [len(data.X_test), 2]
score = pipeline.score(data.X_test, data.y_test)
assert isinstance(score, float)
skl_score = accuracy_score(data.y_test.as_data_frame().values, preds.as_data_frame().values)
assert abs(score - skl_score) < 1e-6, "score={}, skl_score={}".format(score, skl_score)
def test_multinomial_classification_with_numpy_frames():
pipeline = make_pipeline(H2OAutoMLClassifier(seed=seed, init_connection_args=init_connection_args))
pipeline.set_params(
h2oautomlclassifier__max_models=max_models,
h2oautomlclassifier__nfolds=3
)
pipeline.named_steps.h2oautomlclassifier.exclude_algos = ['XGBoost']
data = _get_data(format='numpy', n_classes=3)
assert isinstance(data.X_train, np.ndarray)
pipeline.fit(data.X_train, data.y_train)
assert len(pipeline.named_steps.h2oautomlclassifier.estimator.leaderboard) >= max_models + 1
preds = pipeline.predict(data.X_test)
assert isinstance(preds, np.ndarray)
assert preds.shape == (len(data.X_test),)
probs = pipeline.predict_proba(data.X_test)
assert probs.shape == (len(data.X_test), 3)
assert np.allclose(np.sum(probs, axis=1), 1.), "`predict_proba` didn't return probabilities"
score = pipeline.score(data.X_test, data.y_test)
assert isinstance(score, float)
skl_score = accuracy_score(data.y_test, preds)
assert abs(score - skl_score) < 1e-6, "score={}, skl_score={}".format(score, skl_score)
def test_regression_with_numpy_frames():
pipeline = make_pipeline(H2OAutoMLRegressor(seed=seed, init_connection_args=init_connection_args))
pipeline.set_params(
h2oautomlregressor__max_models=max_models,
h2oautomlregressor__nfolds=3
)
pipeline.named_steps.h2oautomlregressor.exclude_algos = ['XGBoost']
data = _get_data(format='numpy', n_classes=0)
assert isinstance(data.X_train, np.ndarray)
pipeline.fit(data.X_train, data.y_train)
assert len(pipeline.named_steps.h2oautomlregressor.estimator.leaderboard) >= max_models + 1
preds = pipeline.predict(data.X_test)
assert isinstance(preds, np.ndarray)
assert preds.shape == (len(data.X_test),)
score = pipeline.score(data.X_test, data.y_test)
assert isinstance(score, float)
skl_score = r2_score(data.y_test, preds)
assert abs(score - skl_score) < 1e-6, "score={}, skl_score={}".format(score, skl_score)
def test_generic_estimator_for_classification():
pipeline = make_pipeline(H2OAutoMLEstimator(estimator_type='classifier', seed=seed,
init_connection_args=init_connection_args))
pipeline.set_params(
h2oautomlestimator__max_models=max_models,
h2oautomlestimator__nfolds=3
)
pipeline.named_steps.h2oautomlestimator.exclude_algos = ['XGBoost']
data = _get_data(format='numpy', n_classes=3)
assert isinstance(data.X_train, np.ndarray)
pipeline.fit(data.X_train, data.y_train)
assert len(pipeline.named_steps.h2oautomlestimator.estimator.leaderboard) >= max_models + 1
preds = pipeline.predict(data.X_test)
assert isinstance(preds, np.ndarray)
assert preds.shape == (len(data.X_test),)
probs = pipeline.predict_proba(data.X_test)
assert probs.shape == (len(data.X_test), 3)
assert np.allclose(np.sum(probs, axis=1), 1.), "`predict_proba` didn't return probabilities"
score = pipeline.score(data.X_test, data.y_test)
assert isinstance(score, float)
skl_score = accuracy_score(data.y_test, preds)
assert abs(score - skl_score) < 1e-6, "score={}, skl_score={}".format(score, skl_score)
def test_generic_estimator_for_regression():
pipeline = make_pipeline(H2OAutoMLEstimator(estimator_type='regressor', seed=seed,
init_connection_args=init_connection_args))
pipeline.set_params(
h2oautomlestimator__max_models=max_models,
h2oautomlestimator__nfolds=3
)
pipeline.named_steps.h2oautomlestimator.exclude_algos = ['XGBoost']
data = _get_data(format='numpy', n_classes=0)
assert isinstance(data.X_train, np.ndarray)
pipeline.fit(data.X_train, data.y_train)
assert len(pipeline.named_steps.h2oautomlestimator.estimator.leaderboard) >= max_models + 1
preds = pipeline.predict(data.X_test)
assert isinstance(preds, np.ndarray)
assert preds.shape == (len(data.X_test),)
score = pipeline.score(data.X_test, data.y_test)
assert isinstance(score, float)
skl_score = r2_score(data.y_test, preds)
assert abs(score - skl_score) < 1e-6, "score={}, skl_score={}".format(score, skl_score)
pyunit_utils.run_tests([
test_binomial_classification_with_h2o_frames,
test_multinomial_classification_with_numpy_frames,
test_regression_with_numpy_frames,
test_generic_estimator_for_classification,
test_generic_estimator_for_regression,
])
/* From repository: Trifunik/zephyr */
/*
* Copyright (c) 2017 Linaro Limited
* Copyright (c) 2017-2019 Foundries.io
*
* SPDX-License-Identifier: Apache-2.0
*/
#define LOG_MODULE_NAME net_lwm2m_client_app
#define LOG_LEVEL LOG_LEVEL_DBG
#include <logging/log.h>
LOG_MODULE_REGISTER(LOG_MODULE_NAME);
#include <drivers/hwinfo.h>
#include <zephyr.h>
#include <drivers/gpio.h>
#include <drivers/sensor.h>
#include <net/lwm2m.h>
#define APP_BANNER "Run LWM2M client"
#if !defined(CONFIG_NET_CONFIG_PEER_IPV4_ADDR)
#define CONFIG_NET_CONFIG_PEER_IPV4_ADDR ""
#endif
#if !defined(CONFIG_NET_CONFIG_PEER_IPV6_ADDR)
#define CONFIG_NET_CONFIG_PEER_IPV6_ADDR ""
#endif
#if defined(CONFIG_NET_IPV6)
#define SERVER_ADDR CONFIG_NET_CONFIG_PEER_IPV6_ADDR
#elif defined(CONFIG_NET_IPV4)
#define SERVER_ADDR CONFIG_NET_CONFIG_PEER_IPV4_ADDR
#else
#error LwM2M requires either IPV6 or IPV4 support
#endif
#define WAIT_TIME K_SECONDS(10)
#define CONNECT_TIME K_SECONDS(10)
#define CLIENT_MANUFACTURER "Zephyr"
#define CLIENT_MODEL_NUMBER "OMA-LWM2M Sample Client"
#define CLIENT_SERIAL_NUMBER "345000123"
#define CLIENT_FIRMWARE_VER "1.0"
#define CLIENT_DEVICE_TYPE "OMA-LWM2M Client"
#define CLIENT_HW_VER "1.0.1"
#define LIGHT_NAME "Test light"
#define TIMER_NAME "Test timer"
#define ENDPOINT_LEN 32
#if DT_NODE_HAS_STATUS(DT_ALIAS(led0), okay)
#define LED_GPIO_PORT DT_GPIO_LABEL(DT_ALIAS(led0), gpios)
#define LED_GPIO_PIN DT_GPIO_PIN(DT_ALIAS(led0), gpios)
#define LED_GPIO_FLAGS DT_GPIO_FLAGS(DT_ALIAS(led0), gpios)
#else
/* Not an error; the relevant IPSO object will simply not be created. */
#define LED_GPIO_PORT ""
#define LED_GPIO_PIN 0
#define LED_GPIO_FLAGS 0
#endif
static uint8_t bat_idx = LWM2M_DEVICE_PWR_SRC_TYPE_BAT_INT;
static int bat_mv = 3800;
static int bat_ma = 125;
static uint8_t usb_idx = LWM2M_DEVICE_PWR_SRC_TYPE_USB;
static int usb_mv = 5000;
static int usb_ma = 900;
static uint8_t bat_level = 95;
static uint8_t bat_status = LWM2M_DEVICE_BATTERY_STATUS_CHARGING;
static int mem_free = 15;
static int mem_total = 25;
static const struct device *led_dev;
static uint32_t led_state;
static struct lwm2m_ctx client;
#if defined(CONFIG_LWM2M_FIRMWARE_UPDATE_PULL_SUPPORT)
/* Array with supported PULL firmware update protocols */
static uint8_t supported_protocol[1];
#endif
#if defined(CONFIG_LWM2M_DTLS_SUPPORT)
#define TLS_TAG 1
/* "000102030405060708090a0b0c0d0e0f" */
static unsigned char client_psk[] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
};
static const char client_psk_id[] = "Client_identity";
#endif /* CONFIG_LWM2M_DTLS_SUPPORT */
static struct k_sem quit_lock;
#if defined(CONFIG_LWM2M_FIRMWARE_UPDATE_OBJ_SUPPORT)
static uint8_t firmware_buf[64];
#endif
/* TODO: Move to a pre write hook that can handle ret codes once available */
static int led_on_off_cb(uint16_t obj_inst_id, uint16_t res_id, uint16_t res_inst_id,
uint8_t *data, uint16_t data_len,
bool last_block, size_t total_size)
{
int ret = 0;
uint32_t led_val;
led_val = *(uint8_t *) data;
if (led_val != led_state) {
ret = gpio_pin_set(led_dev, LED_GPIO_PIN, (int) led_val);
if (ret) {
/*
* We need an extra hook in LWM2M to better handle
* failures before writing the data value and not in
* post_write_cb, as there is not much that can be
* done here.
*/
LOG_ERR("Fail to write to GPIO %d", LED_GPIO_PIN);
return ret;
}
led_state = led_val;
/* TODO: Move to be set by an internal post write function */
lwm2m_engine_set_s32("3311/0/5852", 0);
}
return ret;
}
static int init_led_device(void)
{
int ret;
led_dev = device_get_binding(LED_GPIO_PORT);
if (!led_dev) {
return -ENODEV;
}
ret = gpio_pin_configure(led_dev, LED_GPIO_PIN, LED_GPIO_FLAGS |
GPIO_OUTPUT_INACTIVE);
if (ret) {
return ret;
}
return 0;
}
static int device_reboot_cb(uint16_t obj_inst_id,
uint8_t *args, uint16_t args_len)
{
LOG_INF("DEVICE: REBOOT");
/* Add an error for testing */
lwm2m_device_add_err(LWM2M_DEVICE_ERROR_LOW_POWER);
/* Change the battery voltage for testing */
lwm2m_engine_set_s32("3/0/7/0", (bat_mv - 1));
return 0;
}
static int device_factory_default_cb(uint16_t obj_inst_id,
uint8_t *args, uint16_t args_len)
{
LOG_INF("DEVICE: FACTORY DEFAULT");
/* Add an error for testing */
lwm2m_device_add_err(LWM2M_DEVICE_ERROR_GPS_FAILURE);
/* Change the USB current for testing */
lwm2m_engine_set_s32("3/0/8/1", (usb_ma - 1));
return 0;
}
#if defined(CONFIG_LWM2M_FIRMWARE_UPDATE_PULL_SUPPORT)
static int firmware_update_cb(uint16_t obj_inst_id,
uint8_t *args, uint16_t args_len)
{
LOG_DBG("UPDATE");
/* TODO: kick off update process */
/* If success, set the update result as RESULT_SUCCESS.
* In reality, it should be set at function lwm2m_setup()
*/
lwm2m_engine_set_u8("5/0/3", STATE_IDLE);
lwm2m_engine_set_u8("5/0/5", RESULT_SUCCESS);
return 0;
}
#endif
static void *temperature_get_buf(uint16_t obj_inst_id, uint16_t res_id,
uint16_t res_inst_id, size_t *data_len)
{
/* Last read temperature value, will use 25.5C if no sensor available */
static double v = 25.5;
const struct device *dev = NULL;
#if defined(CONFIG_FXOS8700_TEMP)
dev = device_get_binding(DT_LABEL(DT_INST(0, nxp_fxos8700)));
#endif
if (dev != NULL) {
struct sensor_value val;
if (sensor_sample_fetch(dev)) {
LOG_ERR("temperature data update failed");
}
sensor_channel_get(dev, SENSOR_CHAN_DIE_TEMP, &val);
v = sensor_value_to_double(&val);
LOG_DBG("LWM2M temperature set to %f", v);
}
/* echo the value back through the engine to update min/max values */
lwm2m_engine_set_float("3303/0/5700", &v);
*data_len = sizeof(v);
return &v;
}
#if defined(CONFIG_LWM2M_FIRMWARE_UPDATE_OBJ_SUPPORT)
static void *firmware_get_buf(uint16_t obj_inst_id, uint16_t res_id,
uint16_t res_inst_id, size_t *data_len)
{
*data_len = sizeof(firmware_buf);
return firmware_buf;
}
static int firmware_block_received_cb(uint16_t obj_inst_id,
uint16_t res_id, uint16_t res_inst_id,
uint8_t *data, uint16_t data_len,
bool last_block, size_t total_size)
{
LOG_INF("FIRMWARE: BLOCK RECEIVED: len:%u last_block:%d",
data_len, last_block);
return 0;
}
#endif
/* An example data validation callback. */
static int timer_on_off_validate_cb(uint16_t obj_inst_id, uint16_t res_id,
uint16_t res_inst_id, uint8_t *data,
uint16_t data_len, bool last_block,
size_t total_size)
{
LOG_INF("Validating On/Off data");
if (data_len != 1) {
return -EINVAL;
}
if (*data > 1) {
return -EINVAL;
}
return 0;
}
static int timer_digital_state_cb(uint16_t obj_inst_id,
uint16_t res_id, uint16_t res_inst_id,
uint8_t *data, uint16_t data_len,
bool last_block, size_t total_size)
{
bool *digital_state = (bool *)data;
if (*digital_state) {
LOG_INF("TIMER: ON");
} else {
LOG_INF("TIMER: OFF");
}
return 0;
}
static int lwm2m_setup(void)
{
int ret;
char *server_url;
uint16_t server_url_len;
uint8_t server_url_flags;
/* setup SECURITY object */
/* Server URL */
ret = lwm2m_engine_get_res_data("0/0/0",
(void **)&server_url, &server_url_len,
&server_url_flags);
if (ret < 0) {
return ret;
}
snprintk(server_url, server_url_len, "coap%s//%s%s%s",
IS_ENABLED(CONFIG_LWM2M_DTLS_SUPPORT) ? "s:" : ":",
strchr(SERVER_ADDR, ':') ? "[" : "", SERVER_ADDR,
strchr(SERVER_ADDR, ':') ? "]" : "");
/* Security Mode */
lwm2m_engine_set_u8("0/0/2",
IS_ENABLED(CONFIG_LWM2M_DTLS_SUPPORT) ? 0 : 3);
#if defined(CONFIG_LWM2M_DTLS_SUPPORT)
lwm2m_engine_set_string("0/0/3", (char *)client_psk_id);
lwm2m_engine_set_opaque("0/0/5",
(void *)client_psk, sizeof(client_psk));
#endif /* CONFIG_LWM2M_DTLS_SUPPORT */
#if defined(CONFIG_LWM2M_RD_CLIENT_SUPPORT_BOOTSTRAP)
/* Mark 1st instance of security object as a bootstrap server */
lwm2m_engine_set_u8("0/0/1", 1);
/* Create 2nd instance of security object needed for bootstrap */
lwm2m_engine_create_obj_inst("0/1");
#else
/* Match Security object instance with a Server object instance with
* Short Server ID.
*/
lwm2m_engine_set_u16("0/0/10", 101);
lwm2m_engine_set_u16("1/0/0", 101);
#endif
/* setup SERVER object */
/* setup DEVICE object */
lwm2m_engine_set_res_data("3/0/0", CLIENT_MANUFACTURER,
sizeof(CLIENT_MANUFACTURER),
LWM2M_RES_DATA_FLAG_RO);
lwm2m_engine_set_res_data("3/0/1", CLIENT_MODEL_NUMBER,
sizeof(CLIENT_MODEL_NUMBER),
LWM2M_RES_DATA_FLAG_RO);
lwm2m_engine_set_res_data("3/0/2", CLIENT_SERIAL_NUMBER,
sizeof(CLIENT_SERIAL_NUMBER),
LWM2M_RES_DATA_FLAG_RO);
lwm2m_engine_set_res_data("3/0/3", CLIENT_FIRMWARE_VER,
sizeof(CLIENT_FIRMWARE_VER),
LWM2M_RES_DATA_FLAG_RO);
lwm2m_engine_register_exec_callback("3/0/4", device_reboot_cb);
lwm2m_engine_register_exec_callback("3/0/5", device_factory_default_cb);
lwm2m_engine_set_res_data("3/0/9", &bat_level, sizeof(bat_level), 0);
lwm2m_engine_set_res_data("3/0/10", &mem_free, sizeof(mem_free), 0);
lwm2m_engine_set_res_data("3/0/17", CLIENT_DEVICE_TYPE,
sizeof(CLIENT_DEVICE_TYPE),
LWM2M_RES_DATA_FLAG_RO);
lwm2m_engine_set_res_data("3/0/18", CLIENT_HW_VER,
sizeof(CLIENT_HW_VER),
LWM2M_RES_DATA_FLAG_RO);
lwm2m_engine_set_res_data("3/0/20", &bat_status, sizeof(bat_status), 0);
lwm2m_engine_set_res_data("3/0/21", &mem_total, sizeof(mem_total), 0);
/* add power source resource instances */
lwm2m_engine_create_res_inst("3/0/6/0");
lwm2m_engine_set_res_data("3/0/6/0", &bat_idx, sizeof(bat_idx), 0);
lwm2m_engine_create_res_inst("3/0/7/0");
lwm2m_engine_set_res_data("3/0/7/0", &bat_mv, sizeof(bat_mv), 0);
lwm2m_engine_create_res_inst("3/0/8/0");
lwm2m_engine_set_res_data("3/0/8/0", &bat_ma, sizeof(bat_ma), 0);
lwm2m_engine_create_res_inst("3/0/6/1");
lwm2m_engine_set_res_data("3/0/6/1", &usb_idx, sizeof(usb_idx), 0);
lwm2m_engine_create_res_inst("3/0/7/1");
lwm2m_engine_set_res_data("3/0/7/1", &usb_mv, sizeof(usb_mv), 0);
lwm2m_engine_create_res_inst("3/0/8/1");
lwm2m_engine_set_res_data("3/0/8/1", &usb_ma, sizeof(usb_ma), 0);
/* setup FIRMWARE object */
#if defined(CONFIG_LWM2M_FIRMWARE_UPDATE_OBJ_SUPPORT)
/* setup data buffer for block-wise transfer */
lwm2m_engine_register_pre_write_callback("5/0/0", firmware_get_buf);
lwm2m_firmware_set_write_cb(firmware_block_received_cb);
#endif
#if defined(CONFIG_LWM2M_FIRMWARE_UPDATE_PULL_SUPPORT)
lwm2m_engine_create_res_inst("5/0/8/0");
lwm2m_engine_set_res_data("5/0/8/0", &supported_protocol[0],
sizeof(supported_protocol[0]), 0);
lwm2m_firmware_set_update_cb(firmware_update_cb);
#endif
/* setup TEMP SENSOR object */
lwm2m_engine_create_obj_inst("3303/0");
lwm2m_engine_register_read_callback("3303/0/5700", temperature_get_buf);
/* IPSO: Light Control object */
if (init_led_device() == 0) {
lwm2m_engine_create_obj_inst("3311/0");
lwm2m_engine_register_post_write_callback("3311/0/5850",
led_on_off_cb);
lwm2m_engine_set_res_data("3311/0/5750",
LIGHT_NAME, sizeof(LIGHT_NAME),
LWM2M_RES_DATA_FLAG_RO);
}
/* IPSO: Timer object */
lwm2m_engine_create_obj_inst("3340/0");
lwm2m_engine_register_validate_callback("3340/0/5850",
timer_on_off_validate_cb);
lwm2m_engine_register_post_write_callback("3340/0/5543",
timer_digital_state_cb);
lwm2m_engine_set_res_data("3340/0/5750", TIMER_NAME, sizeof(TIMER_NAME),
LWM2M_RES_DATA_FLAG_RO);
return 0;
}
static void rd_client_event(struct lwm2m_ctx *client,
enum lwm2m_rd_client_event client_event)
{
switch (client_event) {
case LWM2M_RD_CLIENT_EVENT_NONE:
/* do nothing */
break;
case LWM2M_RD_CLIENT_EVENT_BOOTSTRAP_REG_FAILURE:
LOG_DBG("Bootstrap registration failure!");
break;
case LWM2M_RD_CLIENT_EVENT_BOOTSTRAP_REG_COMPLETE:
LOG_DBG("Bootstrap registration complete");
break;
case LWM2M_RD_CLIENT_EVENT_BOOTSTRAP_TRANSFER_COMPLETE:
LOG_DBG("Bootstrap transfer complete");
break;
case LWM2M_RD_CLIENT_EVENT_REGISTRATION_FAILURE:
LOG_DBG("Registration failure!");
break;
case LWM2M_RD_CLIENT_EVENT_REGISTRATION_COMPLETE:
LOG_DBG("Registration complete");
break;
case LWM2M_RD_CLIENT_EVENT_REG_UPDATE_FAILURE:
LOG_DBG("Registration update failure!");
break;
case LWM2M_RD_CLIENT_EVENT_REG_UPDATE_COMPLETE:
LOG_DBG("Registration update complete");
break;
case LWM2M_RD_CLIENT_EVENT_DEREGISTER_FAILURE:
LOG_DBG("Deregister failure!");
break;
case LWM2M_RD_CLIENT_EVENT_DISCONNECT:
LOG_DBG("Disconnected");
break;
case LWM2M_RD_CLIENT_EVENT_QUEUE_MODE_RX_OFF:
LOG_DBG("Queue mode RX window closed");
break;
case LWM2M_RD_CLIENT_EVENT_NETWORK_ERROR:
LOG_ERR("LwM2M engine reported a network erorr.");
lwm2m_rd_client_stop(client, rd_client_event, true);
break;
}
}
static void observe_cb(enum lwm2m_observe_event event,
struct lwm2m_obj_path *path, void *user_data)
{
char buf[LWM2M_MAX_PATH_STR_LEN];
switch (event) {
case LWM2M_OBSERVE_EVENT_OBSERVER_ADDED:
LOG_INF("Observer added for %s", lwm2m_path_log_strdup(buf, path));
break;
case LWM2M_OBSERVE_EVENT_OBSERVER_REMOVED:
LOG_INF("Observer removed for %s", lwm2m_path_log_strdup(buf, path));
break;
case LWM2M_OBSERVE_EVENT_NOTIFY_ACK:
LOG_INF("Notify acknowledged for %s", lwm2m_path_log_strdup(buf, path));
break;
case LWM2M_OBSERVE_EVENT_NOTIFY_TIMEOUT:
LOG_INF("Notify timeout for %s, trying registration update",
lwm2m_path_log_strdup(buf, path));
lwm2m_rd_client_update();
break;
}
}
void main(void)
{
uint32_t flags = IS_ENABLED(CONFIG_LWM2M_RD_CLIENT_SUPPORT_BOOTSTRAP) ?
LWM2M_RD_CLIENT_FLAG_BOOTSTRAP : 0;
int ret;
LOG_INF(APP_BANNER);
k_sem_init(&quit_lock, 0, K_SEM_MAX_LIMIT);
ret = lwm2m_setup();
if (ret < 0) {
LOG_ERR("Cannot setup LWM2M fields (%d)", ret);
return;
}
(void)memset(&client, 0x0, sizeof(client));
#if defined(CONFIG_LWM2M_DTLS_SUPPORT)
client.tls_tag = TLS_TAG;
#endif
#if defined(CONFIG_HWINFO)
uint8_t dev_id[16];
char dev_str[33];
ssize_t length;
int i;
(void)memset(dev_id, 0x0, sizeof(dev_id));
/* Obtain the device id */
length = hwinfo_get_device_id(dev_id, sizeof(dev_id));
/* If this fails for some reason, use all zeros instead */
if (length <= 0) {
length = sizeof(dev_id);
}
/* Render the obtained serial number in hexadecimal representation */
for (i = 0 ; i < length ; i++) {
sprintf(&dev_str[i*2], "%02x", dev_id[i]);
}
lwm2m_rd_client_start(&client, dev_str, flags, rd_client_event, observe_cb);
#else
/* client.sec_obj_inst is 0 as a starting point */
lwm2m_rd_client_start(&client, CONFIG_BOARD, flags, rd_client_event, observe_cb);
#endif
k_sem_take(&quit_lock, K_FOREVER);
}
// File: ambry-network/src/main/java/com/github/ambry/network/BlockingChannelConnectionPool.java
/**
* Copyright 2016 LinkedIn Corp. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
package com.github.ambry.network;
import com.codahale.metrics.Counter;
import com.codahale.metrics.Gauge;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;
import com.github.ambry.commons.SSLFactory;
import com.github.ambry.config.ClusterMapConfig;
import com.github.ambry.config.ConnectionPoolConfig;
import com.github.ambry.config.SSLConfig;
import java.io.IOException;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLSocketFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A connection pool that uses BlockingChannel as the underlying connection.
 * It is responsible for all connection management: checking out a new
 * connection, checking in a connection that has been checked out, and
 * destroying a connection in the case of an error.
*/
public final class BlockingChannelConnectionPool implements ConnectionPool {
private static final Logger logger = LoggerFactory.getLogger(BlockingChannelConnectionPool.class);
private final Map<String, BlockingChannelInfo> connections;
private final ConnectionPoolConfig config;
private final MetricRegistry registry;
private final Timer connectionCheckOutTime;
private final Timer connectionCheckInTime;
private final Timer connectionDestroyTime;
private final AtomicInteger requestsWaitingToCheckoutConnectionCount;
private SSLSocketFactory sslSocketFactory;
private final SSLConfig sslConfig;
  // Represents the total number of nodes connected to, i.e. nodes for which the blocking channel has at least 1 connection
private Gauge<Integer> totalNumberOfNodesConnectedTo;
// Represents the total number of connections, in other words, aggregate of the connections from all nodes
public Gauge<Integer> totalNumberOfConnections;
// Represents the number of requests waiting to checkout a connection
public Gauge<Integer> requestsWaitingToCheckoutConnection;
// Represents the number of sslSocketFactory Initializations by client
public Counter sslSocketFactoryClientInitializationCount;
// Represents the number of sslSocketFactory Initialization Error by client
public Counter sslSocketFactoryClientInitializationErrorCount;
public BlockingChannelConnectionPool(ConnectionPoolConfig config, SSLConfig sslConfig,
ClusterMapConfig clusterMapConfig, MetricRegistry registry) throws Exception {
connections = new ConcurrentHashMap<String, BlockingChannelInfo>();
this.config = config;
this.registry = registry;
this.sslConfig = sslConfig;
connectionCheckOutTime =
registry.timer(MetricRegistry.name(BlockingChannelConnectionPool.class, "connectionCheckOutTime"));
connectionCheckInTime =
registry.timer(MetricRegistry.name(BlockingChannelConnectionPool.class, "connectionCheckInTime"));
connectionDestroyTime =
registry.timer(MetricRegistry.name(BlockingChannelConnectionPool.class, "connectionDestroyTime"));
totalNumberOfNodesConnectedTo = () -> {
int noOfNodesConnectedTo = 0;
for (BlockingChannelInfo blockingChannelInfo : connections.values()) {
if (blockingChannelInfo.getNumberOfConnections() > 0) {
noOfNodesConnectedTo++;
}
}
return noOfNodesConnectedTo;
};
registry.register(MetricRegistry.name(BlockingChannelConnectionPool.class, "totalNumberOfNodesConnectedTo"),
totalNumberOfNodesConnectedTo);
totalNumberOfConnections = () -> {
int noOfConnections = 0;
for (BlockingChannelInfo blockingChannelInfo : connections.values()) {
noOfConnections += blockingChannelInfo.getNumberOfConnections();
}
return noOfConnections;
};
registry.register(MetricRegistry.name(BlockingChannelConnectionPool.class, "totalNumberOfConnections"),
totalNumberOfConnections);
requestsWaitingToCheckoutConnectionCount = new AtomicInteger(0);
requestsWaitingToCheckoutConnection = requestsWaitingToCheckoutConnectionCount::get;
registry.register(MetricRegistry.name(BlockingChannelConnectionPool.class, "requestsWaitingToCheckoutConnection"),
requestsWaitingToCheckoutConnection);
sslSocketFactoryClientInitializationCount = registry.counter(
MetricRegistry.name(BlockingChannelConnectionPool.class, "SslSocketFactoryClientInitializationCount"));
sslSocketFactoryClientInitializationErrorCount = registry.counter(
MetricRegistry.name(BlockingChannelConnectionPool.class, "SslSocketFactoryClientInitializationErrorCount"));
if (clusterMapConfig.clusterMapSslEnabledDatacenters.length() > 0) {
initializeSSLSocketFactory();
} else {
this.sslSocketFactory = null;
}
}
@Override
public void start() {
logger.info("BlockingChannelConnectionPool started");
}
@Override
public void shutdown() {
logger.info("Shutting down the BlockingChannelConnectionPool");
for (Map.Entry<String, BlockingChannelInfo> channels : connections.entrySet()) {
channels.getValue().cleanup();
}
}
private void initializeSSLSocketFactory() throws Exception {
try {
SSLFactory sslFactory = SSLFactory.getNewInstance(sslConfig);
SSLContext sslContext = sslFactory.getSSLContext();
this.sslSocketFactory = sslContext.getSocketFactory();
this.sslSocketFactoryClientInitializationCount.inc();
} catch (Exception e) {
this.sslSocketFactoryClientInitializationErrorCount.inc();
logger.error("SSLSocketFactory Client Initialization Error ", e);
throw e;
}
}
@Override
public ConnectedChannel checkOutConnection(String host, Port port, long timeoutInMs)
throws IOException, InterruptedException, ConnectionPoolTimeoutException {
final Timer.Context context = connectionCheckOutTime.time();
try {
requestsWaitingToCheckoutConnectionCount.incrementAndGet();
BlockingChannelInfo blockingChannelInfo = connections.get(host + port.getPort());
if (blockingChannelInfo == null) {
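        // Double-checked locking: re-read the map inside the synchronized block so
        // that only one thread creates the BlockingChannelInfo for a given host:port.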
synchronized (this) {
blockingChannelInfo = connections.get(host + port.getPort());
if (blockingChannelInfo == null) {
logger.trace("Creating new blocking channel info for host {} and port {}", host, port.getPort());
blockingChannelInfo = new BlockingChannelInfo(config, host, port, registry, sslSocketFactory, sslConfig);
connections.put(host + port.getPort(), blockingChannelInfo);
} else {
logger.trace("Using already existing BlockingChannelInfo for {}:{} in synchronized block", host,
port.getPort());
}
}
} else {
logger.trace("Using already existing BlockingChannelInfo for {}:{}", host, port.getPort());
}
return blockingChannelInfo.getBlockingChannel(timeoutInMs);
} finally {
requestsWaitingToCheckoutConnectionCount.decrementAndGet();
context.stop();
}
}
@Override
public void checkInConnection(ConnectedChannel connectedChannel) {
final Timer.Context context = connectionCheckInTime.time();
try {
BlockingChannelInfo blockingChannelInfo =
connections.get(connectedChannel.getRemoteHost() + connectedChannel.getRemotePort());
if (blockingChannelInfo == null) {
logger.error("Unexpected state in connection pool. Host {} and port {} not found to checkin connection",
connectedChannel.getRemoteHost(), connectedChannel.getRemotePort());
throw new IllegalArgumentException("Connection does not belong to the pool");
}
blockingChannelInfo.releaseBlockingChannel((BlockingChannel) connectedChannel);
logger.trace("Checking in connection for host {} and port {}", connectedChannel.getRemoteHost(),
connectedChannel.getRemotePort());
} finally {
context.stop();
}
}
@Override
public void destroyConnection(ConnectedChannel connectedChannel) {
final Timer.Context context = connectionDestroyTime.time();
try {
BlockingChannelInfo blockingChannelInfo =
connections.get(connectedChannel.getRemoteHost() + connectedChannel.getRemotePort());
if (blockingChannelInfo == null) {
logger.error("Unexpected state in connection pool. Host {} and port {} not found to checkin connection",
connectedChannel.getRemoteHost(), connectedChannel.getRemotePort());
throw new IllegalArgumentException("Connection does not belong to the pool");
}
blockingChannelInfo.destroyBlockingChannel((BlockingChannel) connectedChannel);
logger.trace("Destroying connection for host {} and port {}", connectedChannel.getRemoteHost(),
connectedChannel.getRemotePort());
} finally {
context.stop();
}
}
}
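// Illustrative usage sketch (not part of the original class): the
// checkout / checkin / destroy-on-error pattern described in the class javadoc.
// The host, port, timeout and surrounding wiring are placeholder assumptions.
//
//   ConnectionPool pool = new BlockingChannelConnectionPool(config, sslConfig,
//       clusterMapConfig, registry);
//   pool.start();
//   ConnectedChannel channel = pool.checkOutConnection("localhost", port, 5000);
//   try {
//       // ... send a request and read the response on the channel ...
//       pool.checkInConnection(channel);      // healthy: return it to the pool
//   } catch (IOException e) {
//       pool.destroyConnection(channel);      // error: drop the connection
//   }
//   pool.shutdown();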
// From repository: Kirishikesan/haiku
/*
* Copyright 2011-2012 Haiku, Inc. All rights reserved.
* Distributed under the terms of the MIT License.
*
* Authors:
* <NAME>, <EMAIL>
*/
#include "Option.h"
OptionDevice::OptionDevice(usb_device device, uint16 vendorID,
uint16 productID, const char *description)
:
ACMDevice(device, vendorID, productID, description)
{
TRACE_FUNCALLS("> OptionDevice found: %s\n", description);
}
status_t
OptionDevice::AddDevice(const usb_configuration_info *config)
{
TRACE_FUNCALLS("> OptionDevice::AddDevice(%08x, %08x)\n", this, config);
int portsFound = 0;
if (config->interface_count > 0) {
for (size_t index = 0; index < config->interface_count; index++) {
usb_interface_info *interface = config->interface[index].active;
int txEndpointID = -1;
int rxEndpointID = -1;
int irEndpointID = -1;
for (size_t i = 0; i < interface->endpoint_count; i++) {
usb_endpoint_info *endpoint = &interface->endpoint[i];
// Find our Interrupt endpoint
if (endpoint->descr->attributes == USB_ENDPOINT_ATTR_INTERRUPT
&& (endpoint->descr->endpoint_address
& USB_ENDPOINT_ADDR_DIR_IN) != 0) {
irEndpointID = i;
continue;
}
// Find our Transmit / Receive endpoints
if (endpoint->descr->attributes == USB_ENDPOINT_ATTR_BULK) {
if ((endpoint->descr->endpoint_address
& USB_ENDPOINT_ADDR_DIR_IN) != 0) {
rxEndpointID = i;
} else {
txEndpointID = i;
}
continue;
}
}
TRACE("> OptionDevice::%s: endpoint %d, tx: %d, rx: %d, ir: %d\n",
__func__, index, txEndpointID, rxEndpointID, irEndpointID);
if (txEndpointID < 0 || rxEndpointID < 0 || irEndpointID < 0)
continue;
TRACE("> OptionDevice::%s: found port at interface %d\n", __func__,
index);
portsFound++;
usb_endpoint_info *irEndpoint = &interface->endpoint[irEndpointID];
usb_endpoint_info *txEndpoint = &interface->endpoint[txEndpointID];
usb_endpoint_info *rxEndpoint = &interface->endpoint[rxEndpointID];
SetControlPipe(irEndpoint->handle);
SetReadPipe(rxEndpoint->handle);
SetWritePipe(txEndpoint->handle);
}
// TODO: We need to handle multiple ports
// We use the last found serial port for now
if (portsFound > 0) {
if (portsFound > 1) {
TRACE_ALWAYS("> OptionDevice::%s: Warning: Found more than one "
"serial port on this device (%d). Only the last one is "
"is used.\n", __func__, portsFound);
}
return B_OK;
}
}
return ENODEV;
}
status_t
OptionDevice::ResetDevice()
{
TRACE_FUNCALLS("> OptionDevice::ResetDevice(%08x)\n", this);
return B_OK;
}
Ray0218/Specs: Specs/IP-UIKit-Wisdom/0.0.7/IP-UIKit-Wisdom.podspec.json
{
"name": "IP-UIKit-Wisdom",
"version": "0.0.7",
"summary": "A bag of internal helper libraries and categories around UIKit",
"description": "A bag of internal helper libraries and categories around UIKit, written by the developers of Intrepid Pursuits.",
"homepage": "https://github.com/IntrepidPursuits/uikit-wisdom",
"license": "MIT",
"authors": {
"<NAME>": "<EMAIL>"
},
"source": {
"git": "https://github.com/IntrepidPursuits/uikit-wisdom.git",
"tag": "v0.0.7"
},
"source_files": "src/**/*.{h,m}",
"exclude_files": "tests/**/*",
"platforms": {
"ios": null
},
"frameworks": "UIKit"
}
# From repository: yetsun/hue
from unittest import TestCase
from kazoo.hosts import collect_hosts
class HostsTestCase(TestCase):
def test_ipv4(self):
hosts, chroot = collect_hosts('127.0.0.1:2181, 192.168.1.2:2181, \
192.168.127.12:2181')
assert hosts == [('127.0.0.1', 2181),
('192.168.1.2', 2181),
('192.168.127.12', 2181)]
assert chroot is None
hosts, chroot = collect_hosts(['127.0.0.1:2181',
'192.168.1.2:2181',
'192.168.127.12:2181'])
assert hosts == [('127.0.0.1', 2181),
('192.168.1.2', 2181),
('192.168.127.12', 2181)]
assert chroot is None
def test_ipv6(self):
hosts, chroot = collect_hosts('[fe80::200:5aee:feaa:20a2]:2181')
assert hosts == [('fe80::200:5aee:feaa:20a2', 2181)]
assert chroot is None
hosts, chroot = collect_hosts(['[fe80::200:5aee:feaa:20a2]:2181'])
assert hosts == [('fe80::200:5aee:feaa:20a2', 2181)]
assert chroot is None
def test_hosts_list(self):
hosts, chroot = collect_hosts('zk01:2181, zk02:2181, zk03:2181')
expected1 = [('zk01', 2181), ('zk02', 2181), ('zk03', 2181)]
assert hosts == expected1
assert chroot is None
hosts, chroot = collect_hosts(['zk01:2181', 'zk02:2181', 'zk03:2181'])
assert hosts == expected1
assert chroot is None
expected2 = '/test'
hosts, chroot = collect_hosts('zk01:2181, zk02:2181, zk03:2181/test')
assert hosts == expected1
assert chroot == expected2
hosts, chroot = collect_hosts(['zk01:2181',
'zk02:2181',
'zk03:2181', '/test'])
assert hosts == expected1
assert chroot == expected2
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef vm_ArrayObject_inl_h
#define vm_ArrayObject_inl_h
#include "vm/ArrayObject.h"
#include "vm/String.h"
#include "jsinferinlines.h"
namespace js {
inline void
ArrayObject::setLength(ExclusiveContext *cx, uint32_t length)
{
JS_ASSERT(lengthIsWritable());
if (length > INT32_MAX) {
/* Track objects with overflowing lengths in type information. */
types::MarkTypeObjectFlags(cx, this, types::OBJECT_FLAG_LENGTH_OVERFLOW);
}
getElementsHeader()->length = length;
}
} // namespace js
#endif // vm_ArrayObject_inl_h
// From repository: ghiloufibelgacem/jornaldev
package com.journaldev.struts2.actions;
import org.apache.struts2.convention.annotation.Action;
import org.apache.struts2.convention.annotation.Actions;
import org.apache.struts2.convention.annotation.Namespace;
import org.apache.struts2.convention.annotation.Namespaces;
import org.apache.struts2.convention.annotation.Result;
import com.opensymphony.xwork2.ActionSupport;
/**
 * An empty class providing the default Action implementation for:
 *
 * <action name="home"> <result>/login.jsp</result> </action>
 *
 * The HomeAction class is automatically mapped to home.action. The default
 * page is login.jsp, which will be served to the client.
*
* @author pankaj
*
*/
@Namespaces(value = { @Namespace("/User"), @Namespace("/") })
@Result(location = "login.jsp")
@Actions(value = { @Action(""), @Action("home") })
public class HomeAction extends ActionSupport {
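	// No execute() override is needed: ActionSupport's default execute() returns
	// SUCCESS, so the @Result above resolves to login.jsp for home.action.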
}
package ai.yue.library.base.exception;
/**
 * Exception thrown when parameter validation fails.
 *
 * @author ylyue
 * @since 2017-10-09
*/
public class ParamException extends RuntimeException {
private static final long serialVersionUID = -7818277682527873103L;
public ParamException(String msg) {
super(msg);
}
}
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "net/cert/nss_cert_database_chromeos.h"
#include <memory>
#include "base/bind.h"
#include "base/callback.h"
#include "base/run_loop.h"
#include "base/threading/thread_task_runner_handle.h"
#include "crypto/nss_util_internal.h"
#include "crypto/scoped_test_nss_chromeos_user.h"
#include "crypto/scoped_test_nss_db.h"
#include "net/cert/cert_database.h"
#include "net/test/cert_test_util.h"
#include "net/test/test_data_directory.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace net {
namespace {
bool IsCertInCertificateList(const X509Certificate* cert,
const CertificateList& cert_list) {
for (CertificateList::const_iterator it = cert_list.begin();
it != cert_list.end();
++it) {
if (X509Certificate::IsSameOSCert((*it)->os_cert_handle(),
cert->os_cert_handle()))
return true;
}
return false;
}
void SwapCertLists(CertificateList* destination,
std::unique_ptr<CertificateList> source) {
ASSERT_TRUE(destination);
ASSERT_TRUE(source);
destination->swap(*source);
}
} // namespace
class NSSCertDatabaseChromeOSTest : public testing::Test,
public CertDatabase::Observer {
public:
NSSCertDatabaseChromeOSTest()
: observer_added_(false), user_1_("user1"), user_2_("user2") {}
void SetUp() override {
// Initialize nss_util slots.
ASSERT_TRUE(user_1_.constructed_successfully());
ASSERT_TRUE(user_2_.constructed_successfully());
user_1_.FinishInit();
user_2_.FinishInit();
// Create NSSCertDatabaseChromeOS for each user.
db_1_.reset(new NSSCertDatabaseChromeOS(
crypto::GetPublicSlotForChromeOSUser(user_1_.username_hash()),
crypto::GetPrivateSlotForChromeOSUser(
user_1_.username_hash(),
base::Callback<void(crypto::ScopedPK11Slot)>())));
db_1_->SetSlowTaskRunnerForTest(base::ThreadTaskRunnerHandle::Get());
db_1_->SetSystemSlot(
crypto::ScopedPK11Slot(PK11_ReferenceSlot(system_db_.slot())));
db_2_.reset(new NSSCertDatabaseChromeOS(
crypto::GetPublicSlotForChromeOSUser(user_2_.username_hash()),
crypto::GetPrivateSlotForChromeOSUser(
user_2_.username_hash(),
base::Callback<void(crypto::ScopedPK11Slot)>())));
db_2_->SetSlowTaskRunnerForTest(base::ThreadTaskRunnerHandle::Get());
// Add observer to CertDatabase for checking that notifications from
// NSSCertDatabaseChromeOS are proxied to the CertDatabase.
CertDatabase::GetInstance()->AddObserver(this);
observer_added_ = true;
}
void TearDown() override {
if (observer_added_)
CertDatabase::GetInstance()->RemoveObserver(this);
}
// CertDatabase::Observer:
void OnCertDBChanged(const X509Certificate* cert) override {
added_ca_.push_back(cert ? cert->os_cert_handle() : NULL);
}
protected:
bool observer_added_;
// Certificates that were passed to the CertDatabase observers.
std::vector<CERTCertificate*> added_ca_;
crypto::ScopedTestNSSChromeOSUser user_1_;
crypto::ScopedTestNSSChromeOSUser user_2_;
crypto::ScopedTestNSSDB system_db_;
std::unique_ptr<NSSCertDatabaseChromeOS> db_1_;
std::unique_ptr<NSSCertDatabaseChromeOS> db_2_;
};
// Test that ListModules() on each user includes that user's NSS software slot,
// and does not include the software slot of the other user. (Does not check the
// private slot, since it is the same as the public slot in tests.)
TEST_F(NSSCertDatabaseChromeOSTest, ListModules) {
CryptoModuleList modules_1;
CryptoModuleList modules_2;
db_1_->ListModules(&modules_1, false /* need_rw */);
db_2_->ListModules(&modules_2, false /* need_rw */);
bool found_1 = false;
for (CryptoModuleList::iterator it = modules_1.begin(); it != modules_1.end();
++it) {
EXPECT_NE(db_2_->GetPublicSlot().get(), (*it)->os_module_handle());
if ((*it)->os_module_handle() == db_1_->GetPublicSlot().get())
found_1 = true;
}
EXPECT_TRUE(found_1);
bool found_2 = false;
for (CryptoModuleList::iterator it = modules_2.begin(); it != modules_2.end();
++it) {
EXPECT_NE(db_1_->GetPublicSlot().get(), (*it)->os_module_handle());
if ((*it)->os_module_handle() == db_2_->GetPublicSlot().get())
found_2 = true;
}
EXPECT_TRUE(found_2);
}
// Test that ImportCACerts imports the cert to the correct slot, and that
// ListCerts includes the added cert for the correct user, and does not include
// it for the other user.
TEST_F(NSSCertDatabaseChromeOSTest, ImportCACerts) {
// Load test certs from disk.
CertificateList certs_1 =
CreateCertificateListFromFile(GetTestCertsDirectory(),
"root_ca_cert.pem",
X509Certificate::FORMAT_AUTO);
ASSERT_EQ(1U, certs_1.size());
CertificateList certs_2 =
CreateCertificateListFromFile(GetTestCertsDirectory(),
"2048-rsa-root.pem",
X509Certificate::FORMAT_AUTO);
ASSERT_EQ(1U, certs_2.size());
// Import one cert for each user.
NSSCertDatabase::ImportCertFailureList failed;
EXPECT_TRUE(
db_1_->ImportCACerts(certs_1, NSSCertDatabase::TRUSTED_SSL, &failed));
EXPECT_EQ(0U, failed.size());
failed.clear();
EXPECT_TRUE(
db_2_->ImportCACerts(certs_2, NSSCertDatabase::TRUSTED_SSL, &failed));
EXPECT_EQ(0U, failed.size());
// Get cert list for each user.
CertificateList user_1_certlist;
CertificateList user_2_certlist;
db_1_->ListCertsSync(&user_1_certlist);
db_2_->ListCertsSync(&user_2_certlist);
// Check that the imported certs only shows up in the list for the user that
// imported them.
EXPECT_TRUE(IsCertInCertificateList(certs_1[0].get(), user_1_certlist));
EXPECT_FALSE(IsCertInCertificateList(certs_1[0].get(), user_2_certlist));
EXPECT_TRUE(IsCertInCertificateList(certs_2[0].get(), user_2_certlist));
EXPECT_FALSE(IsCertInCertificateList(certs_2[0].get(), user_1_certlist));
// Run the message loop so the observer notifications get processed.
base::RunLoop().RunUntilIdle();
// Should have gotten two OnCertDBChanged notifications.
ASSERT_EQ(2U, added_ca_.size());
// TODO(mattm): make NSSCertDatabase actually pass the cert to the callback,
// and enable these checks:
// EXPECT_EQ(certs_1[0]->os_cert_handle(), added_ca_[0]);
// EXPECT_EQ(certs_2[0]->os_cert_handle(), added_ca_[1]);
// Tests that the new certs are loaded by async ListCerts method.
CertificateList user_1_certlist_async;
CertificateList user_2_certlist_async;
db_1_->ListCerts(
base::Bind(&SwapCertLists, base::Unretained(&user_1_certlist_async)));
db_2_->ListCerts(
base::Bind(&SwapCertLists, base::Unretained(&user_2_certlist_async)));
base::RunLoop().RunUntilIdle();
EXPECT_TRUE(IsCertInCertificateList(certs_1[0].get(), user_1_certlist_async));
EXPECT_FALSE(
IsCertInCertificateList(certs_1[0].get(), user_2_certlist_async));
EXPECT_TRUE(IsCertInCertificateList(certs_2[0].get(), user_2_certlist_async));
EXPECT_FALSE(
IsCertInCertificateList(certs_2[0].get(), user_1_certlist_async));
}
// Test that ImportServerCerts imports the cert to the correct slot, and that
// ListCerts includes the added cert for the correct user, and does not include
// it for the other user.
TEST_F(NSSCertDatabaseChromeOSTest, ImportServerCert) {
// Load test certs from disk.
CertificateList certs_1 = CreateCertificateListFromFile(
GetTestCertsDirectory(), "ok_cert.pem", X509Certificate::FORMAT_AUTO);
ASSERT_EQ(1U, certs_1.size());
CertificateList certs_2 =
CreateCertificateListFromFile(GetTestCertsDirectory(),
"2048-rsa-ee-by-2048-rsa-intermediate.pem",
X509Certificate::FORMAT_AUTO);
ASSERT_EQ(1U, certs_2.size());
// Import one cert for each user.
NSSCertDatabase::ImportCertFailureList failed;
EXPECT_TRUE(
db_1_->ImportServerCert(certs_1, NSSCertDatabase::TRUSTED_SSL, &failed));
EXPECT_EQ(0U, failed.size());
failed.clear();
EXPECT_TRUE(
db_2_->ImportServerCert(certs_2, NSSCertDatabase::TRUSTED_SSL, &failed));
EXPECT_EQ(0U, failed.size());
// Get cert list for each user.
CertificateList user_1_certlist;
CertificateList user_2_certlist;
db_1_->ListCertsSync(&user_1_certlist);
db_2_->ListCertsSync(&user_2_certlist);
// Check that the imported certs only shows up in the list for the user that
// imported them.
EXPECT_TRUE(IsCertInCertificateList(certs_1[0].get(), user_1_certlist));
EXPECT_FALSE(IsCertInCertificateList(certs_1[0].get(), user_2_certlist));
EXPECT_TRUE(IsCertInCertificateList(certs_2[0].get(), user_2_certlist));
EXPECT_FALSE(IsCertInCertificateList(certs_2[0].get(), user_1_certlist));
// Run the message loop so the observer notifications get processed.
base::RunLoop().RunUntilIdle();
// TODO(mattm): ImportServerCert doesn't actually cause any observers to
// fire. Is that correct?
EXPECT_EQ(0U, added_ca_.size());
// Tests that the new certs are loaded by async ListCerts method.
CertificateList user_1_certlist_async;
CertificateList user_2_certlist_async;
db_1_->ListCerts(
base::Bind(&SwapCertLists, base::Unretained(&user_1_certlist_async)));
db_2_->ListCerts(
base::Bind(&SwapCertLists, base::Unretained(&user_2_certlist_async)));
base::RunLoop().RunUntilIdle();
EXPECT_TRUE(IsCertInCertificateList(certs_1[0].get(), user_1_certlist_async));
EXPECT_FALSE(
IsCertInCertificateList(certs_1[0].get(), user_2_certlist_async));
EXPECT_TRUE(IsCertInCertificateList(certs_2[0].get(), user_2_certlist_async));
EXPECT_FALSE(
IsCertInCertificateList(certs_2[0].get(), user_1_certlist_async));
}
// Tests that there is no crash if the database is deleted while ListCerts
// is being processed on the worker pool.
TEST_F(NSSCertDatabaseChromeOSTest, NoCrashIfShutdownBeforeDoneOnWorkerPool) {
CertificateList certlist;
db_1_->ListCerts(base::Bind(&SwapCertLists, base::Unretained(&certlist)));
EXPECT_EQ(0U, certlist.size());
db_1_.reset();
base::RunLoop().RunUntilIdle();
EXPECT_LT(0U, certlist.size());
}
TEST_F(NSSCertDatabaseChromeOSTest, ListCertsReadsSystemSlot) {
scoped_refptr<X509Certificate> cert_1(
ImportClientCertAndKeyFromFile(GetTestCertsDirectory(),
"client_1.pem",
"client_1.pk8",
db_1_->GetPublicSlot().get()));
scoped_refptr<X509Certificate> cert_2(
ImportClientCertAndKeyFromFile(GetTestCertsDirectory(),
"client_2.pem",
"client_2.pk8",
db_1_->GetSystemSlot().get()));
CertificateList certs;
db_1_->ListCertsSync(&certs);
EXPECT_TRUE(IsCertInCertificateList(cert_1.get(), certs));
EXPECT_TRUE(IsCertInCertificateList(cert_2.get(), certs));
}
TEST_F(NSSCertDatabaseChromeOSTest, ListCertsDoesNotCrossReadSystemSlot) {
scoped_refptr<X509Certificate> cert_1(
ImportClientCertAndKeyFromFile(GetTestCertsDirectory(),
"client_1.pem",
"client_1.pk8",
db_2_->GetPublicSlot().get()));
scoped_refptr<X509Certificate> cert_2(
ImportClientCertAndKeyFromFile(GetTestCertsDirectory(),
"client_2.pem",
"client_2.pk8",
system_db_.slot()));
CertificateList certs;
db_2_->ListCertsSync(&certs);
EXPECT_TRUE(IsCertInCertificateList(cert_1.get(), certs));
EXPECT_FALSE(IsCertInCertificateList(cert_2.get(), certs));
}
} // namespace net
348 | {"nom":"Magny-sur-Tille","circ":"3ème circonscription","dpt":"Côte-d'Or","inscrits":636,"abs":343,"votants":293,"blancs":21,"nuls":4,"exp":268,"res":[{"nuance":"REM","nom":"<NAME>","voix":159},{"nuance":"FN","nom":"<NAME>","voix":109}]} | 98 |
// File: testdata/src/main/java/reflection/constructors/ClassWithAnnotatedConstructors002.java
package reflection.constructors;
import reflection.AnnoT;
import reflection.AnnoT2;
import reflection.AnnoT3;
/**
* For testing constructor reloading and methods related to fetching annotation data from
* constructors.
*
* @author kdvolder
*/
public class ClassWithAnnotatedConstructors002 {
// We want our reloaded version to have
// - additional constructors (with annotations)
// - constructors with changed annotations
//The annotation will be removed
@SuppressWarnings("unused")
private /* @AnnoT */ ClassWithAnnotatedConstructors002() {}
//The attribute value will be changed
public @AnnoT3(/*"first"*/ "second") ClassWithAnnotatedConstructors002(int x) {}
//Annotations will be added
protected @AnnoT @AnnoT3("haa002") ClassWithAnnotatedConstructors002(double x) {}
//Annotations will be changed (some added some removed)
protected /*@AnnoT*/ @AnnoT3("haa") /*+*/ @AnnoT2 ClassWithAnnotatedConstructors002(boolean x) {}
//Annotations are not changed at all
public @AnnoT @AnnoT2 @AnnoT3("haa") ClassWithAnnotatedConstructors002(char x) {}
// Annotations in the parameters will change
public ClassWithAnnotatedConstructors002(@AnnoT3("002") String x, @AnnoT2 double y, boolean z) {}
// Annotations in the parameters will be removed
public ClassWithAnnotatedConstructors002(double x, double y, boolean z) {}
// Annotations in the parameters will be added
public ClassWithAnnotatedConstructors002(@AnnoT char x, @AnnoT2 String y, @AnnoT2 @AnnoT3("bongo") @AnnoT boolean z) {}
///////////////////////////////////////////
// Some new constructors with and without annotations
public @AnnoT @AnnoT2 @AnnoT3("haa") ClassWithAnnotatedConstructors002(String x) {}
public ClassWithAnnotatedConstructors002(Float x) {}
public @AnnoT2 ClassWithAnnotatedConstructors002(float x) {}
public ClassWithAnnotatedConstructors002(float x, @AnnoT2 String y, @AnnoT2 @AnnoT3("bongo") @AnnoT boolean z) {}
}
| 661 |
1,550 | <gh_stars>1000+
#@+leo-ver=5-thin
#@+node:ekr.20170428084207.111: * @file ../external/npyscreen/eveventhandler.py
#@+others
#@+node:ekr.20170428084207.112: ** Declarations
import weakref
#@+node:ekr.20170428084207.113: ** class Event
class Event:
# a basic event class
#@+others
#@+node:ekr.20170428084207.114: *3* __init__
def __init__(self, name, payload=None):
self.name = name
self.payload = payload
#@-others
#@+node:ekr.20170428084207.115: ** class EventHandler
class EventHandler:
# This partial base class provides the framework to handle events.
#@+others
#@+node:ekr.20170428084207.116: *3* initialize_event_handling
def initialize_event_handling(self):
self.event_handlers = {}
#@+node:ekr.20170428084207.117: *3* add_event_hander
def add_event_hander(self, event_name, handler):
if not event_name in self.event_handlers:
self.event_handlers[event_name] = set()
# weakref.WeakSet() #Why doesn't the WeakSet work?
self.event_handlers[event_name].add(handler)
parent_app = self.find_parent_app()
if parent_app:
parent_app.register_for_event(self, event_name)
else:
# Probably are the parent App!
# but could be a form outside a proper application environment
try:
self.register_for_event(self, event_name)
except AttributeError:
pass
#@+node:ekr.20170428084207.118: *3* remove_event_handler
def remove_event_handler(self, event_name, handler):
if event_name in self.event_handlers:
self.event_handlers[event_name].remove(handler)
if not self.event_handlers[event_name]:
self.event_handlers.pop({})
#@+node:ekr.20170428084207.119: *3* handle_event
def handle_event(self, event):
"return True if the event was handled. Return False if the application should stop sending this event."
if event.name not in self.event_handlers:
return False
else:
remove_list = []
for handler in self.event_handlers[event.name]:
try:
handler(event)
except weakref.ReferenceError:
remove_list.append(handler)
for dead_handler in remove_list:
self.event_handlers[event.name].remove(handler)
return True
#@+node:ekr.20170428084207.120: *3* find_parent_app
def find_parent_app(self):
if hasattr(self, "parentApp"):
return self.parentApp
elif hasattr(self, "parent") and hasattr(self.parent, "parentApp"):
return self.parent.parentApp
else:
return None
#@-others
#@-others
#@@language python
#@@tabwidth -4
#@-leo
| 1,303 |
1,262 | <reponame>chinmayapadhi/metacat
/*
* Copyright 2016 Netflix, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.main.services.impl;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
import com.google.common.base.Throwables;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.netflix.metacat.common.MetacatRequestContext;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.DatabaseDto;
import com.netflix.metacat.common.dto.StorageDto;
import com.netflix.metacat.common.dto.TableDto;
import com.netflix.metacat.common.exception.MetacatBadRequestException;
import com.netflix.metacat.common.exception.MetacatNotSupportedException;
import com.netflix.metacat.common.server.connectors.exception.NotFoundException;
import com.netflix.metacat.common.server.connectors.exception.TableNotFoundException;
import com.netflix.metacat.common.server.converter.ConverterUtil;
import com.netflix.metacat.common.server.events.MetacatCreateTablePostEvent;
import com.netflix.metacat.common.server.events.MetacatCreateTablePreEvent;
import com.netflix.metacat.common.server.events.MetacatDeleteTablePostEvent;
import com.netflix.metacat.common.server.events.MetacatDeleteTablePreEvent;
import com.netflix.metacat.common.server.events.MetacatEventBus;
import com.netflix.metacat.common.server.events.MetacatRenameTablePostEvent;
import com.netflix.metacat.common.server.events.MetacatRenameTablePreEvent;
import com.netflix.metacat.common.server.events.MetacatUpdateIcebergTablePostEvent;
import com.netflix.metacat.common.server.events.MetacatUpdateTablePostEvent;
import com.netflix.metacat.common.server.events.MetacatUpdateTablePreEvent;
import com.netflix.metacat.common.server.monitoring.Metrics;
import com.netflix.metacat.common.server.properties.Config;
import com.netflix.metacat.common.server.spi.MetacatCatalogConfig;
import com.netflix.metacat.common.server.usermetadata.AuthorizationService;
import com.netflix.metacat.common.server.usermetadata.GetMetadataInterceptorParameters;
import com.netflix.metacat.common.server.usermetadata.MetacatOperation;
import com.netflix.metacat.common.server.usermetadata.TagService;
import com.netflix.metacat.common.server.usermetadata.UserMetadataService;
import com.netflix.metacat.common.server.util.MetacatContextManager;
import com.netflix.metacat.main.manager.ConnectorManager;
import com.netflix.metacat.main.services.DatabaseService;
import com.netflix.metacat.main.services.GetTableNamesServiceParameters;
import com.netflix.metacat.main.services.GetTableServiceParameters;
import com.netflix.metacat.main.services.TableService;
import com.netflix.spectator.api.Registry;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import lombok.extern.slf4j.Slf4j;
import javax.annotation.Nullable;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.TimeUnit;
/**
* Table service implementation.
*/
@Slf4j
public class TableServiceImpl implements TableService {
private static final String NAME_TAGS = "tags";
private final ConnectorManager connectorManager;
private final DatabaseService databaseService;
private final TagService tagService;
private final UserMetadataService userMetadataService;
private final MetacatEventBus eventBus;
private final Registry registry;
private final Config config;
private final ConverterUtil converterUtil;
private final ConnectorTableServiceProxy connectorTableServiceProxy;
private final AuthorizationService authorizationService;
/**
* Constructor.
*
* @param connectorManager Connector manager to use
* @param connectorTableServiceProxy connector table service proxy
* @param databaseService database service
* @param tagService tag service
* @param userMetadataService user metadata service
* @param eventBus Internal event bus
* @param registry registry handle
* @param config configurations
* @param converterUtil utility to convert to/from Dto to connector resources
* @param authorizationService authorization service
*/
public TableServiceImpl(
final ConnectorManager connectorManager,
final ConnectorTableServiceProxy connectorTableServiceProxy,
final DatabaseService databaseService,
final TagService tagService,
final UserMetadataService userMetadataService,
final MetacatEventBus eventBus,
final Registry registry,
final Config config,
final ConverterUtil converterUtil,
final AuthorizationService authorizationService
) {
this.connectorManager = connectorManager;
this.connectorTableServiceProxy = connectorTableServiceProxy;
this.databaseService = databaseService;
this.tagService = tagService;
this.userMetadataService = userMetadataService;
this.eventBus = eventBus;
this.registry = registry;
this.config = config;
this.authorizationService = authorizationService;
this.converterUtil = converterUtil;
}
/**
* {@inheritDoc}
*/
@Override
public TableDto create(final QualifiedName name, final TableDto tableDto) {
final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
validate(name);
this.authorizationService.checkPermission(metacatRequestContext.getUserName(),
tableDto.getName(), MetacatOperation.CREATE);
//
// Set the owner,if null, with the session user name.
//
setOwnerIfNull(tableDto, metacatRequestContext.getUserName());
log.info("Creating table {}", name);
eventBus.post(new MetacatCreateTablePreEvent(name, metacatRequestContext, this, tableDto));
connectorTableServiceProxy.create(name, converterUtil.fromTableDto(tableDto));
if (tableDto.getDataMetadata() != null || tableDto.getDefinitionMetadata() != null) {
log.info("Saving user metadata for table {}", name);
final long start = registry.clock().wallTime();
userMetadataService.saveMetadata(metacatRequestContext.getUserName(), tableDto, true);
final long duration = registry.clock().wallTime() - start;
log.info("Time taken to save user metadata for table {} is {} ms", name, duration);
registry.timer(registry.createId(Metrics.TimerSaveTableMetadata.getMetricName()).withTags(name.parts()))
.record(duration, TimeUnit.MILLISECONDS);
tag(name, tableDto.getDefinitionMetadata());
}
TableDto dto = tableDto;
try {
dto = get(name, GetTableServiceParameters.builder()
.disableOnReadMetadataIntercetor(false)
.includeInfo(true)
.includeDataMetadata(true)
.includeDefinitionMetadata(true)
.build()).orElse(tableDto);
} catch (Exception e) {
handleExceptionOnCreate(name, "getTable", e);
}
try {
eventBus.post(new MetacatCreateTablePostEvent(name, metacatRequestContext, this, dto));
} catch (Exception e) {
handleExceptionOnCreate(name, "postEvent", e);
}
return dto;
}
private void setOwnerIfNull(final TableDto tableDto, final String user) {
String owner = user;
StorageDto serde = tableDto.getSerde();
if (serde == null) {
serde = new StorageDto();
tableDto.setSerde(serde);
}
final String serdeOwner = serde.getOwner();
if (Strings.isNullOrEmpty(serdeOwner)) {
serde.setOwner(user);
} else {
owner = serdeOwner;
}
if (!Strings.isNullOrEmpty(owner)) {
userMetadataService.populateOwnerIfMissing(tableDto, owner);
}
}
@SuppressFBWarnings
private void tag(final QualifiedName name, final ObjectNode definitionMetadata) {
final Set<String> tags = getTableTags(definitionMetadata);
if (!tags.isEmpty()) {
log.info("Setting tags {} for table {}", tags, name);
final Set<String> result = tagService.setTags(name, tags, false);
}
}
private Set<String> getTableTags(@Nullable final ObjectNode definitionMetadata) {
final Set<String> tags = Sets.newHashSet();
if (definitionMetadata != null && definitionMetadata.get(NAME_TAGS) != null) {
final JsonNode tagsNode = definitionMetadata.get(NAME_TAGS);
if (tagsNode.isArray() && tagsNode.size() > 0) {
for (JsonNode tagNode : tagsNode) {
tags.add(tagNode.textValue());
}
}
}
return tags;
}
/**
* {@inheritDoc}
*/
@Override
public TableDto deleteAndReturn(final QualifiedName name, final boolean isMView) {
final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
validate(name);
this.authorizationService.checkPermission(metacatRequestContext.getUserName(),
name, MetacatOperation.DELETE);
eventBus.post(new MetacatDeleteTablePreEvent(name, metacatRequestContext, this));
TableDto tableDto = new TableDto();
tableDto.setName(name);
try {
final Optional<TableDto> oTable = get(name,
GetTableServiceParameters.builder()
.includeInfo(true)
.disableOnReadMetadataIntercetor(false)
.includeDefinitionMetadata(true)
.includeDataMetadata(true)
.build());
tableDto = oTable.orElse(tableDto);
} catch (Exception e) {
handleException(name, true, "deleteAndReturn_get", e);
}
// Fail if the table is tagged not to be deleted.
if (hasTags(tableDto, config.getNoTableDeleteOnTags())) {
throw new IllegalArgumentException(
String.format("Table %s cannot be deleted because it is tagged with %s.", name,
config.getNoTableDeleteOnTags()));
}
// Try to delete the table even if get above fails
try {
connectorTableServiceProxy.delete(name);
} catch (NotFoundException ignored) {
log.debug("NotFoundException ignored for table {}", name);
}
if (canDeleteMetadata(name)) {
// Delete the metadata. Type doesn't matter since we discard the result
log.info("Deleting user metadata for table {}", name);
userMetadataService.deleteMetadata(metacatRequestContext.getUserName(), Lists.newArrayList(tableDto));
log.info("Deleting tags for table {}", name);
tagService.delete(name, false);
} else {
if (config.canSoftDeleteDataMetadata() && tableDto.isDataExternal()) {
userMetadataService.softDeleteDataMetadata(metacatRequestContext.getUserName(),
Lists.newArrayList(tableDto.getDataUri()));
}
}
eventBus.post(new MetacatDeleteTablePostEvent(name, metacatRequestContext, this, tableDto, isMView));
return tableDto;
}
private boolean hasTags(@Nullable final TableDto tableDto, final Set<String> hasTags) {
if (!hasTags.isEmpty() && tableDto != null) {
final Set<String> tags = getTableTags(tableDto.getDefinitionMetadata());
if (!tags.isEmpty()) {
for (String t: hasTags) {
if (tags.contains(t)) {
return true;
}
}
}
}
return false;
}
/**
* Returns true
* 1. If the system is configured to delete deifnition metadata.
* 2. If the system is configured not to but the tableName is configured to either explicitly or if the
* table's database/catalog is configure to.
*
* @param tableName table name
* @return whether or not to delete definition metadata
*/
private boolean canDeleteMetadata(final QualifiedName tableName) {
return config.canDeleteTableDefinitionMetadata() || isEnabledForTableDefinitionMetadataDelete(tableName);
}
/**
* Returns true if tableName is enabled for deifnition metadata delete either explicitly or if the
* table's database/catalog is configure to.
*
* @param tableName table name
* @return whether or not to delete definition metadata
*/
private boolean isEnabledForTableDefinitionMetadataDelete(final QualifiedName tableName) {
final Set<QualifiedName> enableDeleteForQualifiedNames = config.getNamesEnabledForDefinitionMetadataDelete();
return enableDeleteForQualifiedNames.contains(tableName)
|| enableDeleteForQualifiedNames.contains(
QualifiedName.ofDatabase(tableName.getCatalogName(), tableName.getDatabaseName()))
|| enableDeleteForQualifiedNames.contains(QualifiedName.ofCatalog(tableName.getCatalogName()));
}
/**
* {@inheritDoc}
*/
@Override
public Optional<TableDto> get(final QualifiedName name, final GetTableServiceParameters getTableServiceParameters) {
validate(name);
TableDto tableInternal = null;
final TableDto table;
final MetacatCatalogConfig catalogConfig = connectorManager.getCatalogConfig(name);
if (getTableServiceParameters.isIncludeInfo()
|| (getTableServiceParameters.isIncludeDefinitionMetadata() && catalogConfig.isInterceptorEnabled()
&& !getTableServiceParameters.isDisableOnReadMetadataIntercetor())) {
try {
tableInternal = converterUtil.toTableDto(connectorTableServiceProxy
.get(name, getTableServiceParameters,
getTableServiceParameters.isUseCache() && config.isCacheEnabled()
&& catalogConfig.isCacheEnabled()));
} catch (NotFoundException ignored) {
return Optional.empty();
}
table = tableInternal;
} else {
table = new TableDto();
table.setName(name);
}
if (getTableServiceParameters.isIncludeDefinitionMetadata()) {
final Optional<ObjectNode> definitionMetadata =
(getTableServiceParameters.isDisableOnReadMetadataIntercetor())
? userMetadataService.getDefinitionMetadata(name)
: userMetadataService.getDefinitionMetadataWithInterceptor(name,
GetMetadataInterceptorParameters.builder().hasMetadata(tableInternal).build());
definitionMetadata.ifPresent(table::setDefinitionMetadata);
}
if (getTableServiceParameters.isIncludeDataMetadata() && catalogConfig.isHasDataExternal()) {
TableDto dto = table;
if (tableInternal == null && !getTableServiceParameters.isIncludeInfo()) {
try {
dto = converterUtil.toTableDto(connectorTableServiceProxy
.get(name,
getTableServiceParameters,
getTableServiceParameters.isUseCache() && config.isCacheEnabled()));
} catch (NotFoundException ignored) {
}
}
if (dto != null && dto.getSerde() != null) {
final Optional<ObjectNode> dataMetadata =
userMetadataService.getDataMetadata(dto.getSerde().getUri());
dataMetadata.ifPresent(table::setDataMetadata);
}
}
return Optional.of(table);
}
/**
* {@inheritDoc}
*/
@Override
public void rename(
final QualifiedName oldName,
final QualifiedName newName,
final boolean isMView
) {
validate(oldName);
final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
this.authorizationService.checkPermission(metacatRequestContext.getUserName(),
oldName, MetacatOperation.RENAME);
final TableDto oldTable = get(oldName, GetTableServiceParameters.builder()
.includeInfo(true)
.disableOnReadMetadataIntercetor(false)
.includeDefinitionMetadata(true)
.includeDataMetadata(true)
.build()).orElseThrow(() -> new TableNotFoundException(oldName));
// Fail if the table is tagged not to be renamed.
if (hasTags(oldTable, config.getNoTableRenameOnTags())) {
throw new IllegalArgumentException(
String.format("Table %s cannot be renamed because it is tagged with %s.", oldName,
config.getNoTableRenameOnTags()));
}
if (oldTable != null) {
//Ignore if the operation is not supported, so that we can at least go ahead and save the user metadata
eventBus.post(new MetacatRenameTablePreEvent(oldName, metacatRequestContext, this, newName));
connectorTableServiceProxy.rename(oldName, newName, isMView);
userMetadataService.renameDefinitionMetadataKey(oldName, newName);
tagService.renameTableTags(oldName, newName.getTableName());
final TableDto dto = get(newName, GetTableServiceParameters.builder()
.includeInfo(true)
.disableOnReadMetadataIntercetor(false)
.includeDefinitionMetadata(true)
.includeDataMetadata(true)
.build()).orElseThrow(() -> new IllegalStateException("should exist"));
eventBus.post(
new MetacatRenameTablePostEvent(oldName, metacatRequestContext, this, oldTable, dto, isMView));
}
}
/**
* {@inheritDoc}
*/
@Override
public void update(final QualifiedName name, final TableDto tableDto) {
updateAndReturn(name, tableDto);
}
/**
* {@inheritDoc}
*/
@Override
public TableDto updateAndReturn(final QualifiedName name, final TableDto tableDto) {
validate(name);
final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
final TableDto oldTable = get(name, GetTableServiceParameters.builder()
.disableOnReadMetadataIntercetor(false)
.includeInfo(true)
.includeDataMetadata(true)
.includeDefinitionMetadata(true)
.build()).orElseThrow(() -> new TableNotFoundException(name));
eventBus.post(new MetacatUpdateTablePreEvent(name, metacatRequestContext, this, oldTable, tableDto));
//
// Check if the table schema info is provided. If provided, we should continue calling the update on the table
// schema. Uri may exist in the serde when updating data metadata for a table.
//
boolean ignoreErrorsAfterUpdate = false;
if (isTableInfoProvided(tableDto, oldTable)) {
ignoreErrorsAfterUpdate = connectorTableServiceProxy.update(name, converterUtil.fromTableDto(tableDto));
}
try {
// Merge in metadata if the user sent any
if (tableDto.getDataMetadata() != null || tableDto.getDefinitionMetadata() != null) {
log.info("Saving user metadata for table {}", name);
final long start = registry.clock().wallTime();
userMetadataService.saveMetadata(metacatRequestContext.getUserName(), tableDto, true);
final long duration = registry.clock().wallTime() - start;
log.info("Time taken to save user metadata for table {} is {} ms", name, duration);
registry.timer(registry.createId(Metrics.TimerSaveTableMetadata.getMetricName()).withTags(name.parts()))
.record(duration, TimeUnit.MILLISECONDS);
}
} catch (Exception e) {
handleException(name, ignoreErrorsAfterUpdate, "saveMetadata", e);
}
// ignoreErrorsAfterUpdate is currently set only for iceberg tables
if (config.isUpdateIcebergTableAsyncPostEventEnabled() && ignoreErrorsAfterUpdate) {
eventBus.post(new MetacatUpdateIcebergTablePostEvent(name,
metacatRequestContext, this, oldTable, tableDto));
return tableDto;
} else {
TableDto updatedDto = tableDto;
try {
updatedDto = get(name,
GetTableServiceParameters.builder()
.disableOnReadMetadataIntercetor(false)
.includeInfo(true)
.includeDataMetadata(true)
.includeDefinitionMetadata(true)
.build()).orElse(tableDto);
} catch (Exception e) {
handleException(name, ignoreErrorsAfterUpdate, "getTable", e);
}
try {
eventBus.post(new MetacatUpdateTablePostEvent(name, metacatRequestContext, this, oldTable,
updatedDto, updatedDto != tableDto));
} catch (Exception e) {
handleException(name, ignoreErrorsAfterUpdate, "postEvent", e);
}
return updatedDto;
}
}
/**
* Throws exception if the provided <code>ignoreErrorsAfterUpdate</code> is false. If true, it will swallow the
* exception and log it.
*
*/
private void handleException(final QualifiedName name,
final boolean ignoreErrorsAfterUpdate,
final String request,
final Exception ex) {
if (ignoreErrorsAfterUpdate) {
log.warn("Failed {} for table {}. Error: {}", request, name, ex.getMessage());
registry.counter(registry.createId(
Metrics.CounterTableUpdateIgnoredException.getMetricName()).withTags(name.parts())
.withTag("request", request)).increment();
} else {
throw Throwables.propagate(ex);
}
}
/**
* Swallow the exception and log it.
*
*/
private void handleExceptionOnCreate(final QualifiedName name,
final String request,
final Exception ex) {
log.warn("Failed {} for create table {}. Error: {}", request, name, ex.getMessage());
registry.counter(registry.createId(
Metrics.CounterTableCreateIgnoredException.getMetricName()).withTags(name.parts())
.withTag("request", request)).increment();
}
@VisibleForTesting
private boolean isTableInfoProvided(final TableDto tableDto, final TableDto oldTableDto) {
boolean result = false;
if ((tableDto.getFields() != null && !tableDto.getFields().isEmpty())
|| isSerdeInfoProvided(tableDto, oldTableDto)
|| (tableDto.getMetadata() != null && !tableDto.getMetadata().isEmpty())
|| tableDto.getAudit() != null) {
result = true;
}
return result;
}
private boolean isSerdeInfoProvided(final TableDto tableDto, final TableDto oldTableDto) {
boolean result = false;
final StorageDto serde = tableDto.getSerde();
if (serde == null) {
result = false;
} else {
final StorageDto oldSerde = oldTableDto.getSerde();
final String oldUri = oldSerde != null ? oldSerde.getUri() : null;
if (serde.getInputFormat() != null
|| serde.getOutputFormat() != null
|| serde.getOwner() != null
|| serde.getParameters() != null
|| serde.getSerdeInfoParameters() != null
|| serde.getSerializationLib() != null
|| (serde.getUri() != null && !Objects.equals(serde.getUri(), oldUri))) {
result = true;
}
}
return result;
}
/**
* {@inheritDoc}
*/
@Override
public void delete(final QualifiedName name) {
deleteAndReturn(name, false);
}
/**
* {@inheritDoc}
*/
@Override
public TableDto get(final QualifiedName name) {
//this is used for different purpose, need to change the ineral calls
final Optional<TableDto> dto = get(name, GetTableServiceParameters.builder()
.includeInfo(true)
.includeDefinitionMetadata(true)
.includeDataMetadata(true)
.disableOnReadMetadataIntercetor(false)
.build());
return dto.orElse(null);
}
/**
* {@inheritDoc}
*/
@Override
public TableDto copy(final QualifiedName sourceName, final QualifiedName targetName) {
// Source should be same
if (!sourceName.getCatalogName().equals(targetName.getCatalogName())) {
throw new MetacatNotSupportedException("Cannot copy a table from a different source");
}
// Error out when source table does not exists
final Optional<TableDto> oTable = get(sourceName,
GetTableServiceParameters.builder()
.includeInfo(true)
.disableOnReadMetadataIntercetor(true)
.includeDataMetadata(false)
.includeDefinitionMetadata(false)
.build());
if (!oTable.isPresent()) {
throw new TableNotFoundException(sourceName);
}
// Error out when target table already exists
final Optional<TableDto> oTargetTable = get(targetName,
GetTableServiceParameters.builder()
.disableOnReadMetadataIntercetor(true)
.includeInfo(true)
.includeDataMetadata(false)
.includeDefinitionMetadata(false)
.build());
if (oTargetTable.isPresent()) {
throw new TableNotFoundException(targetName);
}
return copy(oTable.get(), targetName);
}
/**
* {@inheritDoc}
*/
@Override
public TableDto copy(final TableDto tableDto, final QualifiedName targetName) {
final QualifiedName databaseName =
QualifiedName.ofDatabase(targetName.getCatalogName(), targetName.getDatabaseName());
if (!databaseService.exists(databaseName)) {
final DatabaseDto databaseDto = new DatabaseDto();
databaseDto.setName(databaseName);
databaseService.create(databaseName, databaseDto);
}
final TableDto targetTableDto = new TableDto();
targetTableDto.setName(targetName);
targetTableDto.setFields(tableDto.getFields());
targetTableDto.setPartition_keys(tableDto.getPartition_keys());
final StorageDto storageDto = tableDto.getSerde();
if (storageDto != null) {
final StorageDto targetStorageDto = new StorageDto();
targetStorageDto.setInputFormat(storageDto.getInputFormat());
targetStorageDto.setOwner(storageDto.getOwner());
targetStorageDto.setOutputFormat(storageDto.getOutputFormat());
targetStorageDto.setParameters(storageDto.getParameters());
targetStorageDto.setUri(storageDto.getUri());
targetStorageDto.setSerializationLib(storageDto.getSerializationLib());
targetTableDto.setSerde(targetStorageDto);
}
create(targetName, targetTableDto);
return targetTableDto;
}
/**
* {@inheritDoc}
*/
@Override
public void saveMetadata(final QualifiedName name, final ObjectNode definitionMetadata,
final ObjectNode dataMetadata) {
validate(name);
final Optional<TableDto> tableDtoOptional = get(name, GetTableServiceParameters.builder().includeInfo(true)
.disableOnReadMetadataIntercetor(true)
.includeDefinitionMetadata(false)
.includeDataMetadata(false)
.build());
if (tableDtoOptional.isPresent()) {
final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
final TableDto tableDto = tableDtoOptional.get();
tableDto.setDefinitionMetadata(definitionMetadata); //override the previous one
tableDto.setDataMetadata(dataMetadata);
log.info("Saving user metadata for table {}", name);
userMetadataService.saveMetadata(metacatRequestContext.getUserName(), tableDto, true);
tag(name, tableDto.getDefinitionMetadata());
}
}
/**
* {@inheritDoc}
*/
@Override
public List<QualifiedName> getQualifiedNames(final String uri, final boolean prefixSearch) {
return connectorTableServiceProxy.getQualifiedNames(uri, prefixSearch);
}
/**
* {@inheritDoc}
*/
@Override
public Map<String, List<QualifiedName>> getQualifiedNames(final List<String> uris, final boolean prefixSearch) {
return connectorTableServiceProxy.getQualifiedNames(uris, prefixSearch);
}
@Override
public List<QualifiedName> getQualifiedNames(final QualifiedName name,
final GetTableNamesServiceParameters parameters) {
if (Strings.isNullOrEmpty(parameters.getFilter())) {
throw new MetacatBadRequestException("Filter expression cannot be empty");
}
return connectorTableServiceProxy.getQualifiedNames(name, parameters);
}
/**
* {@inheritDoc}
*/
@Override
public boolean exists(final QualifiedName name) {
return connectorTableServiceProxy.exists(name);
}
private void validate(final QualifiedName name) {
Preconditions.checkNotNull(name, "name cannot be null");
Preconditions.checkArgument(name.isTableDefinition(), "Definition {} does not refer to a table", name);
}
}
| 12,747 |
2,611 | # Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tpu_summary."""
from lingvo import compat as tf
from lingvo.core import test_utils
from lingvo.core import tpu_summary
class MockTransformer:
def FProp(self, x, y):
for i in range(3):
with tf.name_scope('encoder%03d' % i):
x = tf.identity(x)
y = tf.identity(y)
x = x + 1
tpu_summary.scalar('x_mean', tf.reduce_mean(x))
tpu_summary.scalar('y_mean', tf.reduce_mean(y))
for i in range(3):
with tf.name_scope('decoder%03d' % i):
x = tf.identity(x)
y = tf.identity(y)
y = y + 1
tpu_summary.scalar('x_mean', tf.reduce_mean(x))
tpu_summary.scalar('y_mean', tf.reduce_mean(y))
return x, y
def BeamSearch(self, x, y, decoder_reduce_sum=False):
for i in range(3):
with tf.name_scope('encoder%03d' % i):
x = tf.identity(x)
y = tf.identity(y)
x = x + 1
tpu_summary.scalar('x_mean', tf.reduce_mean(x))
tpu_summary.scalar('y_mean', tf.reduce_mean(y))
def DecoderStep(x, y):
for i in range(3):
with tf.name_scope('decoder%03d' % i):
x = tf.identity(x)
y = tf.identity(y)
y = y + 1
if decoder_reduce_sum:
tpu_summary.scalar(
'x_mean', tf.reduce_mean(x), while_loop_reduce='sum')
tpu_summary.scalar(
'y_mean', tf.reduce_mean(y), while_loop_reduce='sum')
else:
tpu_summary.scalar('x_mean', tf.reduce_mean(x))
tpu_summary.scalar('y_mean', tf.reduce_mean(y))
return x, y
def DecoderCond(x, y):
del x, y
return True
(x, y) = tf.while_loop(
cond=DecoderCond,
body=DecoderStep,
loop_vars=(x, y),
maximum_iterations=10)
return x, y
class TpuSummaryTest(test_utils.TestCase):
def testNoContext(self):
with self.session() as sess:
model = MockTransformer()
x = tf.constant(0, dtype=tf.float32)
y = tf.constant(0, dtype=tf.int64)
x, y = model.FProp(x, y)
x, y = sess.run((x, y))
self.assertEqual((3.0, 3), (x, y))
def _CanonicalizeSummaryName(self, summaries):
ret = dict()
for k in summaries:
ret[k.replace('/while', '')] = summaries[k]
return ret
def testMergeAll(self):
with self.session() as sess:
model = MockTransformer()
x = tf.constant(0, dtype=tf.float32)
y = tf.constant(0, dtype=tf.int64)
with tpu_summary.context():
x, y = model.FProp(x, y)
summaries = tpu_summary.merge_all()
x, y, summaries = sess.run((x, y, summaries))
self.assertEqual((3.0, 3), (x, y))
expected = {
'x_mean/decoder000': 3.0,
'x_mean/decoder001': 3.0,
'x_mean/decoder002': 3.0,
'x_mean/encoder000': 1.0,
'x_mean/encoder001': 2.0,
'x_mean/encoder002': 3.0,
'y_mean/decoder000': 1,
'y_mean/decoder001': 2,
'y_mean/decoder002': 3,
'y_mean/encoder000': 0,
'y_mean/encoder001': 0,
'y_mean/encoder002': 0,
}
self.assertEqual(expected, self._CanonicalizeSummaryName(summaries))
def testWhileLoopNoMergeAll(self):
with self.session() as sess:
model = MockTransformer()
x = tf.constant(0, dtype=tf.float32)
y = tf.constant(0, dtype=tf.int64)
with tpu_summary.context():
x, y = model.BeamSearch(x, y)
x, y = sess.run((x, y))
self.assertEqual((3.0, 30), (x, y))
def testWhileLoopNoRewrite(self):
with self.session() as sess:
model = MockTransformer()
x = tf.constant(0, dtype=tf.float32)
y = tf.constant(0, dtype=tf.int64)
with tpu_summary.context():
x, y = model.BeamSearch(x, y)
# ValueError: Tensor decoder000/Mean:0 is not an element of this graph.
with self.assertRaises(ValueError):
summaries = tpu_summary.merge_all()
x, y, summaries = sess.run((x, y, summaries))
def testWhileLoopRewrite(self):
with self.session() as sess:
model = MockTransformer()
x = tf.constant(0, dtype=tf.float32)
y = tf.constant(0, dtype=tf.int64)
with tpu_summary.context(rewrite_while_loop=True):
x, y = model.BeamSearch(x, y)
summaries = tpu_summary.merge_all()
tf.logging.info('summaries=%r', summaries)
x, y, summaries = sess.run((x, y, summaries))
self.assertEqual((3.0, 30), (x, y))
expected = {
'x_mean/encoder000': 1.0,
'x_mean/encoder001': 2.0,
'x_mean/encoder002': 3.0,
'y_mean/encoder000': 0,
'y_mean/encoder001': 0,
'y_mean/encoder002': 0,
'x_mean/decoder000': 3.0,
'x_mean/decoder001': 3.0,
'x_mean/decoder002': 3.0,
'y_mean/decoder000': 14.5,
'y_mean/decoder001': 15.5,
'y_mean/decoder002': 16.5,
}
self.assertEqual(expected, self._CanonicalizeSummaryName(summaries))
def testWhileLoopRewriteMaxVarsLimit(self):
with self.session() as sess:
model = MockTransformer()
x = tf.constant(0, dtype=tf.float32)
y = tf.constant(0, dtype=tf.int64)
with tpu_summary.context(rewrite_while_loop=True, max_loop_vars=2):
x, y = model.BeamSearch(x, y)
summaries = tpu_summary.merge_all()
tf.logging.info('summaries=%r', summaries)
x, y, summaries = sess.run((x, y, summaries))
self.assertEqual((3.0, 30), (x, y))
expected = {
'x_mean/encoder000': 1.0,
'x_mean/encoder001': 2.0,
'x_mean/encoder002': 3.0,
'y_mean/encoder000': 0,
'y_mean/encoder001': 0,
'y_mean/encoder002': 0,
'x_mean/decoder000': 3.0,
'y_mean/decoder000': 14.5,
}
self.assertEqual(expected, self._CanonicalizeSummaryName(summaries))
def testWhileLoopReduceSum(self):
with self.session() as sess:
model = MockTransformer()
x = tf.constant(0, dtype=tf.float32)
y = tf.constant(0, dtype=tf.int64)
with tpu_summary.context(rewrite_while_loop=True):
x, y = model.BeamSearch(x, y, decoder_reduce_sum=True)
summaries = tpu_summary.merge_all()
tf.logging.info('summaries=%r', summaries)
x, y, summaries = sess.run((x, y, summaries))
self.assertEqual((3.0, 30), (x, y))
expected = {
'x_mean/encoder000': 1.0,
'x_mean/encoder001': 2.0,
'x_mean/encoder002': 3.0,
'y_mean/encoder000': 0,
'y_mean/encoder001': 0,
'y_mean/encoder002': 0,
'x_mean/decoder000': 30.0,
'x_mean/decoder001': 30.0,
'x_mean/decoder002': 30.0,
'y_mean/decoder000': 145.0,
'y_mean/decoder001': 155.0,
'y_mean/decoder002': 165.0,
}
self.assertEqual(expected, self._CanonicalizeSummaryName(summaries))
if __name__ == '__main__':
tf.test.main()
| 3,765 |
1,931 | {
"main": "dist/remirror-pm-model.cjs.js",
"module": "dist/remirror-pm-model.esm.js",
"browser": {
"./dist/remirror-pm-model.cjs.js": "./dist/remirror-pm-model.browser.cjs.js",
"./dist/remirror-pm-model.esm.js": "./dist/remirror-pm-model.browser.esm.js"
},
"types": "dist/remirror-pm-model.cjs.d.ts",
"rn:dev": "../src/model.ts"
}
| 170 |
435 | <filename>warehouse/ops-tools/config-compare/src/main/java/datawave/configuration/DataTypeConfigCompare.java
package datawave.configuration;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import java.util.Map;
import java.util.SortedSet;
import java.util.TreeSet;
/**
* A utility for comparing data type configuration files. The comparison will report fields that are the same and fields that are different.
* <p>
* The comparison follows the following rules: 1) There must be a data.name config. This will be used as the expected prefix. <br>
* 2) If a field starts with the detected prefix, it will be compared to the corresponding (prefixed) value in the other config. <br>
* 3) If a field is not prefixed, it will be compared to the same field in the other config. <br>
*/
public class DataTypeConfigCompare {
public static final String PREFIX = "data.name";
/**
* Runs the comparison.
*
* @param left
* @param right
* @return CompareResult which houses comparison details.
*/
public CompareResult run(Configuration left, Configuration right) {
SortedSet<String> same = new TreeSet<>();
SortedSet<String> diff = new TreeSet<>();
SortedSet<String> leftOnly = new TreeSet<>();
SortedSet<String> rightOnly = new TreeSet<>();
String leftPrefix = getPrefix(left);
String rightPrefix = getPrefix(right);
for (Map.Entry<String,String> entry : left) {
ConfField field = new ConfField(leftPrefix, entry.getKey());
String leftValue = entry.getValue();
String rightValue = right.get(field.getField(rightPrefix));
if (nullSafeEquals(leftValue, rightValue)) {
same.add(field.getField());
} else if (rightValue == null) {
leftOnly.add(field.getField());
} else {
diff.add(field.getField());
}
}
// To find values only in right, we just iterate through
// and verify each property does not exist in left, since
// we already checked equivalence above.
for (Map.Entry<String,String> entry : right) {
ConfField field = new ConfField(rightPrefix, entry.getKey());
if (left.get(field.getField(leftPrefix)) == null) {
rightOnly.add(field.getField());
}
}
return new CompareResult(same, diff, leftOnly, rightOnly);
}
private boolean nullSafeEquals(String s1, String s2) {
return s1 == null ? s2 == null : s1.equals(s2);
}
private String getPrefix(Configuration c) {
String prefix = c.get(PREFIX);
if (StringUtils.isBlank(prefix)) {
throw new IllegalArgumentException("Configurations must contain a 'data.name' field.");
}
return prefix;
}
}
| 1,204 |
2,305 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import json
import logging
from pathlib import Path
from typing import Union, Dict, Any
from .utils import ContextStack
_logger = logging.getLogger(__name__)
def fixed_arch(fixed_arch: Union[str, Path, Dict[str, Any]], verbose=True):
"""
Load architecture from ``fixed_arch`` and apply to model. This should be used as a context manager. For example,
.. code-block:: python
with fixed_arch('/path/to/export.json'):
model = Model(3, 224, 224)
Parameters
----------
fixed_arc : str, Path or dict
Path to the JSON that stores the architecture, or dict that stores the exported architecture.
verbose : bool
Print log messages if set to True
Returns
-------
ContextStack
Context manager that provides a fixed architecture when creates the model.
"""
if isinstance(fixed_arch, (str, Path)):
with open(fixed_arch) as f:
fixed_arch = json.load(f)
if verbose:
_logger.info(f'Fixed architecture: %s', fixed_arch)
return ContextStack('fixed', fixed_arch)
| 402 |
517 | // GMSMapView+ClusterKit.h
//
// Copyright © 2017 Hulab. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
#import <GoogleMaps/GoogleMaps.h>
#import <ClusterKit/CKMap.h>
#import <ClusterKit/CKCluster.h>
NS_ASSUME_NONNULL_BEGIN
/**
The GMSMapViewDataSource protocol is adopted by an object that mediates the GMSMapView’s data. The data source provides the markers that represent clusters on map.
*/
@protocol GMSMapViewDataSource <NSObject>
@optional
/**
Asks the data source for a marker that represent the given cluster.
@param mapView A map view object requesting the marker.
@param cluster The cluster to represent.
@return An object inheriting from GMSMarker that the map view can use for the specified cluster.
*/
- (__kindof GMSMarker *)mapView:(GMSMapView *)mapView markerForCluster:(CKCluster *)cluster;
@end
/**
GMSMarker category adopting the CKAnnotation protocol.
*/
@interface GMSMarker (ClusterKit)
/**
The cluster that the marker is related to.
*/
@property (nonatomic, weak, nullable) CKCluster *cluster;
@end
/**
GMSMapView category adopting the CKMap protocol.
*/
@interface GMSMapView (ClusterKit) <CKMap>
/**
Data source instance that adopt the GMSMapViewDataSource.
*/
@property(nonatomic, weak) IBOutlet id<GMSMapViewDataSource> dataSource;
/**
Returns the marker representing the given cluster.
@param cluster The cluster for which to return the corresponding marker.
@return The value associated with cluster, or nil if no value is associated with cluster.
*/
- (nullable __kindof GMSMarker *)markerForCluster:(CKCluster *)cluster;
@end
/**
GMSCameraUpdate for modifying the camera to show the content of a cluster.
*/
@interface GMSCameraUpdate (ClusterKit)
/**
Returns a GMSCameraUpdate that transforms the camera such that the specified cluster are centered on screen at the greatest possible zoom level. The bounds will have a default padding of 64 points.
The returned camera update will set the camera's bearing and tilt to their default zero values (i.e., facing north and looking directly at the Earth).
@param cluster The cluster to fit.
@return The camera update that fit the given cluster.
*/
+ (GMSCameraUpdate *)fitCluster:(CKCluster *)cluster;
/**
This is similar to fitCluster: but allows specifying the padding (in points) in order to inset the bounding box from the view's edges.
@param cluster The cluster to fit.
@param padding The padding that inset the bounding box. If the requested padding is larger than the view size in either the vertical or horizontal direction the map will be maximally zoomed out.
@return The camera update that fit the given cluster.
*/
+ (GMSCameraUpdate *)fitCluster:(CKCluster *)cluster
withPadding:(CGFloat)padding;
/**
This is similar to fitCluster: but allows specifying edge insets in order to inset the bounding box from the view's edges.
@param cluster The cluster to fit.
@param edgeInsets The edge insets of the bounding box. If the requested edge insets are larger than the view size in either the vertical or horizontal direction the map will be maximally zoomed out.
@return The camera update that fit the given cluster.
*/
+ (GMSCameraUpdate *)fitCluster:(CKCluster *)cluster
withEdgeInsets:(UIEdgeInsets)edgeInsets;
@end
NS_ASSUME_NONNULL_END
| 1,266 |
1,355 | <gh_stars>1000+
package li.cil.oc.api.machine;
/**
* Used to signal that the direct call limit for the current server tick has
* been reached in {@link Machine#invoke(String, String, Object[])}.
*/
public class LimitReachedException extends Exception {
}
| 73 |
310 | <reponame>rczhang/healthcare<gh_stars>100-1000
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Gets predictions from the TensorFlow model server."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, List, Text, Union
import attr
import grpc
import retrying
import tensorflow.compat.v1 as tf
from tensorflow.python.framework import tensor_util
from google.rpc import code_pb2
from hcls_imaging_ml_toolkit import exception
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
@attr.s
class Response(object):
"""Response of Tensorflow serving.
Attributes:
key: The key that holds the response.
shape: The expected shape of the response.
"""
key = attr.ib(type=Text)
# Use `None` for dimensions whose values are not known in advance.
# e.g., Consider a model that produces arbitrary number of bounding boxes with
# the output signature `[1, 4, -1]`. Set shape to `[1, 4, None]`.
shape = attr.ib(type=List[int])
@attr.s
class ModelConfig(object):
"""Properties of a Tensorflow model.
Attributes:
name: Name of the model.
signature: Signature of the model.
input_key: The key used to populate the input request.
response: The expected response structure of the model.
"""
name = attr.ib(type=Text)
signature = attr.ib(type=Text)
input_key = attr.ib(type=Text)
response = attr.ib(type=List[Response])
# Overrwite the max GRPC request and response size to 1GB. The default of 4MB
# is too small for some of the models invoked here.
_MAX_GRPC_REQUEST_AND_RESPONSE_SIZE = 1024 * 1024 * 1024
# gRPC status codes that are retried.
_RETRIABLE_GRPC_STATUS_CODES = (
grpc.StatusCode.UNAVAILABLE,
grpc.StatusCode.CANCELLED,
grpc.StatusCode.RESOURCE_EXHAUSTED,
grpc.StatusCode.DEADLINE_EXCEEDED,
)
def _IsRetriableGrpcError(e) -> bool:
"""Determines whether the given gRPC exception is retriable.
Args:
e: The gRPC exception.
Returns:
Whether the exception can be retried.
"""
return isinstance(e, grpc.Call) and e.code() in _RETRIABLE_GRPC_STATUS_CODES
class ModelServer(object):
"""Gets predictions from the model server."""
def __init__(self, address: Text, timeout=300.0) -> None:
"""Inits ModelServer with passed args.
Args:
address: Address in host:port format.
timeout: Timeout for prediction calls.
"""
self._address = address
self._timeout = timeout
options = [
('grpc.max_send_message_length', _MAX_GRPC_REQUEST_AND_RESPONSE_SIZE),
('grpc.max_receive_message_length', _MAX_GRPC_REQUEST_AND_RESPONSE_SIZE)
]
self._channel = grpc.insecure_channel(self._address, options)
def Close(self) -> None:
"""Shuts down GRPC Channel."""
self._channel.close()
def Predict(self, input_bytes: bytes,
model_config: ModelConfig) -> List[List[float]]:
"""Get prediction from the model server.
Args:
input_bytes: The input bytes.
model_config: Configuration for the model to be called.
Returns:
The prediction response from TF serving.
"""
return self._Predict(input_bytes, 1, model_config)
def PredictExamples(self, examples: Union[List[tf.train.Example],
List[tf.train.SequenceExample]],
model_config: ModelConfig) -> List[List[float]]:
"""Get prediction for a list of TF (Sequence)Examples from the model server.
Args:
examples: The list of TF examples used as input to TF serving.
model_config: Configuration for the model to be called.
Returns:
The prediction response from TF serving.
"""
serialized_examples = [example.SerializeToString() for example in examples]
return self._Predict(serialized_examples, len(serialized_examples),
model_config)
def _Predict(self, input_data: Any, num_inputs: int,
model_config: ModelConfig) -> List[List[float]]:
"""Get prediction from the model server.
Send the provided input data to the model server over gRPC and returns
the response.
Args:
input_data: Input data fed into the model.
num_inputs: Number of model inputs (should be set to 1 for non-batch).
model_config: Configuration for the model to be called.
Returns:
The prediction response from TF serving.
Raises:
exception.CustomExceptionError: If the shape of response is incompatible
or if the model server returns an error and it sets the status code to
INTERNAL.
"""
try:
req = predict_pb2.PredictRequest()
req.model_spec.name = model_config.name
req.model_spec.signature_name = model_config.signature
req.inputs[model_config.input_key].CopyFrom(
tf.make_tensor_proto(input_data, shape=[num_inputs]))
resp = self._InvokePredictRequest(req)
except Exception as e:
raise exception.CustomExceptionError(str(e), code_pb2.Code.INTERNAL)
floats = []
for r in model_config.response:
value = resp.outputs[r.key]
shape = tensor_util.TensorShapeProtoToList(value.tensor_shape)
if not tf.TensorShape(shape).is_compatible_with(r.shape):
raise exception.CustomExceptionError(
'Model returned invalid shape {}, want {}'.format(shape, r.shape),
code_pb2.Code.INTERNAL)
floats.append(value.float_val[:])
return floats
@retrying.retry(
retry_on_exception=_IsRetriableGrpcError,
wait_exponential_multiplier=2000,
wait_exponential_max=32000,
stop_max_attempt_number=5)
def _InvokePredictRequest(
self, req: predict_pb2.PredictRequest) -> predict_pb2.PredictResponse:
"""Invokes a Predict request to TF serving.
This function will retry all transient errors.
Args:
req: The Predict request.
Returns:
The prediction response from TF serving.
"""
stub = prediction_service_pb2_grpc.PredictionServiceStub(self._channel)
return stub.Predict(req, self._timeout)
| 2,376 |
655 | #pragma once
#include <df/util/tensor.h>
#include <df/voxel/color.h>
#include <df/voxel/compositeVoxel.h>
#include <df/voxel/tsdf.h>
#include <df/voxel/voxelGrid.h>
namespace df {
template <typename Scalar, typename VoxelT>
void computeSurfaceColors(const DeviceTensor1<Eigen::Matrix<Scalar,3,1,Eigen::DontAlign> > & vertices,
DeviceTensor1<Eigen::Matrix<unsigned char,3,1,Eigen::DontAlign> > & colors,
const DeviceVoxelGrid<Scalar,VoxelT> & voxelGrid,
const DeviceTensor1<Eigen::Matrix<unsigned char,3,1,Eigen::DontAlign> > &);
} // namespace df
| 311 |
15,947 | <reponame>ChaseKnowlden/airflow
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest import mock
import pytest
from parameterized import parameterized
from spython.instance import Instance
from airflow.exceptions import AirflowException
from airflow.providers.singularity.operators.singularity import SingularityOperator
class SingularityOperatorTestCase(unittest.TestCase):
@mock.patch('airflow.providers.singularity.operators.singularity.Client')
def test_execute(self, client_mock):
instance = mock.Mock(
autospec=Instance,
**{
'start.return_value': 0,
'stop.return_value': 0,
},
)
client_mock.instance.return_value = instance
client_mock.execute.return_value = {'return_code': 0, 'message': 'message'}
task = SingularityOperator(task_id='task-id', image="docker://busybox", command="echo hello")
task.execute({})
client_mock.instance.assert_called_once_with("docker://busybox", options=[], args=None, start=False)
client_mock.execute.assert_called_once_with(mock.ANY, "echo hello", return_result=True)
execute_args, _ = client_mock.execute.call_args
assert execute_args[0] is instance
instance.start.assert_called_once_with()
instance.stop.assert_called_once_with()
@parameterized.expand(
[
("",),
(None,),
]
)
def test_command_is_required(self, command):
task = SingularityOperator(task_id='task-id', image="docker://busybox", command=command)
with pytest.raises(AirflowException, match="You must define a command."):
task.execute({})
@mock.patch('airflow.providers.singularity.operators.singularity.Client')
def test_image_should_be_pulled_when_not_exists(self, client_mock):
instance = mock.Mock(
autospec=Instance,
**{
'start.return_value': 0,
'stop.return_value': 0,
},
)
client_mock.pull.return_value = '/tmp/busybox_latest.sif'
client_mock.instance.return_value = instance
client_mock.execute.return_value = {'return_code': 0, 'message': 'message'}
task = SingularityOperator(
task_id='task-id',
image="docker://busybox",
command="echo hello",
pull_folder="/tmp",
force_pull=True,
)
task.execute({})
client_mock.instance.assert_called_once_with(
"/tmp/busybox_latest.sif", options=[], args=None, start=False
)
client_mock.pull.assert_called_once_with("docker://busybox", stream=True, pull_folder="/tmp")
client_mock.execute.assert_called_once_with(mock.ANY, "echo hello", return_result=True)
@parameterized.expand(
[
(
None,
[],
),
(
[],
[],
),
(
["AAA"],
['--bind', 'AAA'],
),
(
["AAA", "BBB"],
['--bind', 'AAA', '--bind', 'BBB'],
),
(
["AAA", "BBB", "CCC"],
['--bind', 'AAA', '--bind', 'BBB', '--bind', 'CCC'],
),
]
)
@mock.patch('airflow.providers.singularity.operators.singularity.Client')
def test_bind_options(self, volumes, expected_options, client_mock):
instance = mock.Mock(
autospec=Instance,
**{
'start.return_value': 0,
'stop.return_value': 0,
},
)
client_mock.pull.return_value = 'docker://busybox'
client_mock.instance.return_value = instance
client_mock.execute.return_value = {'return_code': 0, 'message': 'message'}
task = SingularityOperator(
task_id='task-id',
image="docker://busybox",
command="echo hello",
force_pull=True,
volumes=volumes,
)
task.execute({})
client_mock.instance.assert_called_once_with(
"docker://busybox", options=expected_options, args=None, start=False
)
@parameterized.expand(
[
(
None,
[],
),
(
"",
['--workdir', ''],
),
(
"/work-dir/",
['--workdir', '/work-dir/'],
),
]
)
@mock.patch('airflow.providers.singularity.operators.singularity.Client')
def test_working_dir(self, working_dir, expected_working_dir, client_mock):
instance = mock.Mock(
autospec=Instance,
**{
'start.return_value': 0,
'stop.return_value': 0,
},
)
client_mock.pull.return_value = 'docker://busybox'
client_mock.instance.return_value = instance
client_mock.execute.return_value = {'return_code': 0, 'message': 'message'}
task = SingularityOperator(
task_id='task-id',
image="docker://busybox",
command="echo hello",
force_pull=True,
working_dir=working_dir,
)
task.execute({})
client_mock.instance.assert_called_once_with(
"docker://busybox", options=expected_working_dir, args=None, start=False
)
| 2,942 |
14,668 | <reponame>chromium/chromium
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "ash/quick_pair/keyed_service/quick_pair_metrics_logger.h"
#include <memory>
#include "ash/quick_pair/common/account_key_failure.h"
#include "ash/quick_pair/common/constants.h"
#include "ash/quick_pair/common/device.h"
#include "ash/quick_pair/common/fast_pair/fast_pair_metrics.h"
#include "ash/quick_pair/common/logging.h"
#include "ash/quick_pair/common/pair_failure.h"
#include "ash/quick_pair/common/protocol.h"
#include "ash/quick_pair/pairing/fake_retroactive_pairing_detector.h"
#include "ash/quick_pair/pairing/mock_pairer_broker.h"
#include "ash/quick_pair/pairing/pairer_broker.h"
#include "ash/quick_pair/pairing/retroactive_pairing_detector.h"
#include "ash/quick_pair/scanning/mock_scanner_broker.h"
#include "ash/quick_pair/scanning/scanner_broker.h"
#include "ash/quick_pair/ui/mock_ui_broker.h"
#include "ash/quick_pair/ui/ui_broker.h"
#include "base/memory/scoped_refptr.h"
#include "base/run_loop.h"
#include "base/test/metrics/histogram_tester.h"
#include "base/test/task_environment.h"
#include "base/time/time.h"
#include "device/bluetooth/bluetooth_adapter_factory.h"
#include "device/bluetooth/test/mock_bluetooth_adapter.h"
#include "device/bluetooth/test/mock_bluetooth_device.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/abseil-cpp/absl/types/optional.h"
namespace {
constexpr char kTestMetadataId[] = "test_metadata_id";
constexpr char kTestAddress[] = "test_address";
constexpr char kFastPairEngagementFlowMetricInitial[] =
"Bluetooth.ChromeOS.FastPair.EngagementFunnel.Steps.InitialPairingProtocol";
constexpr char kFastPairEngagementFlowMetricSubsequent[] =
"Bluetooth.ChromeOS.FastPair.EngagementFunnel.Steps."
"SubsequentPairingProtocol";
const char kFastPairRetroactiveEngagementFlowMetric[] =
"Bluetooth.ChromeOS.FastPair.RetroactiveEngagementFunnel.Steps";
constexpr char kFastPairPairTimeMetricInitial[] =
"Bluetooth.ChromeOS.FastPair.TotalUxPairTime.InitialPairingProtocol";
constexpr char kFastPairPairTimeMetricSubsequent[] =
"Bluetooth.ChromeOS.FastPair.TotalUxPairTime.SubsequentPairingProtocol";
const char kPairingMethodMetric[] = "Bluetooth.ChromeOS.FastPair.PairingMethod";
const char kRetroactivePairingResultMetric[] =
"Bluetooth.ChromeOS.FastPair.RetroactivePairing.Result";
const char kFastPairPairFailureMetricInitial[] =
"Bluetooth.ChromeOS.FastPair.PairFailure.InitialPairingProtocol";
const char kFastPairPairFailureMetricSubsequent[] =
"Bluetooth.ChromeOS.FastPair.PairFailure.SubsequentPairingProtocol";
const char kFastPairPairFailureMetricRetroactive[] =
"Bluetooth.ChromeOS.FastPair.PairFailure.RetroactivePairingProtocol";
const char kFastPairPairResultMetricInitial[] =
"Bluetooth.ChromeOS.FastPair.Pairing.Result.InitialPairingProtocol";
const char kFastPairPairResultMetricSubsequent[] =
"Bluetooth.ChromeOS.FastPair.Pairing.Result.SubsequentPairingProtocol";
const char kFastPairPairResultMetricRetroactive[] =
"Bluetooth.ChromeOS.FastPair.Pairing.Result.RetroactivePairingProtocol";
const char kFastPairAccountKeyWriteResultMetricInitial[] =
"Bluetooth.ChromeOS.FastPair.AccountKeyWrite.Result.InitialPairingProtocol";
const char kFastPairAccountKeyWriteResultMetricRetroactive[] =
"Bluetooth.ChromeOS.FastPair.AccountKeyWrite.Result."
"RetroactivePairingProtocol";
const char kFastPairAccountKeyWriteFailureMetricInitial[] =
"Bluetooth.ChromeOS.FastPair.AccountKeyFailure.InitialPairingProtocol";
const char kFastPairAccountKeyWriteFailureMetricRetroactive[] =
"Bluetooth.ChromeOS.FastPair.AccountKeyFailure.RetroactivePairingProtocol";
constexpr char kTestDeviceAddress[] = "11:12:13:14:15:16";
constexpr char kTestBleDeviceName[] = "Test Device Name";
constexpr char kValidModelId[] = "718c17";
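// Builds a mock classic Bluetooth device with the given address for
// adapter-level pairing events.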
std::unique_ptr<testing::NiceMock<device::MockBluetoothDevice>>
CreateTestBluetoothDevice(std::string address) {
return std::make_unique<testing::NiceMock<device::MockBluetoothDevice>>(
/*adapter=*/nullptr, /*bluetooth_class=*/0, kTestBleDeviceName, address,
/*paired=*/true, /*connected=*/false);
}
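// Test adapter that looks up its mock devices by address and can forward
// paired-state changes to observers.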
class FakeMetricBluetoothAdapter
: public testing::NiceMock<device::MockBluetoothAdapter> {
public:
device::BluetoothDevice* GetDevice(const std::string& address) override {
for (const auto& it : mock_devices_) {
if (it->GetAddress() == address)
return it.get();
}
return nullptr;
}
void NotifyDevicePairedChanged(device::BluetoothDevice* device,
bool new_paired_status) {
device::BluetoothAdapter::NotifyDevicePairedChanged(device,
new_paired_status);
}
private:
~FakeMetricBluetoothAdapter() = default;
};
} // namespace
namespace ash {
namespace quick_pair {
class QuickPairMetricsLoggerTest : public testing::Test {
public:
void SetUp() override {
adapter_ = base::MakeRefCounted<FakeMetricBluetoothAdapter>();
device::BluetoothAdapterFactory::SetAdapterForTesting(adapter_);
scanner_broker_ = std::make_unique<MockScannerBroker>();
mock_scanner_broker_ =
static_cast<MockScannerBroker*>(scanner_broker_.get());
retroactive_pairing_detector_ =
std::make_unique<FakeRetroactivePairingDetector>();
fake_retroactive_pairing_detector_ =
static_cast<FakeRetroactivePairingDetector*>(
retroactive_pairing_detector_.get());
pairer_broker_ = std::make_unique<MockPairerBroker>();
mock_pairer_broker_ = static_cast<MockPairerBroker*>(pairer_broker_.get());
ui_broker_ = std::make_unique<MockUIBroker>();
mock_ui_broker_ = static_cast<MockUIBroker*>(ui_broker_.get());
initial_device_ = base::MakeRefCounted<Device>(
kTestMetadataId, kTestAddress, Protocol::kFastPairInitial);
subsequent_device_ = base::MakeRefCounted<Device>(
kTestMetadataId, kTestAddress, Protocol::kFastPairSubsequent);
retroactive_device_ = base::MakeRefCounted<Device>(
kTestMetadataId, kTestAddress, Protocol::kFastPairRetroactive);
metrics_logger_ = std::make_unique<QuickPairMetricsLogger>(
scanner_broker_.get(), pairer_broker_.get(), ui_broker_.get(),
retroactive_pairing_detector_.get());
}
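  // The Simulate* helpers below drive the mock scanner, pairer, UI broker and
  // retroactive-pairing detector to emit the events the metrics logger
  // observes.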
void SimulateDiscoveryUiShown(Protocol protocol) {
switch (protocol) {
case Protocol::kFastPairInitial:
mock_scanner_broker_->NotifyDeviceFound(initial_device_);
break;
case Protocol::kFastPairSubsequent:
mock_scanner_broker_->NotifyDeviceFound(subsequent_device_);
break;
case Protocol::kFastPairRetroactive:
break;
}
}
void SimulateDiscoveryUiDismissed(Protocol protocol) {
switch (protocol) {
case Protocol::kFastPairInitial:
mock_ui_broker_->NotifyDiscoveryAction(initial_device_,
DiscoveryAction::kDismissed);
break;
case Protocol::kFastPairSubsequent:
mock_ui_broker_->NotifyDiscoveryAction(subsequent_device_,
DiscoveryAction::kDismissed);
break;
case Protocol::kFastPairRetroactive:
break;
}
}
void SimulateDiscoveryUiDismissedByUser(Protocol protocol) {
switch (protocol) {
case Protocol::kFastPairInitial:
mock_ui_broker_->NotifyDiscoveryAction(
initial_device_, DiscoveryAction::kDismissedByUser);
break;
case Protocol::kFastPairSubsequent:
mock_ui_broker_->NotifyDiscoveryAction(
subsequent_device_, DiscoveryAction::kDismissedByUser);
break;
case Protocol::kFastPairRetroactive:
break;
}
}
void SimulateDiscoveryUiConnectPressed(Protocol protocol) {
switch (protocol) {
case Protocol::kFastPairInitial:
mock_ui_broker_->NotifyDiscoveryAction(initial_device_,
DiscoveryAction::kPairToDevice);
break;
case Protocol::kFastPairSubsequent:
mock_ui_broker_->NotifyDiscoveryAction(subsequent_device_,
DiscoveryAction::kPairToDevice);
break;
case Protocol::kFastPairRetroactive:
break;
}
}
void SimulatePairingFailed(Protocol protocol) {
switch (protocol) {
case Protocol::kFastPairInitial:
mock_pairer_broker_->NotifyPairFailure(
initial_device_,
PairFailure::kKeyBasedPairingCharacteristicDiscovery);
break;
case Protocol::kFastPairSubsequent:
mock_pairer_broker_->NotifyPairFailure(
subsequent_device_,
PairFailure::kKeyBasedPairingCharacteristicDiscovery);
break;
case Protocol::kFastPairRetroactive:
mock_pairer_broker_->NotifyPairFailure(
retroactive_device_,
PairFailure::kKeyBasedPairingCharacteristicDiscovery);
break;
}
}
void SimulatePairingSucceeded(Protocol protocol) {
switch (protocol) {
case Protocol::kFastPairInitial:
initial_device_->set_classic_address(kTestAddress);
mock_pairer_broker_->NotifyDevicePaired(initial_device_);
break;
case Protocol::kFastPairSubsequent:
subsequent_device_->set_classic_address(kTestAddress);
mock_pairer_broker_->NotifyDevicePaired(subsequent_device_);
break;
case Protocol::kFastPairRetroactive:
retroactive_device_->set_classic_address(kTestAddress);
mock_pairer_broker_->NotifyDevicePaired(retroactive_device_);
break;
}
}
void SimulateErrorUiDismissedByUser(Protocol protocol) {
switch (protocol) {
case Protocol::kFastPairInitial:
mock_ui_broker_->NotifyPairingFailedAction(
initial_device_, PairingFailedAction::kDismissedByUser);
break;
case Protocol::kFastPairSubsequent:
mock_ui_broker_->NotifyPairingFailedAction(
subsequent_device_, PairingFailedAction::kDismissedByUser);
break;
case Protocol::kFastPairRetroactive:
break;
}
}
void SimulateErrorUiDismissed(Protocol protocol) {
switch (protocol) {
case Protocol::kFastPairInitial:
mock_ui_broker_->NotifyPairingFailedAction(
initial_device_, PairingFailedAction::kDismissed);
break;
case Protocol::kFastPairSubsequent:
mock_ui_broker_->NotifyPairingFailedAction(
subsequent_device_, PairingFailedAction::kDismissed);
break;
case Protocol::kFastPairRetroactive:
break;
}
}
void SimulateErrorUiSettingsPressed(Protocol protocol) {
switch (protocol) {
case Protocol::kFastPairInitial:
mock_ui_broker_->NotifyPairingFailedAction(
initial_device_, PairingFailedAction::kNavigateToSettings);
break;
case Protocol::kFastPairSubsequent:
mock_ui_broker_->NotifyPairingFailedAction(
subsequent_device_, PairingFailedAction::kNavigateToSettings);
break;
case Protocol::kFastPairRetroactive:
break;
}
}
void SimulateAssociateAccountUiShown() {
fake_retroactive_pairing_detector_->NotifyRetroactivePairFound(
retroactive_device_);
}
void SimulateAssociateAccountUiDismissed() {
mock_ui_broker_->NotifyAssociateAccountAction(
retroactive_device_, AssociateAccountAction::kDismissed);
}
void SimulateAssociateAccountUiDismissedByUser() {
mock_ui_broker_->NotifyAssociateAccountAction(
retroactive_device_, AssociateAccountAction::kDismissedByUser);
}
void SimulateAssociateAccountUiSavePressed() {
mock_ui_broker_->NotifyAssociateAccountAction(
retroactive_device_, AssociateAccountAction::kAssoicateAccount);
}
void SimulateAssociateAccountUiLearnMorePressed() {
mock_ui_broker_->NotifyAssociateAccountAction(
retroactive_device_, AssociateAccountAction::kLearnMore);
}
void SimulateAccountKeyWritten(Protocol protocol) {
switch (protocol) {
case Protocol::kFastPairInitial:
mock_pairer_broker_->NotifyAccountKeyWrite(initial_device_,
absl::nullopt);
break;
case Protocol::kFastPairSubsequent:
break;
case Protocol::kFastPairRetroactive:
mock_pairer_broker_->NotifyAccountKeyWrite(retroactive_device_,
absl::nullopt);
break;
}
}
void SimulateAccountKeyFailure(Protocol protocol) {
switch (protocol) {
case Protocol::kFastPairInitial:
mock_pairer_broker_->NotifyAccountKeyWrite(
initial_device_, AccountKeyFailure::kAccountKeyCharacteristicWrite);
break;
case Protocol::kFastPairSubsequent:
break;
case Protocol::kFastPairRetroactive:
mock_pairer_broker_->NotifyAccountKeyWrite(
retroactive_device_,
AccountKeyFailure::kAccountKeyCharacteristicWrite);
break;
}
}
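  // Simulates a completed Fast Pair pairing for a device at the given classic
  // address.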
void PairFastPairDeviceWithFastPair(std::string address) {
auto fp_device = base::MakeRefCounted<Device>(kValidModelId, address,
Protocol::kFastPairInitial);
fp_device->set_classic_address(address);
mock_pairer_broker_->NotifyDevicePaired(fp_device);
}
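  // Registers a mock classic device on the adapter and fires the
  // paired-changed notification, mimicking a pairing performed through the
  // system Bluetooth UI.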
void PairFastPairDeviceWithClassicBluetooth(bool new_paired_status,
std::string classic_address) {
std::unique_ptr<testing::NiceMock<device::MockBluetoothDevice>>
bluetooth_device = CreateTestBluetoothDevice(classic_address);
bluetooth_device->AddUUID(ash::quick_pair::kFastPairBluetoothUuid);
auto* bt_device_ptr = bluetooth_device.get();
adapter_->AddMockDevice(std::move(bluetooth_device));
adapter_->NotifyDevicePairedChanged(bt_device_ptr, new_paired_status);
}
base::HistogramTester& histogram_tester() { return histogram_tester_; }
protected:
base::HistogramTester histogram_tester_;
base::test::SingleThreadTaskEnvironment task_environment_;
scoped_refptr<FakeMetricBluetoothAdapter> adapter_;
scoped_refptr<Device> initial_device_;
scoped_refptr<Device> subsequent_device_;
scoped_refptr<Device> retroactive_device_;
MockScannerBroker* mock_scanner_broker_ = nullptr;
MockPairerBroker* mock_pairer_broker_ = nullptr;
MockUIBroker* mock_ui_broker_ = nullptr;
FakeRetroactivePairingDetector* fake_retroactive_pairing_detector_ = nullptr;
std::unique_ptr<ScannerBroker> scanner_broker_;
std::unique_ptr<RetroactivePairingDetector> retroactive_pairing_detector_;
std::unique_ptr<PairerBroker> pairer_broker_;
std::unique_ptr<UIBroker> ui_broker_;
std::unique_ptr<QuickPairMetricsLogger> metrics_logger_;
};
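// Engagement-funnel tests: each simulated event should increment exactly one
// bucket of the per-protocol engagement histogram and leave the others at
// zero.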
TEST_F(QuickPairMetricsLoggerTest, LogDiscoveryUiShown_Initial) {
SimulateDiscoveryUiShown(Protocol::kFastPairInitial);
base::RunLoop().RunUntilIdle();
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kDiscoveryUiShown),
1);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kDiscoveryUiDismissed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kDiscoveryUiConnectPressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kPairingFailed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kPairingSucceeded),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kErrorUiDismissed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kErrorUiSettingsPressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kErrorUiDismissedByUser),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kDiscoveryUiDismissedByUser),
0);
}
TEST_F(QuickPairMetricsLoggerTest, LogDiscoveryUiShown_Subsequent) {
SimulateDiscoveryUiShown(Protocol::kFastPairSubsequent);
base::RunLoop().RunUntilIdle();
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kDiscoveryUiShown),
1);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kDiscoveryUiDismissed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kDiscoveryUiConnectPressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kPairingFailed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kPairingSucceeded),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kErrorUiDismissed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kErrorUiSettingsPressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kErrorUiDismissedByUser),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kDiscoveryUiDismissedByUser),
0);
}
TEST_F(QuickPairMetricsLoggerTest, LogDiscoveryUiDismissed_Initial) {
SimulateDiscoveryUiDismissed(Protocol::kFastPairInitial);
base::RunLoop().RunUntilIdle();
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kDiscoveryUiShown),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kDiscoveryUiDismissed),
1);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kDiscoveryUiConnectPressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kPairingFailed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kPairingSucceeded),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kErrorUiDismissed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kErrorUiSettingsPressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kErrorUiDismissedByUser),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kDiscoveryUiDismissedByUser),
0);
}
TEST_F(QuickPairMetricsLoggerTest, LogDiscoveryUiDismissedByUser_Initial) {
SimulateDiscoveryUiDismissedByUser(Protocol::kFastPairInitial);
base::RunLoop().RunUntilIdle();
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kDiscoveryUiShown),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kDiscoveryUiDismissed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kDiscoveryUiConnectPressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kPairingFailed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kPairingSucceeded),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kErrorUiDismissed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kErrorUiSettingsPressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kErrorUiDismissedByUser),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kDiscoveryUiDismissedByUser),
1);
}
TEST_F(QuickPairMetricsLoggerTest, LogDiscoveryUiDismissed_Subsequent) {
SimulateDiscoveryUiDismissed(Protocol::kFastPairSubsequent);
base::RunLoop().RunUntilIdle();
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kDiscoveryUiShown),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kDiscoveryUiDismissed),
1);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kDiscoveryUiConnectPressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kPairingFailed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kPairingSucceeded),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kErrorUiDismissed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kErrorUiSettingsPressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kErrorUiDismissedByUser),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kDiscoveryUiDismissedByUser),
0);
}
TEST_F(QuickPairMetricsLoggerTest, LogDiscoveryUiDismissedByUser_Subsequent) {
SimulateDiscoveryUiDismissedByUser(Protocol::kFastPairSubsequent);
base::RunLoop().RunUntilIdle();
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kDiscoveryUiShown),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kDiscoveryUiDismissed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kDiscoveryUiConnectPressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kPairingFailed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kPairingSucceeded),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kErrorUiDismissed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kErrorUiSettingsPressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kErrorUiDismissedByUser),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kDiscoveryUiDismissedByUser),
1);
}
TEST_F(QuickPairMetricsLoggerTest, LogDiscoveryUiConnectPressed_Initial) {
SimulateDiscoveryUiConnectPressed(Protocol::kFastPairInitial);
base::RunLoop().RunUntilIdle();
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kDiscoveryUiShown),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kDiscoveryUiDismissed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kDiscoveryUiConnectPressed),
1);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kPairingFailed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kPairingSucceeded),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kErrorUiDismissed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kErrorUiSettingsPressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kErrorUiDismissedByUser),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kDiscoveryUiDismissedByUser),
0);
}
TEST_F(QuickPairMetricsLoggerTest, LogDiscoveryUiConnectPressed_Subsequent) {
SimulateDiscoveryUiConnectPressed(Protocol::kFastPairSubsequent);
base::RunLoop().RunUntilIdle();
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kDiscoveryUiShown),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kDiscoveryUiDismissed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kDiscoveryUiConnectPressed),
1);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kPairingFailed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kPairingSucceeded),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kErrorUiDismissed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kErrorUiSettingsPressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kErrorUiDismissedByUser),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kDiscoveryUiDismissedByUser),
0);
}
TEST_F(QuickPairMetricsLoggerTest, LogPairingFailed_Initial) {
SimulatePairingFailed(Protocol::kFastPairInitial);
base::RunLoop().RunUntilIdle();
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kDiscoveryUiShown),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kDiscoveryUiDismissed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kDiscoveryUiConnectPressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kPairingFailed),
1);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kPairingSucceeded),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kErrorUiDismissed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kErrorUiSettingsPressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kErrorUiDismissedByUser),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kDiscoveryUiDismissedByUser),
0);
}
TEST_F(QuickPairMetricsLoggerTest, LogPairingFailed_Subsequent) {
SimulatePairingFailed(Protocol::kFastPairSubsequent);
base::RunLoop().RunUntilIdle();
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kDiscoveryUiShown),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kDiscoveryUiDismissed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kDiscoveryUiConnectPressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kPairingFailed),
1);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kPairingSucceeded),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kErrorUiDismissed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kErrorUiSettingsPressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kErrorUiDismissedByUser),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kDiscoveryUiDismissedByUser),
0);
}
TEST_F(QuickPairMetricsLoggerTest, LogPairingSucceeded_Initial) {
SimulatePairingSucceeded(Protocol::kFastPairInitial);
base::RunLoop().RunUntilIdle();
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kDiscoveryUiShown),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kDiscoveryUiDismissed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kDiscoveryUiConnectPressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kPairingFailed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kPairingSucceeded),
1);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kErrorUiDismissed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kErrorUiSettingsPressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kErrorUiDismissedByUser),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kDiscoveryUiDismissedByUser),
0);
}
TEST_F(QuickPairMetricsLoggerTest, LogPairingSucceeded_Subsequent) {
SimulatePairingSucceeded(Protocol::kFastPairSubsequent);
base::RunLoop().RunUntilIdle();
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kDiscoveryUiShown),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kDiscoveryUiDismissed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kDiscoveryUiConnectPressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kPairingFailed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kPairingSucceeded),
1);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kErrorUiDismissed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kErrorUiSettingsPressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kErrorUiDismissedByUser),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kDiscoveryUiDismissedByUser),
0);
}
TEST_F(QuickPairMetricsLoggerTest, LogErrorUiDismissed_Initial) {
SimulateErrorUiDismissed(Protocol::kFastPairInitial);
base::RunLoop().RunUntilIdle();
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kDiscoveryUiShown),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kDiscoveryUiDismissed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kDiscoveryUiConnectPressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kPairingFailed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kPairingSucceeded),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kErrorUiDismissed),
1);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kErrorUiSettingsPressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kErrorUiDismissedByUser),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kDiscoveryUiDismissedByUser),
0);
}
TEST_F(QuickPairMetricsLoggerTest, LogErrorUiDismissedByUser_Initial) {
SimulateErrorUiDismissedByUser(Protocol::kFastPairInitial);
base::RunLoop().RunUntilIdle();
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kDiscoveryUiShown),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kDiscoveryUiDismissed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kDiscoveryUiConnectPressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kPairingFailed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kPairingSucceeded),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kErrorUiDismissed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kErrorUiSettingsPressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kErrorUiDismissedByUser),
1);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kDiscoveryUiDismissedByUser),
0);
}
TEST_F(QuickPairMetricsLoggerTest, LogErrorUiDismissed_Subsequent) {
SimulateErrorUiDismissed(Protocol::kFastPairSubsequent);
base::RunLoop().RunUntilIdle();
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kDiscoveryUiShown),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kDiscoveryUiDismissed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kDiscoveryUiConnectPressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kPairingFailed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kPairingSucceeded),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kErrorUiDismissed),
1);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kErrorUiSettingsPressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kErrorUiDismissedByUser),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kDiscoveryUiDismissedByUser),
0);
}
TEST_F(QuickPairMetricsLoggerTest, LogErrorUiDismissedByUser_Subsequent) {
SimulateErrorUiDismissedByUser(Protocol::kFastPairSubsequent);
base::RunLoop().RunUntilIdle();
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kDiscoveryUiShown),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kDiscoveryUiDismissed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kDiscoveryUiConnectPressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kPairingFailed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kPairingSucceeded),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kErrorUiDismissed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kErrorUiSettingsPressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kErrorUiDismissedByUser),
1);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kDiscoveryUiDismissedByUser),
0);
}
TEST_F(QuickPairMetricsLoggerTest, LogErrorUiSettingsPressed_Initial) {
SimulateErrorUiSettingsPressed(Protocol::kFastPairInitial);
base::RunLoop().RunUntilIdle();
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kDiscoveryUiShown),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kDiscoveryUiDismissed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kDiscoveryUiConnectPressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kPairingFailed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kPairingSucceeded),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kErrorUiDismissed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kErrorUiSettingsPressed),
1);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kErrorUiDismissedByUser),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricInitial,
FastPairEngagementFlowEvent::kDiscoveryUiDismissedByUser),
0);
}
TEST_F(QuickPairMetricsLoggerTest, LogErrorUiSettingsPressed_Subsequent) {
SimulateErrorUiSettingsPressed(Protocol::kFastPairSubsequent);
base::RunLoop().RunUntilIdle();
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kDiscoveryUiShown),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kDiscoveryUiDismissed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kDiscoveryUiConnectPressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kPairingFailed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kPairingSucceeded),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kErrorUiDismissed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kErrorUiSettingsPressed),
1);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kErrorUiDismissedByUser),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairEngagementFlowMetricSubsequent,
FastPairEngagementFlowEvent::kDiscoveryUiDismissedByUser),
0);
}
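// Total pair-time histograms are recorded only once pairing succeeds after
// the user pressed connect in the discovery UI.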
TEST_F(QuickPairMetricsLoggerTest, LogPairTime_Initial) {
SimulateDiscoveryUiConnectPressed(Protocol::kFastPairInitial);
base::RunLoop().RunUntilIdle();
histogram_tester().ExpectTotalCount(kFastPairPairTimeMetricInitial, 0);
SimulatePairingSucceeded(Protocol::kFastPairInitial);
base::RunLoop().RunUntilIdle();
histogram_tester().ExpectTotalCount(kFastPairPairTimeMetricInitial, 1);
}
TEST_F(QuickPairMetricsLoggerTest, LogPairTime_Subsequent) {
SimulateDiscoveryUiConnectPressed(Protocol::kFastPairSubsequent);
base::RunLoop().RunUntilIdle();
histogram_tester().ExpectTotalCount(kFastPairPairTimeMetricSubsequent, 0);
SimulatePairingSucceeded(Protocol::kFastPairSubsequent);
base::RunLoop().RunUntilIdle();
histogram_tester().ExpectTotalCount(kFastPairPairTimeMetricSubsequent, 1);
}
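// Retroactive engagement-funnel tests covering the associate-account UI.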
TEST_F(QuickPairMetricsLoggerTest, LogAssociateAccountShown) {
SimulateAssociateAccountUiShown();
base::RunLoop().RunUntilIdle();
EXPECT_EQ(
histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::kAssociateAccountUiShown),
1);
EXPECT_EQ(
histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::kAssociateAccountUiDismissed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::
kAssociateAccountUiDismissedByUser),
0);
EXPECT_EQ(
histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::kAssociateAccountSavePressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::
kAssociateAccountLearnMorePressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::
kAssociateAccountSavePressedAfterLearnMorePressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::
kAssociateAccountDismissedByUserAfterLearnMorePressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::
kAssociateAccountDismissedAfterLearnMorePressed),
0);
}
TEST_F(QuickPairMetricsLoggerTest, LogAssociateAccountDismissed) {
SimulateAssociateAccountUiDismissed();
base::RunLoop().RunUntilIdle();
EXPECT_EQ(
histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::kAssociateAccountUiShown),
0);
EXPECT_EQ(
histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::kAssociateAccountUiDismissed),
1);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::
kAssociateAccountUiDismissedByUser),
0);
EXPECT_EQ(
histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::kAssociateAccountSavePressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::
kAssociateAccountLearnMorePressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::
kAssociateAccountSavePressedAfterLearnMorePressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::
kAssociateAccountDismissedByUserAfterLearnMorePressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::
kAssociateAccountDismissedAfterLearnMorePressed),
0);
}
TEST_F(QuickPairMetricsLoggerTest, LogAssociateAccountDismissedByUser) {
SimulateAssociateAccountUiDismissedByUser();
base::RunLoop().RunUntilIdle();
EXPECT_EQ(
histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::kAssociateAccountUiShown),
0);
EXPECT_EQ(
histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::kAssociateAccountUiDismissed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::
kAssociateAccountUiDismissedByUser),
1);
EXPECT_EQ(
histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::kAssociateAccountSavePressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::
kAssociateAccountLearnMorePressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::
kAssociateAccountSavePressedAfterLearnMorePressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::
kAssociateAccountDismissedByUserAfterLearnMorePressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::
kAssociateAccountDismissedAfterLearnMorePressed),
0);
}
TEST_F(QuickPairMetricsLoggerTest, LogAssociateAccountSavePressed) {
SimulateAssociateAccountUiSavePressed();
base::RunLoop().RunUntilIdle();
EXPECT_EQ(
histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::kAssociateAccountUiShown),
0);
EXPECT_EQ(
histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::kAssociateAccountUiDismissed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::
kAssociateAccountUiDismissedByUser),
0);
EXPECT_EQ(
histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::kAssociateAccountSavePressed),
1);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::
kAssociateAccountLearnMorePressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::
kAssociateAccountSavePressedAfterLearnMorePressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::
kAssociateAccountDismissedByUserAfterLearnMorePressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::
kAssociateAccountDismissedAfterLearnMorePressed),
0);
}
TEST_F(QuickPairMetricsLoggerTest, LogAssociateAccountLearnMorePressed) {
SimulateAssociateAccountUiLearnMorePressed();
base::RunLoop().RunUntilIdle();
EXPECT_EQ(
histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::kAssociateAccountUiShown),
0);
EXPECT_EQ(
histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::kAssociateAccountUiDismissed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::
kAssociateAccountUiDismissedByUser),
0);
EXPECT_EQ(
histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::kAssociateAccountSavePressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::
kAssociateAccountLearnMorePressed),
1);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::
kAssociateAccountSavePressedAfterLearnMorePressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::
kAssociateAccountDismissedByUserAfterLearnMorePressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::
kAssociateAccountDismissedAfterLearnMorePressed),
0);
}
TEST_F(QuickPairMetricsLoggerTest,
LogAssociateAccountLearnMorePressed_SavePressed) {
SimulateAssociateAccountUiLearnMorePressed();
base::RunLoop().RunUntilIdle();
SimulateAssociateAccountUiSavePressed();
base::RunLoop().RunUntilIdle();
EXPECT_EQ(
histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::kAssociateAccountUiShown),
0);
EXPECT_EQ(
histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::kAssociateAccountUiDismissed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::
kAssociateAccountUiDismissedByUser),
0);
EXPECT_EQ(
histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::kAssociateAccountSavePressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::
kAssociateAccountLearnMorePressed),
1);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::
kAssociateAccountSavePressedAfterLearnMorePressed),
1);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::
kAssociateAccountDismissedByUserAfterLearnMorePressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::
kAssociateAccountDismissedAfterLearnMorePressed),
0);
}
TEST_F(QuickPairMetricsLoggerTest,
LogAssociateAccountLearnMorePressed_Dismissed) {
SimulateAssociateAccountUiLearnMorePressed();
base::RunLoop().RunUntilIdle();
SimulateAssociateAccountUiDismissed();
base::RunLoop().RunUntilIdle();
EXPECT_EQ(
histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::kAssociateAccountUiShown),
0);
EXPECT_EQ(
histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::kAssociateAccountUiDismissed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::
kAssociateAccountUiDismissedByUser),
0);
EXPECT_EQ(
histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::kAssociateAccountSavePressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::
kAssociateAccountLearnMorePressed),
1);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::
kAssociateAccountSavePressedAfterLearnMorePressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::
kAssociateAccountDismissedByUserAfterLearnMorePressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::
kAssociateAccountDismissedAfterLearnMorePressed),
1);
}
TEST_F(QuickPairMetricsLoggerTest,
LogAssociateAccountLearnMorePressed_DismissedByUser) {
SimulateAssociateAccountUiLearnMorePressed();
base::RunLoop().RunUntilIdle();
SimulateAssociateAccountUiDismissedByUser();
base::RunLoop().RunUntilIdle();
EXPECT_EQ(
histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::kAssociateAccountUiShown),
0);
EXPECT_EQ(
histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::kAssociateAccountUiDismissed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::
kAssociateAccountUiDismissedByUser),
0);
EXPECT_EQ(
histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::kAssociateAccountSavePressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::
kAssociateAccountLearnMorePressed),
1);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::
kAssociateAccountSavePressedAfterLearnMorePressed),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::
kAssociateAccountDismissedByUserAfterLearnMorePressed),
1);
EXPECT_EQ(histogram_tester().GetBucketCount(
kFastPairRetroactiveEngagementFlowMetric,
FastPairRetroactiveEngagementFlowEvent::
kAssociateAccountDismissedAfterLearnMorePressed),
0);
}
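// Pairing-method histogram: a classic pairing preceded by a Fast Pair pairing
// of the same address is attributed to Fast Pair, otherwise to the system
// pairing UI.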
TEST_F(QuickPairMetricsLoggerTest, DevicePaired_FastPair) {
EXPECT_EQ(histogram_tester().GetBucketCount(kPairingMethodMetric,
PairingMethod::kFastPair),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(kPairingMethodMetric,
PairingMethod::kSystemPairingUi),
0);
PairFastPairDeviceWithFastPair(kTestDeviceAddress);
PairFastPairDeviceWithClassicBluetooth(
/*new_paired_status=*/true, kTestDeviceAddress);
EXPECT_EQ(histogram_tester().GetBucketCount(kPairingMethodMetric,
PairingMethod::kFastPair),
1);
EXPECT_EQ(histogram_tester().GetBucketCount(kPairingMethodMetric,
PairingMethod::kSystemPairingUi),
0);
}
TEST_F(QuickPairMetricsLoggerTest, DeviceUnpaired) {
EXPECT_EQ(histogram_tester().GetBucketCount(kPairingMethodMetric,
PairingMethod::kFastPair),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(kPairingMethodMetric,
PairingMethod::kSystemPairingUi),
0);
PairFastPairDeviceWithClassicBluetooth(
/*new_paired_status=*/false, kTestDeviceAddress);
EXPECT_EQ(histogram_tester().GetBucketCount(kPairingMethodMetric,
PairingMethod::kFastPair),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(kPairingMethodMetric,
PairingMethod::kSystemPairingUi),
0);
}
TEST_F(QuickPairMetricsLoggerTest, DevicePaired) {
EXPECT_EQ(histogram_tester().GetBucketCount(kPairingMethodMetric,
PairingMethod::kFastPair),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(kPairingMethodMetric,
PairingMethod::kSystemPairingUi),
0);
PairFastPairDeviceWithClassicBluetooth(
/*new_paired_status=*/true, kTestDeviceAddress);
EXPECT_EQ(histogram_tester().GetBucketCount(kPairingMethodMetric,
PairingMethod::kFastPair),
0);
EXPECT_EQ(histogram_tester().GetBucketCount(kPairingMethodMetric,
PairingMethod::kSystemPairingUi),
1);
}
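// Account-key write metrics: result and failure histograms are recorded per
// protocol, and the retroactive-pairing result histogram only for the
// retroactive protocol.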
TEST_F(QuickPairMetricsLoggerTest, WriteAccountKey_Initial) {
histogram_tester().ExpectTotalCount(kRetroactivePairingResultMetric, 0);
histogram_tester().ExpectTotalCount(
kFastPairAccountKeyWriteResultMetricInitial, 0);
histogram_tester().ExpectTotalCount(
kFastPairAccountKeyWriteFailureMetricInitial, 0);
histogram_tester().ExpectTotalCount(
kFastPairAccountKeyWriteResultMetricRetroactive, 0);
histogram_tester().ExpectTotalCount(
kFastPairAccountKeyWriteFailureMetricRetroactive, 0);
SimulateAccountKeyWritten(Protocol::kFastPairInitial);
histogram_tester().ExpectTotalCount(kRetroactivePairingResultMetric, 0);
histogram_tester().ExpectTotalCount(
kFastPairAccountKeyWriteResultMetricInitial, 1);
histogram_tester().ExpectTotalCount(
kFastPairAccountKeyWriteFailureMetricInitial, 0);
histogram_tester().ExpectTotalCount(
kFastPairAccountKeyWriteResultMetricRetroactive, 0);
histogram_tester().ExpectTotalCount(
kFastPairAccountKeyWriteFailureMetricRetroactive, 0);
}
TEST_F(QuickPairMetricsLoggerTest, WriteAccountKey_Retroactive) {
histogram_tester().ExpectTotalCount(kRetroactivePairingResultMetric, 0);
histogram_tester().ExpectTotalCount(
kFastPairAccountKeyWriteResultMetricInitial, 0);
histogram_tester().ExpectTotalCount(
kFastPairAccountKeyWriteFailureMetricInitial, 0);
histogram_tester().ExpectTotalCount(
kFastPairAccountKeyWriteResultMetricRetroactive, 0);
histogram_tester().ExpectTotalCount(
kFastPairAccountKeyWriteFailureMetricRetroactive, 0);
SimulateAccountKeyWritten(Protocol::kFastPairRetroactive);
histogram_tester().ExpectTotalCount(kRetroactivePairingResultMetric, 1);
histogram_tester().ExpectTotalCount(
kFastPairAccountKeyWriteResultMetricInitial, 0);
histogram_tester().ExpectTotalCount(
kFastPairAccountKeyWriteFailureMetricInitial, 0);
histogram_tester().ExpectTotalCount(
kFastPairAccountKeyWriteResultMetricRetroactive, 1);
histogram_tester().ExpectTotalCount(
kFastPairAccountKeyWriteFailureMetricRetroactive, 0);
}
TEST_F(QuickPairMetricsLoggerTest, WriteAccountKeyFailure_Retroactive) {
histogram_tester().ExpectTotalCount(kRetroactivePairingResultMetric, 0);
histogram_tester().ExpectTotalCount(
kFastPairAccountKeyWriteResultMetricInitial, 0);
histogram_tester().ExpectTotalCount(
kFastPairAccountKeyWriteFailureMetricInitial, 0);
histogram_tester().ExpectTotalCount(
kFastPairAccountKeyWriteResultMetricRetroactive, 0);
histogram_tester().ExpectTotalCount(
kFastPairAccountKeyWriteFailureMetricRetroactive, 0);
SimulateAccountKeyFailure(Protocol::kFastPairRetroactive);
histogram_tester().ExpectTotalCount(kRetroactivePairingResultMetric, 1);
histogram_tester().ExpectTotalCount(
kFastPairAccountKeyWriteResultMetricInitial, 0);
histogram_tester().ExpectTotalCount(
kFastPairAccountKeyWriteFailureMetricInitial, 0);
histogram_tester().ExpectTotalCount(
kFastPairAccountKeyWriteResultMetricRetroactive, 1);
histogram_tester().ExpectTotalCount(
kFastPairAccountKeyWriteFailureMetricRetroactive, 1);
}
TEST_F(QuickPairMetricsLoggerTest, WriteAccountKeyFailure_Initial) {
histogram_tester().ExpectTotalCount(kRetroactivePairingResultMetric, 0);
histogram_tester().ExpectTotalCount(
kFastPairAccountKeyWriteResultMetricInitial, 0);
histogram_tester().ExpectTotalCount(
kFastPairAccountKeyWriteFailureMetricInitial, 0);
histogram_tester().ExpectTotalCount(
kFastPairAccountKeyWriteResultMetricRetroactive, 0);
histogram_tester().ExpectTotalCount(
kFastPairAccountKeyWriteFailureMetricRetroactive, 0);
SimulateAccountKeyFailure(Protocol::kFastPairInitial);
histogram_tester().ExpectTotalCount(kRetroactivePairingResultMetric, 0);
histogram_tester().ExpectTotalCount(
kFastPairAccountKeyWriteResultMetricInitial, 1);
histogram_tester().ExpectTotalCount(
kFastPairAccountKeyWriteFailureMetricInitial, 1);
histogram_tester().ExpectTotalCount(
kFastPairAccountKeyWriteResultMetricRetroactive, 0);
histogram_tester().ExpectTotalCount(
kFastPairAccountKeyWriteFailureMetricRetroactive, 0);
}
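// Pair result/failure histograms: a failure records both the failure and the
// result histogram for its protocol; a success records only the result
// histogram.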
TEST_F(QuickPairMetricsLoggerTest, LogPairFailure_Initial) {
histogram_tester().ExpectTotalCount(kFastPairPairFailureMetricInitial, 0);
histogram_tester().ExpectTotalCount(kFastPairPairFailureMetricSubsequent, 0);
histogram_tester().ExpectTotalCount(kFastPairPairFailureMetricRetroactive, 0);
histogram_tester().ExpectTotalCount(kFastPairPairResultMetricInitial, 0);
histogram_tester().ExpectTotalCount(kFastPairPairResultMetricSubsequent, 0);
histogram_tester().ExpectTotalCount(kFastPairPairResultMetricRetroactive, 0);
SimulatePairingFailed(Protocol::kFastPairInitial);
histogram_tester().ExpectTotalCount(kFastPairPairFailureMetricInitial, 1);
histogram_tester().ExpectTotalCount(kFastPairPairFailureMetricSubsequent, 0);
histogram_tester().ExpectTotalCount(kFastPairPairFailureMetricRetroactive, 0);
histogram_tester().ExpectTotalCount(kFastPairPairResultMetricInitial, 1);
histogram_tester().ExpectTotalCount(kFastPairPairResultMetricSubsequent, 0);
histogram_tester().ExpectTotalCount(kFastPairPairResultMetricRetroactive, 0);
}
TEST_F(QuickPairMetricsLoggerTest, LogPairSuccess_Initial) {
histogram_tester().ExpectTotalCount(kFastPairPairFailureMetricInitial, 0);
histogram_tester().ExpectTotalCount(kFastPairPairFailureMetricSubsequent, 0);
histogram_tester().ExpectTotalCount(kFastPairPairFailureMetricRetroactive, 0);
histogram_tester().ExpectTotalCount(kFastPairPairResultMetricInitial, 0);
histogram_tester().ExpectTotalCount(kFastPairPairResultMetricSubsequent, 0);
histogram_tester().ExpectTotalCount(kFastPairPairResultMetricRetroactive, 0);
SimulatePairingSucceeded(Protocol::kFastPairInitial);
histogram_tester().ExpectTotalCount(kFastPairPairFailureMetricInitial, 0);
histogram_tester().ExpectTotalCount(kFastPairPairFailureMetricSubsequent, 0);
histogram_tester().ExpectTotalCount(kFastPairPairFailureMetricRetroactive, 0);
histogram_tester().ExpectTotalCount(kFastPairPairResultMetricInitial, 1);
histogram_tester().ExpectTotalCount(kFastPairPairResultMetricSubsequent, 0);
histogram_tester().ExpectTotalCount(kFastPairPairResultMetricRetroactive, 0);
}
TEST_F(QuickPairMetricsLoggerTest, LogPairFailure_Subsequent) {
histogram_tester().ExpectTotalCount(kFastPairPairFailureMetricInitial, 0);
histogram_tester().ExpectTotalCount(kFastPairPairFailureMetricSubsequent, 0);
histogram_tester().ExpectTotalCount(kFastPairPairFailureMetricRetroactive, 0);
histogram_tester().ExpectTotalCount(kFastPairPairResultMetricInitial, 0);
histogram_tester().ExpectTotalCount(kFastPairPairResultMetricSubsequent, 0);
histogram_tester().ExpectTotalCount(kFastPairPairResultMetricRetroactive, 0);
SimulatePairingFailed(Protocol::kFastPairSubsequent);
histogram_tester().ExpectTotalCount(kFastPairPairFailureMetricInitial, 0);
histogram_tester().ExpectTotalCount(kFastPairPairFailureMetricSubsequent, 1);
histogram_tester().ExpectTotalCount(kFastPairPairFailureMetricRetroactive, 0);
histogram_tester().ExpectTotalCount(kFastPairPairResultMetricInitial, 0);
histogram_tester().ExpectTotalCount(kFastPairPairResultMetricSubsequent, 1);
histogram_tester().ExpectTotalCount(kFastPairPairResultMetricRetroactive, 0);
}
TEST_F(QuickPairMetricsLoggerTest, LogPairSuccess_Subsequent) {
histogram_tester().ExpectTotalCount(kFastPairPairFailureMetricInitial, 0);
histogram_tester().ExpectTotalCount(kFastPairPairFailureMetricSubsequent, 0);
histogram_tester().ExpectTotalCount(kFastPairPairFailureMetricRetroactive, 0);
histogram_tester().ExpectTotalCount(kFastPairPairResultMetricInitial, 0);
histogram_tester().ExpectTotalCount(kFastPairPairResultMetricSubsequent, 0);
histogram_tester().ExpectTotalCount(kFastPairPairResultMetricRetroactive, 0);
SimulatePairingSucceeded(Protocol::kFastPairSubsequent);
histogram_tester().ExpectTotalCount(kFastPairPairFailureMetricInitial, 0);
histogram_tester().ExpectTotalCount(kFastPairPairFailureMetricSubsequent, 0);
histogram_tester().ExpectTotalCount(kFastPairPairFailureMetricRetroactive, 0);
histogram_tester().ExpectTotalCount(kFastPairPairResultMetricInitial, 0);
histogram_tester().ExpectTotalCount(kFastPairPairResultMetricSubsequent, 1);
histogram_tester().ExpectTotalCount(kFastPairPairResultMetricRetroactive, 0);
}
TEST_F(QuickPairMetricsLoggerTest, LogPairFailure_Retroactive) {
histogram_tester().ExpectTotalCount(kFastPairPairFailureMetricInitial, 0);
histogram_tester().ExpectTotalCount(kFastPairPairFailureMetricSubsequent, 0);
histogram_tester().ExpectTotalCount(kFastPairPairFailureMetricRetroactive, 0);
histogram_tester().ExpectTotalCount(kFastPairPairResultMetricInitial, 0);
histogram_tester().ExpectTotalCount(kFastPairPairResultMetricSubsequent, 0);
histogram_tester().ExpectTotalCount(kFastPairPairResultMetricRetroactive, 0);
SimulatePairingFailed(Protocol::kFastPairRetroactive);
histogram_tester().ExpectTotalCount(kFastPairPairFailureMetricInitial, 0);
histogram_tester().ExpectTotalCount(kFastPairPairFailureMetricSubsequent, 0);
histogram_tester().ExpectTotalCount(kFastPairPairFailureMetricRetroactive, 1);
histogram_tester().ExpectTotalCount(kFastPairPairResultMetricInitial, 0);
histogram_tester().ExpectTotalCount(kFastPairPairResultMetricSubsequent, 0);
histogram_tester().ExpectTotalCount(kFastPairPairResultMetricRetroactive, 1);
}
TEST_F(QuickPairMetricsLoggerTest, LogPairSuccess_Retroactive) {
histogram_tester().ExpectTotalCount(kFastPairPairFailureMetricInitial, 0);
histogram_tester().ExpectTotalCount(kFastPairPairFailureMetricSubsequent, 0);
histogram_tester().ExpectTotalCount(kFastPairPairFailureMetricRetroactive, 0);
histogram_tester().ExpectTotalCount(kFastPairPairResultMetricInitial, 0);
histogram_tester().ExpectTotalCount(kFastPairPairResultMetricSubsequent, 0);
histogram_tester().ExpectTotalCount(kFastPairPairResultMetricRetroactive, 0);
SimulatePairingSucceeded(Protocol::kFastPairRetroactive);
histogram_tester().ExpectTotalCount(kFastPairPairFailureMetricInitial, 0);
histogram_tester().ExpectTotalCount(kFastPairPairFailureMetricSubsequent, 0);
histogram_tester().ExpectTotalCount(kFastPairPairFailureMetricRetroactive, 0);
histogram_tester().ExpectTotalCount(kFastPairPairResultMetricInitial, 0);
histogram_tester().ExpectTotalCount(kFastPairPairResultMetricSubsequent, 0);
histogram_tester().ExpectTotalCount(kFastPairPairResultMetricRetroactive, 1);
}
} // namespace quick_pair
} // namespace ash
| 35,363 |
4,253 | <reponame>timmersren/InternetArchitect
package com.msb.zookeeper.configurationcenter;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import java.util.concurrent.CountDownLatch;
/**
* @author: 马士兵教育
* @create: 2019-09-20 13:53
*/
public class DefaultWatch implements Watcher {
CountDownLatch init ;
public CountDownLatch getInit() {
return init;
}
public void setInit(CountDownLatch init) {
this.init = init;
}
@Override
public void process(WatchedEvent event) {
Event.KeeperState state = event.getState();
switch (state) {
case Disconnected:
System.out.println("Disconnected...c...new...");
init = new CountDownLatch(1);
break;
case SyncConnected:
System.out.println("Connected...c...ok...");
init.countDown();
break;
}
}
}
| 435 |
6,969 |
# Contributed by <NAME>
# https://github.com/parajshah
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the alternatingCharacters function below.
def alternatingCharacters(s):
count = 0
for i in range(len(s) - 1):
if (s[i] == 'A' and s[i + 1] == 'B') or (s[i] == 'B' and s[i + 1] == 'A'):
continue
count += 1
return count
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
q = int(input())
for q_itr in range(q):
s = input()
result = alternatingCharacters(s)
fptr.write(str(result) + '\n')
fptr.close()
| 285 |
320 | <gh_stars>100-1000
#ifndef METAL_NUMBER_NUMBERS_HPP
#define METAL_NUMBER_NUMBERS_HPP
#include "../config.hpp"
#include "../list/list.hpp"
#include "../number/number.hpp"
#include <type_traits>
namespace metal {
/// \cond
namespace detail {
#if defined(METAL_WORKAROUND)
template <int_... vs>
struct _numbers;
#endif
}
/// \endcond
/// \ingroup number
///
/// ### Description
/// Constructs a \list of \numbers out of a sequence of integral values.
///
/// ### Example
/// \snippet number.cpp numbers
///
/// ### See Also
/// \see int_, number, list
template <int_... vs>
using numbers =
#if defined(METAL_WORKAROUND)
typename detail::_numbers<vs...>::type;
#else
metal::list<metal::number<vs>...>;
#endif
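// Added illustration (a sketch for this edit, not one of the library's own snippets):
// the alias just wraps each value of the pack in metal::number and collects the
// results in a metal::list, so for example
//
//   static_assert(std::is_same<
//       metal::numbers<1, 2, 3>,
//       metal::list<metal::number<1>, metal::number<2>, metal::number<3>>
//   >::value, "numbers<vs...> is shorthand for list<number<vs>...>");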
/// \cond
namespace detail {
#if defined(METAL_WORKAROUND)
template <int_... vs>
struct _numbers {
using type = list<std::integral_constant<int_, vs>...>;
};
#endif
}
/// \endcond
}
#endif
| 365 |
348 | {"nom":"Fontenelle","circ":"3ème circonscription","dpt":"Aisne","inscrits":195,"abs":103,"votants":92,"blancs":6,"nuls":0,"exp":86,"res":[{"nuance":"SOC","nom":"<NAME>","voix":59},{"nuance":"FN","nom":"<NAME>","voix":27}]} | 90 |
1,350 | <reponame>ppartarr/azure-sdk-for-java<gh_stars>1000+
package com.microsoft.windowsazure.services.media.implementation.templates.playreadylicense;
import static org.junit.Assert.*;
import java.security.InvalidParameterException;
import org.junit.Test;
import com.microsoft.windowsazure.services.media.implementation.templates.playreadylicense.PlayReadyLicenseType;
public class PlayReadyLicenseTypeTests {
@Test
public void valueOfPlayReadyLicenseTypeTestsTests() {
// provides full code coverage
assertEquals(PlayReadyLicenseType.Nonpersistent, PlayReadyLicenseType.valueOf("Nonpersistent"));
assertEquals(PlayReadyLicenseType.Persistent, PlayReadyLicenseType.valueOf("Persistent"));
}
@Test
public void fromCodeNonpersistentPlayReadyLicenseTypeTest() {
// Arrange
PlayReadyLicenseType expectedPlayReadyLicenseType = PlayReadyLicenseType.Nonpersistent;
// Act
PlayReadyLicenseType playReadyLicenseType = PlayReadyLicenseType.fromCode(0);
// Assert
assertEquals(playReadyLicenseType, expectedPlayReadyLicenseType);
}
@Test
public void fromCodePersistentPlayReadyLicenseTypeTest() {
// Arrange
PlayReadyLicenseType expectedPlayReadyLicenseType = PlayReadyLicenseType.Persistent;
// Act
PlayReadyLicenseType playReadyLicenseType = PlayReadyLicenseType.fromCode(1);
// Assert
assertEquals(playReadyLicenseType, expectedPlayReadyLicenseType);
}
@Test
public void fromCodeInvalidTokenTypeTests() {
// Arrange
int invalidCode = 666;
String expectedMessage = "code";
// Act
try {
@SuppressWarnings("unused")
PlayReadyLicenseType tokenPlayReadyLicenseType = PlayReadyLicenseType.fromCode(invalidCode);
fail("Should throw");
} catch (InvalidParameterException e) {
// Assert
assertEquals(e.getMessage(), expectedMessage);
}
}
@Test
public void getCodePersistentPlayReadyLicenseTypeTests() {
// Arrange
int expectedCode = 1;
// Act
PlayReadyLicenseType tokenTypeResult = PlayReadyLicenseType.Persistent;
int resultCode = tokenTypeResult.getCode();
// Assert
assertEquals(resultCode, expectedCode);
}
}
| 868 |
2,743 | <reponame>eshbeata/open-paperless
from __future__ import absolute_import, unicode_literals
from django.utils.translation import ugettext_lazy as _
from permissions import PermissionNamespace
namespace = PermissionNamespace('common', _('Common'))
permission_error_log_view = namespace.add_permission(
name='error_log_view', label=_('View error log')
)
| 113 |
400 | <filename>src/ut-stubs/osapi-queue-stubs.c
/*
* NASA Docket No. GSC-18,370-1, and identified as "Operating System Abstraction Layer"
*
* Copyright (c) 2019 United States Government as represented by
* the Administrator of the National Aeronautics and Space Administration.
* All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
*
* Auto-Generated stub implementations for functions defined in osapi-queue header
*/
#include "osapi-queue.h"
#include "utgenstub.h"
extern void UT_DefaultHandler_OS_QueueCreate(void *, UT_EntryKey_t, const UT_StubContext_t *);
extern void UT_DefaultHandler_OS_QueueDelete(void *, UT_EntryKey_t, const UT_StubContext_t *);
extern void UT_DefaultHandler_OS_QueueGet(void *, UT_EntryKey_t, const UT_StubContext_t *);
extern void UT_DefaultHandler_OS_QueueGetIdByName(void *, UT_EntryKey_t, const UT_StubContext_t *);
extern void UT_DefaultHandler_OS_QueueGetInfo(void *, UT_EntryKey_t, const UT_StubContext_t *);
extern void UT_DefaultHandler_OS_QueuePut(void *, UT_EntryKey_t, const UT_StubContext_t *);
/*
* ----------------------------------------------------
* Generated stub function for OS_QueueCreate()
* ----------------------------------------------------
*/
int32 OS_QueueCreate(osal_id_t *queue_id, const char *queue_name, osal_blockcount_t queue_depth, size_t data_size,
uint32 flags)
{
UT_GenStub_SetupReturnBuffer(OS_QueueCreate, int32);
UT_GenStub_AddParam(OS_QueueCreate, osal_id_t *, queue_id);
UT_GenStub_AddParam(OS_QueueCreate, const char *, queue_name);
UT_GenStub_AddParam(OS_QueueCreate, osal_blockcount_t, queue_depth);
UT_GenStub_AddParam(OS_QueueCreate, size_t, data_size);
UT_GenStub_AddParam(OS_QueueCreate, uint32, flags);
UT_GenStub_Execute(OS_QueueCreate, Basic, UT_DefaultHandler_OS_QueueCreate);
return UT_GenStub_GetReturnValue(OS_QueueCreate, int32);
}
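/*
 * Usage sketch (an assumption about typical UT-Assert based tests, not part of this
 * generated file): a test can steer the stub before exercising the unit under test,
 * for example
 *
 *   UT_SetDefaultReturnValue(UT_KEY(OS_QueueCreate), OS_ERROR);
 *
 * so that every OS_QueueCreate() call made by the code under test returns OS_ERROR
 * instead of the default value.
 */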
/*
* ----------------------------------------------------
* Generated stub function for OS_QueueDelete()
* ----------------------------------------------------
*/
int32 OS_QueueDelete(osal_id_t queue_id)
{
UT_GenStub_SetupReturnBuffer(OS_QueueDelete, int32);
UT_GenStub_AddParam(OS_QueueDelete, osal_id_t, queue_id);
UT_GenStub_Execute(OS_QueueDelete, Basic, UT_DefaultHandler_OS_QueueDelete);
return UT_GenStub_GetReturnValue(OS_QueueDelete, int32);
}
/*
* ----------------------------------------------------
* Generated stub function for OS_QueueGet()
* ----------------------------------------------------
*/
int32 OS_QueueGet(osal_id_t queue_id, void *data, size_t size, size_t *size_copied, int32 timeout)
{
UT_GenStub_SetupReturnBuffer(OS_QueueGet, int32);
UT_GenStub_AddParam(OS_QueueGet, osal_id_t, queue_id);
UT_GenStub_AddParam(OS_QueueGet, void *, data);
UT_GenStub_AddParam(OS_QueueGet, size_t, size);
UT_GenStub_AddParam(OS_QueueGet, size_t *, size_copied);
UT_GenStub_AddParam(OS_QueueGet, int32, timeout);
UT_GenStub_Execute(OS_QueueGet, Basic, UT_DefaultHandler_OS_QueueGet);
return UT_GenStub_GetReturnValue(OS_QueueGet, int32);
}
/*
* ----------------------------------------------------
* Generated stub function for OS_QueueGetIdByName()
* ----------------------------------------------------
*/
int32 OS_QueueGetIdByName(osal_id_t *queue_id, const char *queue_name)
{
UT_GenStub_SetupReturnBuffer(OS_QueueGetIdByName, int32);
UT_GenStub_AddParam(OS_QueueGetIdByName, osal_id_t *, queue_id);
UT_GenStub_AddParam(OS_QueueGetIdByName, const char *, queue_name);
UT_GenStub_Execute(OS_QueueGetIdByName, Basic, UT_DefaultHandler_OS_QueueGetIdByName);
return UT_GenStub_GetReturnValue(OS_QueueGetIdByName, int32);
}
/*
* ----------------------------------------------------
* Generated stub function for OS_QueueGetInfo()
* ----------------------------------------------------
*/
int32 OS_QueueGetInfo(osal_id_t queue_id, OS_queue_prop_t *queue_prop)
{
UT_GenStub_SetupReturnBuffer(OS_QueueGetInfo, int32);
UT_GenStub_AddParam(OS_QueueGetInfo, osal_id_t, queue_id);
UT_GenStub_AddParam(OS_QueueGetInfo, OS_queue_prop_t *, queue_prop);
UT_GenStub_Execute(OS_QueueGetInfo, Basic, UT_DefaultHandler_OS_QueueGetInfo);
return UT_GenStub_GetReturnValue(OS_QueueGetInfo, int32);
}
/*
* ----------------------------------------------------
* Generated stub function for OS_QueuePut()
* ----------------------------------------------------
*/
int32 OS_QueuePut(osal_id_t queue_id, const void *data, size_t size, uint32 flags)
{
UT_GenStub_SetupReturnBuffer(OS_QueuePut, int32);
UT_GenStub_AddParam(OS_QueuePut, osal_id_t, queue_id);
UT_GenStub_AddParam(OS_QueuePut, const void *, data);
UT_GenStub_AddParam(OS_QueuePut, size_t, size);
UT_GenStub_AddParam(OS_QueuePut, uint32, flags);
UT_GenStub_Execute(OS_QueuePut, Basic, UT_DefaultHandler_OS_QueuePut);
return UT_GenStub_GetReturnValue(OS_QueuePut, int32);
}
| 1,873 |
707 | // Copyright (c) FIRST and other WPILib contributors.
// Open Source Software; you can modify and/or share it under the terms of
// the WPILib BSD license file in the root directory of this project.
#include "frc/simulation/EncoderSim.h"
#include <memory>
#include <stdexcept>
#include <utility>
#include <hal/simulation/EncoderData.h>
#include "frc/Encoder.h"
using namespace frc;
using namespace frc::sim;
EncoderSim::EncoderSim(const Encoder& encoder)
: m_index{encoder.GetFPGAIndex()} {}
EncoderSim EncoderSim::CreateForChannel(int channel) {
int index = HALSIM_FindEncoderForChannel(channel);
if (index < 0) {
throw std::out_of_range("no encoder found for channel");
}
return EncoderSim{index};
}
EncoderSim EncoderSim::CreateForIndex(int index) {
return EncoderSim{index};
}
std::unique_ptr<CallbackStore> EncoderSim::RegisterInitializedCallback(
NotifyCallback callback, bool initialNotify) {
auto store = std::make_unique<CallbackStore>(
m_index, -1, callback, &HALSIM_CancelEncoderInitializedCallback);
store->SetUid(HALSIM_RegisterEncoderInitializedCallback(
m_index, &CallbackStoreThunk, store.get(), initialNotify));
return store;
}
bool EncoderSim::GetInitialized() const {
return HALSIM_GetEncoderInitialized(m_index);
}
void EncoderSim::SetInitialized(bool initialized) {
HALSIM_SetEncoderInitialized(m_index, initialized);
}
std::unique_ptr<CallbackStore> EncoderSim::RegisterCountCallback(
NotifyCallback callback, bool initialNotify) {
auto store = std::make_unique<CallbackStore>(
m_index, -1, callback, &HALSIM_CancelEncoderCountCallback);
store->SetUid(HALSIM_RegisterEncoderCountCallback(
m_index, &CallbackStoreThunk, store.get(), initialNotify));
return store;
}
int EncoderSim::GetCount() const {
return HALSIM_GetEncoderCount(m_index);
}
void EncoderSim::SetCount(int count) {
HALSIM_SetEncoderCount(m_index, count);
}
std::unique_ptr<CallbackStore> EncoderSim::RegisterPeriodCallback(
NotifyCallback callback, bool initialNotify) {
auto store = std::make_unique<CallbackStore>(
m_index, -1, callback, &HALSIM_CancelEncoderPeriodCallback);
store->SetUid(HALSIM_RegisterEncoderPeriodCallback(
m_index, &CallbackStoreThunk, store.get(), initialNotify));
return store;
}
double EncoderSim::GetPeriod() const {
return HALSIM_GetEncoderPeriod(m_index);
}
void EncoderSim::SetPeriod(double period) {
HALSIM_SetEncoderPeriod(m_index, period);
}
std::unique_ptr<CallbackStore> EncoderSim::RegisterResetCallback(
NotifyCallback callback, bool initialNotify) {
auto store = std::make_unique<CallbackStore>(
m_index, -1, callback, &HALSIM_CancelEncoderResetCallback);
store->SetUid(HALSIM_RegisterEncoderResetCallback(
m_index, &CallbackStoreThunk, store.get(), initialNotify));
return store;
}
bool EncoderSim::GetReset() const {
return HALSIM_GetEncoderReset(m_index);
}
void EncoderSim::SetReset(bool reset) {
HALSIM_SetEncoderReset(m_index, reset);
}
std::unique_ptr<CallbackStore> EncoderSim::RegisterMaxPeriodCallback(
NotifyCallback callback, bool initialNotify) {
auto store = std::make_unique<CallbackStore>(
m_index, -1, callback, &HALSIM_CancelEncoderMaxPeriodCallback);
store->SetUid(HALSIM_RegisterEncoderMaxPeriodCallback(
m_index, &CallbackStoreThunk, store.get(), initialNotify));
return store;
}
double EncoderSim::GetMaxPeriod() const {
return HALSIM_GetEncoderMaxPeriod(m_index);
}
void EncoderSim::SetMaxPeriod(double maxPeriod) {
HALSIM_SetEncoderMaxPeriod(m_index, maxPeriod);
}
std::unique_ptr<CallbackStore> EncoderSim::RegisterDirectionCallback(
NotifyCallback callback, bool initialNotify) {
auto store = std::make_unique<CallbackStore>(
m_index, -1, callback, &HALSIM_CancelEncoderDirectionCallback);
store->SetUid(HALSIM_RegisterEncoderDirectionCallback(
m_index, &CallbackStoreThunk, store.get(), initialNotify));
return store;
}
bool EncoderSim::GetDirection() const {
return HALSIM_GetEncoderDirection(m_index);
}
void EncoderSim::SetDirection(bool direction) {
HALSIM_SetEncoderDirection(m_index, direction);
}
std::unique_ptr<CallbackStore> EncoderSim::RegisterReverseDirectionCallback(
NotifyCallback callback, bool initialNotify) {
auto store = std::make_unique<CallbackStore>(
m_index, -1, callback, &HALSIM_CancelEncoderReverseDirectionCallback);
store->SetUid(HALSIM_RegisterEncoderReverseDirectionCallback(
m_index, &CallbackStoreThunk, store.get(), initialNotify));
return store;
}
bool EncoderSim::GetReverseDirection() const {
return HALSIM_GetEncoderReverseDirection(m_index);
}
void EncoderSim::SetReverseDirection(bool reverseDirection) {
HALSIM_SetEncoderReverseDirection(m_index, reverseDirection);
}
std::unique_ptr<CallbackStore> EncoderSim::RegisterSamplesToAverageCallback(
NotifyCallback callback, bool initialNotify) {
auto store = std::make_unique<CallbackStore>(
m_index, -1, callback, &HALSIM_CancelEncoderSamplesToAverageCallback);
store->SetUid(HALSIM_RegisterEncoderSamplesToAverageCallback(
m_index, &CallbackStoreThunk, store.get(), initialNotify));
return store;
}
int EncoderSim::GetSamplesToAverage() const {
return HALSIM_GetEncoderSamplesToAverage(m_index);
}
void EncoderSim::SetSamplesToAverage(int samplesToAverage) {
HALSIM_SetEncoderSamplesToAverage(m_index, samplesToAverage);
}
std::unique_ptr<CallbackStore> EncoderSim::RegisterDistancePerPulseCallback(
NotifyCallback callback, bool initialNotify) {
auto store = std::make_unique<CallbackStore>(
m_index, -1, callback, &HALSIM_CancelEncoderDistancePerPulseCallback);
store->SetUid(HALSIM_RegisterEncoderDistancePerPulseCallback(
m_index, &CallbackStoreThunk, store.get(), initialNotify));
return store;
}
double EncoderSim::GetDistancePerPulse() const {
return HALSIM_GetEncoderDistancePerPulse(m_index);
}
void EncoderSim::SetDistancePerPulse(double distancePerPulse) {
HALSIM_SetEncoderDistancePerPulse(m_index, distancePerPulse);
}
void EncoderSim::ResetData() {
HALSIM_ResetEncoderData(m_index);
}
void EncoderSim::SetDistance(double distance) {
HALSIM_SetEncoderDistance(m_index, distance);
}
double EncoderSim::GetDistance() {
return HALSIM_GetEncoderDistance(m_index);
}
void EncoderSim::SetRate(double rate) {
HALSIM_SetEncoderRate(m_index, rate);
}
double EncoderSim::GetRate() {
return HALSIM_GetEncoderRate(m_index);
}
| 2,233 |
356 | <filename>mind-map/idea-mindmap/src/com/igormaznitsa/ideamindmap/utils/SelectIn.java
/*
* Copyright 2015-2018 <NAME>.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.igormaznitsa.ideamindmap.utils;
import com.igormaznitsa.ideamindmap.editor.MindMapDocumentEditor;
import com.igormaznitsa.ideamindmap.view.KnowledgeViewPane;
import com.igormaznitsa.mindmap.model.logger.Logger;
import com.igormaznitsa.mindmap.model.logger.LoggerFactory;
import com.intellij.ide.projectView.ProjectView;
import com.intellij.ide.projectView.impl.ProjectViewPane;
import com.intellij.openapi.editor.Editor;
import com.intellij.openapi.editor.LogicalPosition;
import com.intellij.openapi.editor.ScrollType;
import com.intellij.openapi.fileEditor.FileEditor;
import com.intellij.openapi.fileEditor.FileEditorManager;
import com.intellij.openapi.fileEditor.NavigatableFileEditor;
import com.intellij.openapi.fileEditor.TextEditor;
import com.intellij.openapi.module.Module;
import com.intellij.openapi.module.ModuleUtil;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.project.ProjectManager;
import com.intellij.openapi.vfs.VfsUtil;
import com.intellij.openapi.vfs.VirtualFile;
import com.intellij.openapi.wm.ToolWindow;
import com.intellij.openapi.wm.ToolWindowId;
import com.intellij.openapi.wm.ToolWindowManager;
import javax.annotation.Nonnull;
public enum SelectIn {
IDE,
SYSTEM,
;
private static final Logger LOGGER = LoggerFactory.getLogger(SelectIn.class);
private static void projectFocusTo(final Project project, final VirtualFile file) {
final ProjectView view = ProjectView.getInstance(project);
String viewToActivate = ProjectViewPane.ID;
if (KnowledgeViewPane.ID.equals(view.getCurrentViewId())) {
final Module theModule = ModuleUtil.findModuleForFile(file, project);
if (theModule == null) {
viewToActivate = null;
} else {
final VirtualFile knowledgeFolder = IdeaUtils.findKnowledgeFolderForModule(theModule, false);
if (knowledgeFolder != null && VfsUtil.isAncestor(knowledgeFolder, file, true)) {
viewToActivate = KnowledgeViewPane.ID;
}
}
}
if (viewToActivate != null) {
view.changeView(viewToActivate);
}
final ToolWindow toolwindow = ToolWindowManager.getInstance(project).getToolWindow(ToolWindowId.PROJECT_VIEW);
if (toolwindow != null) {
toolwindow.activate(() -> view.select(null, file, true));
}
}
public void open(@Nonnull final MindMapDocumentEditor source, @Nonnull final VirtualFile file, final int line) {
final ProjectManager manager = ProjectManager.getInstance();
switch (this) {
case IDE: {
if (file.isDirectory()) {
if (IdeaUtils.isInProjectContentRoot(source.getProject(), file)) {
projectFocusTo(source.getProject(), file);
} else {
try {
manager.loadAndOpenProject(file.getCanonicalPath());
} catch (Exception ex) {
LOGGER.error("Can't open folder as project [" + file + ']', ex);
IdeaUtils.openInSystemViewer(source.getDialogProvider(), file);
}
}
} else {
projectFocusTo(source.getProject(), file);
final FileEditor[] editors = FileEditorManager.getInstance(source.getProject()).openFile(file, true);
if (line > 0) {
for (final FileEditor e : editors) {
if (e instanceof NavigatableFileEditor) {
final TextEditor navigatedEditor = (TextEditor) e;
final Editor editor = navigatedEditor.getEditor();
if (editor != null && editor.getDocument().getLineCount() > line) {
editor.getCaretModel().moveToLogicalPosition(new LogicalPosition(line - 1, 0));
editor.getScrollingModel().scrollToCaret(ScrollType.CENTER);
}
}
}
}
}
}
break;
case SYSTEM: {
IdeaUtils.openInSystemViewer(source.getDialogProvider(), file);
}
break;
}
}
}
| 1,760 |
571 | <reponame>laqieer/butano<filename>butano/hw/3rd_party/gba-link-connection/include/LinkConnection.h
#ifndef LINK_CONNECTION_H
#define LINK_CONNECTION_H
#include <tonc_core.h>
#include <tonc_memdef.h>
#include <tonc_memmap.h>
#include "bn_deque.h"
#include "bn_config_link.h"
static_assert(BN_CFG_LINK_BAUD_RATE == BN_LINK_BAUD_RATE_9600_BPS ||
BN_CFG_LINK_BAUD_RATE == BN_LINK_BAUD_RATE_38400_BPS ||
BN_CFG_LINK_BAUD_RATE == BN_LINK_BAUD_RATE_57600_BPS ||
BN_CFG_LINK_BAUD_RATE == BN_LINK_BAUD_RATE_115200_BPS);
static_assert(BN_CFG_LINK_SEND_WAIT > 0);
static_assert(bn::power_of_two(BN_CFG_LINK_MAX_MESSAGES));
static_assert(BN_CFG_LINK_MAX_MISSING_MESSAGES >= 0);
#define LINK_MAX_PLAYERS 4
#define LINK_DISCONNECTED 0xFFFF
#define LINK_NO_DATA 0x0
#define LINK_DEFAULT_TIMEOUT (BN_CFG_LINK_MAX_MISSING_MESSAGES + 1)
#define LINK_DEFAULT_REMOTE_TIMEOUT LINK_DEFAULT_TIMEOUT
#define LINK_DEFAULT_BUFFER_SIZE BN_CFG_LINK_MAX_MESSAGES
#define LINK_DEFAULT_INTERVAL BN_CFG_LINK_SEND_WAIT
#define LINK_DEFAULT_SEND_TIMER_ID 1
#define LINK_BASE_FREQUENCY TM_FREQ_1024
#define LINK_REMOTE_TIMEOUT_OFFLINE -1
#define LINK_BIT_SLAVE 2
#define LINK_BIT_READY 3
#define LINK_BITS_PLAYER_ID 4
#define LINK_BIT_ERROR 6
#define LINK_BIT_START 7
#define LINK_BIT_MULTIPLAYER 13
#define LINK_BIT_IRQ 14
#define LINK_BIT_GENERAL_PURPOSE_LOW 14
#define LINK_BIT_GENERAL_PURPOSE_HIGH 15
#define LINK_SET_HIGH(REG, BIT) REG |= 1 << BIT
#define LINK_SET_LOW(REG, BIT) REG &= ~(1 << BIT)
// A Link Cable connection for Multi-player mode.
// Usage:
// - 1) Include this header in your main.cpp file and add:
// LinkConnection* linkConnection = new LinkConnection();
// - 2) Add the required interrupt service routines:
// irq_init(NULL);
// irq_add(II_VBLANK, LINK_ISR_VBLANK);
// irq_add(II_SERIAL, LINK_ISR_SERIAL);
// irq_add(II_TIMER3, LINK_ISR_TIMER);
// irq_add(II_TIMER2, NULL);
// - 3) Initialize the library with:
// linkConnection->activate();
// - 4) Send/read messages by using:
// linkConnection->send(...);
// linkConnection->linkState
// `data` restrictions:
// 0xFFFF and 0x0 are reserved values, so don't use them
// (they mean 'disconnected' and 'no data' respectively)
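// Minimal per-frame sketch (an added assumption about typical usage, not taken from
// the library's documentation) showing step 4 with the members declared below:
//
//   void readIncoming() {
//     LinkState& state = linkConnection->linkState;
//     if (!state.isConnected())
//       return;
//     for (u32 i = 0; i < LINK_MAX_PLAYERS; i++) {
//       if (i == state.currentPlayerId)
//         continue;
//       while (state.hasMessage(i))
//         handle(i, state.readMessage(i));  // handle() is a hypothetical placeholder
//     }
//     linkConnection->send(0x1234);  // any value other than LINK_NO_DATA / LINK_DISCONNECTED
//   }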
void LINK_ISR_VBLANK();
void LINK_ISR_TIMER();
void LINK_ISR_SERIAL();
u16 LINK_QUEUE_POP(bn::ideque<u16>& q);
void LINK_QUEUE_CLEAR(bn::ideque<u16>& q);
struct LinkState {
bn::deque<u16, LINK_DEFAULT_BUFFER_SIZE> _incomingMessages[LINK_MAX_PLAYERS];
bn::deque<u16, LINK_DEFAULT_BUFFER_SIZE> _outgoingMessages;
int _timeouts[LINK_MAX_PLAYERS];
u32 _IRQTimeout;
u8 playerCount;
u8 currentPlayerId;
bool _IRQFlag;
bool isConnected() {
return playerCount > 1 && currentPlayerId < playerCount;
}
bool hasMessage(u8 playerId) {
if (playerId >= playerCount)
return false;
return !_incomingMessages[playerId].empty();
}
u16 readMessage(u8 playerId) {
return LINK_QUEUE_POP(_incomingMessages[playerId]);
}
};
class LinkConnection {
public:
LinkState linkState;
void init() {
stop();
}
bool isActive() {
return isEnabled;
}
void activate() {
isEnabled = true;
reset();
}
void deactivate() {
isEnabled = false;
resetState();
stop();
}
void block() {
isBlocked = true;
}
void unblock() {
isBlocked = false;
}
void send(u16 data) {
if (data == LINK_DISCONNECTED || data == LINK_NO_DATA)
return;
push(linkState._outgoingMessages, data);
}
void _onVBlank() {
if (!isEnabled || isBlocked)
return;
if (!linkState._IRQFlag)
linkState._IRQTimeout++;
linkState._IRQFlag = false;
}
void _onTimer() {
if (!isEnabled || isBlocked)
return;
if (didTimeout()) {
reset();
return;
}
if (isMaster() && isReady() && !isSending())
sendPendingData();
}
void _onSerial() {
if (!isEnabled || isBlocked)
return;
if (resetIfNeeded())
return;
linkState._IRQFlag = true;
linkState._IRQTimeout = 0;
unsigned newPlayerCount = 0;
for (u32 i = 0; i < LINK_MAX_PLAYERS; i++) {
u16 data = REG_SIOMULTI[i];
if (data != LINK_DISCONNECTED) {
if (data != LINK_NO_DATA && i != linkState.currentPlayerId)
push(linkState._incomingMessages[i], data);
newPlayerCount++;
linkState._timeouts[i] = 0;
}
else if (linkState._timeouts[i] > LINK_REMOTE_TIMEOUT_OFFLINE) {
if (linkState._timeouts[i] >= LINK_DEFAULT_REMOTE_TIMEOUT) {
LINK_QUEUE_CLEAR(linkState._incomingMessages[i]);
linkState._timeouts[i] = LINK_REMOTE_TIMEOUT_OFFLINE;
}
else {
linkState._timeouts[i]++;
newPlayerCount++;
}
}
}
linkState.playerCount = newPlayerCount;
linkState.currentPlayerId =
(REG_SIOCNT & (0b11 << LINK_BITS_PLAYER_ID)) >> LINK_BITS_PLAYER_ID;
if (!isMaster())
sendPendingData();
}
private:
bool isEnabled = false;
bool isBlocked = false;
bool isReady() { return isBitHigh(LINK_BIT_READY); }
bool hasError() { return isBitHigh(LINK_BIT_ERROR); }
bool isMaster() { return !isBitHigh(LINK_BIT_SLAVE); }
bool isSending() { return isBitHigh(LINK_BIT_START); }
bool didTimeout() { return linkState._IRQTimeout >= LINK_DEFAULT_TIMEOUT; }
void sendPendingData() {
transfer(LINK_QUEUE_POP(linkState._outgoingMessages));
}
void transfer(u16 data) {
REG_SIOMLT_SEND = data;
if (isMaster()) {
setBitHigh(LINK_BIT_START);
}
}
bool resetIfNeeded() {
if (!isReady() || hasError()) {
reset();
return true;
}
return false;
}
void reset() {
resetState();
stop();
start();
}
void resetState() {
linkState.playerCount = 0;
linkState.currentPlayerId = 0;
for (u32 i = 0; i < LINK_MAX_PLAYERS; i++) {
LINK_QUEUE_CLEAR(linkState._incomingMessages[i]);
linkState._timeouts[i] = LINK_REMOTE_TIMEOUT_OFFLINE;
}
LINK_QUEUE_CLEAR(linkState._outgoingMessages);
linkState._IRQFlag = false;
linkState._IRQTimeout = 0;
}
void stop() {
stopTimer();
LINK_SET_LOW(REG_RCNT, LINK_BIT_GENERAL_PURPOSE_LOW);
LINK_SET_HIGH(REG_RCNT, LINK_BIT_GENERAL_PURPOSE_HIGH);
}
void start() {
startTimer();
LINK_SET_LOW(REG_RCNT, LINK_BIT_GENERAL_PURPOSE_HIGH);
REG_SIOCNT = BN_CFG_LINK_BAUD_RATE;
REG_SIOMLT_SEND = 0;
setBitHigh(LINK_BIT_MULTIPLAYER);
setBitHigh(LINK_BIT_IRQ);
}
void stopTimer() {
REG_TM[LINK_DEFAULT_SEND_TIMER_ID].cnt = REG_TM[LINK_DEFAULT_SEND_TIMER_ID].cnt & (~TM_ENABLE);
}
void startTimer() {
REG_TM[LINK_DEFAULT_SEND_TIMER_ID].start = -LINK_DEFAULT_INTERVAL;
REG_TM[LINK_DEFAULT_SEND_TIMER_ID].cnt = TM_ENABLE | TM_IRQ | LINK_BASE_FREQUENCY;
}
void push(bn::ideque<u16>& q, u16 value) {
if (q.full())
q.pop_front();
q.push_back(value);
}
bool isBitHigh(unsigned bit) { return (REG_SIOCNT >> bit) & 1; }
void setBitHigh(unsigned bit) { LINK_SET_HIGH(REG_SIOCNT, bit); }
void setBitLow(unsigned bit) { LINK_SET_LOW(REG_SIOCNT, bit); }
};
extern LinkConnection* linkConnection;
inline void LINK_ISR_VBLANK() {
linkConnection->_onVBlank();
}
inline void LINK_ISR_TIMER() {
linkConnection->_onTimer();
}
inline void LINK_ISR_SERIAL() {
linkConnection->_onSerial();
}
inline u16 LINK_QUEUE_POP(bn::ideque<u16>& q) {
if (q.empty())
return LINK_NO_DATA;
u16 value = q.front();
q.pop_front();
return value;
}
inline void LINK_QUEUE_CLEAR(bn::ideque<u16>& q) {
q.clear();
}
#endif // LINK_CONNECTION_H
| 4,299 |
460 | <gh_stars>100-1000
/*
* Copyright 2012 Google Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef BUMPTOP_UNDOCOMMANDS_ROOMSTATEUNDOCOMMAND_H_
#define BUMPTOP_UNDOCOMMANDS_ROOMSTATEUNDOCOMMAND_H_
class Room;
class RoomUndoRedoState;
#include <QtGui/QUndoCommand>
#include <boost/shared_ptr.hpp>
class RoomStateUndoCommand : public QUndoCommand {
public:
explicit RoomStateUndoCommand(Room* room);
virtual ~RoomStateUndoCommand();
virtual void undo();
virtual void redo();
virtual void updateRoomFromRoomUndoRedoState(boost::shared_ptr<RoomUndoRedoState> state);
virtual void set_current_state(boost::shared_ptr<RoomUndoRedoState> state);
virtual void set_last_state(boost::shared_ptr<RoomUndoRedoState> state);
virtual boost::shared_ptr<RoomUndoRedoState> current_state();
virtual boost::shared_ptr<RoomUndoRedoState> last_state();
protected:
boost::shared_ptr<RoomUndoRedoState> current_state_;
boost::shared_ptr<RoomUndoRedoState> last_state_;
bool first_redo_happened_;
Room *room_;
};
#endif // BUMPTOP_UNDOCOMMANDS_ROOMSTATEUNDOCOMMAND_H_
| 542 |
23,901 | <filename>sgk/sparse/ops/cc/sddmm_launcher.cu.cc
// Copyright 2021 The Google Research Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#if GOOGLE_CUDA
#define EIGEN_USE_GPU // For definition of Eigen::GpuDevice.
#include "sparse/ops/cc/common.h"
#include "sparse/ops/cc/sddmm_launcher.h"
#include "sputnik/sputnik.h"
namespace sgk {
// CUDA kernel launcher.
void LaunchSddmm(const Eigen::GpuDevice &d, int m, int k, int n, int nonzeros,
const int *row_indices, const int *row_offsets,
const int *column_indices, const float *lhs_matrix,
const float *rhs_matrix, float *output_values) {
// TODO(tgale): There should be a TensorFlow approach to checking
// cudaError_t objects correctly. Switch to this, whatever it is.
CUDA_CALL(sputnik::CudaSddmm(m, k, n, nonzeros, row_indices, row_offsets,
column_indices, lhs_matrix, rhs_matrix,
output_values, d.stream()));
}
} // namespace sgk
#endif // GOOGLE_CUDA
| 592 |
1,346 | <gh_stars>1000+
package com.ctrip.platform.dal.dao.markdown;
import org.junit.runner.RunWith;
import org.junit.runners.Suite;
import org.junit.runners.Suite.SuiteClasses;
@RunWith(Suite.class)
@SuiteClasses({
MarkdownAndUpIntergration.class,
DetectorCounterTest.class,
TimeoutDetectorTest.class,
AutoMarkdownTest.class,
ManualMarkDownTest.class,
TimeBucketCounterTest.class,
})
public class _AllTests {
}
| 158 |
2,449 | /**
* Copyright 2016 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.deployservice.bean;
public class Resource {
public enum Type {
ENV, GROUP, SYSTEM
}
public final static String ALL = "*";
public final static Resource SYSTEM_RESOURCE = new Resource(ALL, Type.SYSTEM);
private String id;
private Type type;
public Resource(String id, Type type) {
this.id = id;
this.type = type;
}
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
public Type getType() {
return type;
}
public void setType(Type type) {
this.type = type;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
Resource resource = (Resource) o;
if (!id.equals(resource.id)) {
return false;
}
return type == resource.type;
}
@Override
public int hashCode() {
int result = id.hashCode();
result = 31 * result + type.hashCode();
return result;
}
}
| 675 |
4,816 | <reponame>mehrdad-shokri/retdec<gh_stars>1000+
/**
* @file src/common/class.cpp
* @brief Common class representation.
* @copyright (c) 2019 Avast Software, licensed under the MIT license
*/
#include "retdec/common/class.h"
#include "retdec/utils/container.h"
using retdec::utils::hasItem;
namespace retdec {
namespace common {
//
//=============================================================================
// Class
//=============================================================================
//
Class::Class(const std::string& className) :
_name(className)
{
}
/**
* @return Class's ID is its name.
*/
std::string Class::getId() const
{
return getName();
}
std::string Class::getName() const
{
return _name;
}
std::string Class::getDemangledName() const
{
return _demangledName;
}
const std::vector<std::string>& Class::getSuperClasses() const
{
return _superClasses;
}
void Class::setName(const std::string& name)
{
_name = name;
}
void Class::setDemangledName(const std::string& demangledName)
{
_demangledName = demangledName;
}
/**
* Has the class a constructor of the given name?
*/
bool Class::hasConstructor(const std::string& name) const
{
return hasItem(constructors, name);
}
/**
* Has the class a destructor of the given name?
*/
bool Class::hasDestructor(const std::string& name) const
{
return hasItem(destructors, name);
}
/**
* Has the class a method of the given name?
*
* Only non-virtual methods are considered. If you want to check whether a class
* has a virtual method, use hasVirtualMethod().
*/
bool Class::hasMethod(const std::string& name) const
{
return hasItem(methods, name);
}
/**
* Has the class a virtual method of the given name?
*/
bool Class::hasVirtualMethod(const std::string& name) const
{
return hasItem(virtualMethods, name);
}
/**
* Does a function with the given name belong to the class?
*
* The function may be a constructor, destructor, method, or virtual method.
*/
bool Class::hasFunction(const std::string& name) const
{
return hasConstructor(name) ||
hasDestructor(name) ||
hasMethod(name) ||
hasVirtualMethod(name);
}
/**
* New super class is added only if there is not existing superclass of that name.
* @return @c True if superclass was added, @c false otherwise.
*/
bool Class::addSuperClass(const std::string& superClass)
{
for (auto& s : _superClasses)
{
if (s == superClass)
return false;
}
_superClasses.push_back(superClass);
return true;
}
/**
* Classes are ordered by their names.
*/
bool Class::operator<(const Class& o) const
{
return getName() < o.getName();
}
/**
* Classes are equal if their names are equal.
*/
bool Class::operator==(const Class& o) const
{
return getName() == o.getName();
}
} // namespace common
} // namespace retdec
| 877 |
1,755 | #ifndef DIY_COMMUNICATOR_HPP
#define DIY_COMMUNICATOR_HPP
#warning "diy::Communicator (in diy/communicator.hpp) is deprecated, use diy::mpi::communicator directly"
#include "mpi.hpp"
namespace diy
{
typedef mpi::communicator Communicator;
}
#endif
| 108 |
836 | /*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package androidx.test.core.app.testing;
import android.app.Activity;
import android.os.Bundle;
import androidx.annotation.Nullable;
/**
* An activity that finishes itself in {@link #onCreate(Bundle)}.
*
* <p>Note: When you call {@link #finish()} in {@link #onCreate(Bundle)}, the specialized lifecycle
* transition is applied and {@link #onDestroy()} is called immediately after {@link
 * #onCreate(Bundle)}. {@link #onStart()} and {@link #onResume()} are never called in this
* scenario.
*
* <p>This activity is used to test {@link androidx.test.core.app.ActivityScenario#launch(Class)}
 * ensuring it handles these special lifecycle transitions properly.
*/
public class FinishItselfActivity extends Activity {
@Override
protected void onCreate(@Nullable Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
finish();
}
}
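// Usage sketch (an assumption about a typical ActivityScenario test, not part of this
// class): launching the activity exercises the onCreate() -> onDestroy() shortcut the
// javadoc above describes.
//
//   try (ActivityScenario<FinishItselfActivity> scenario =
//       ActivityScenario.launch(FinishItselfActivity.class)) {
//     // onCreate() calls finish(); onStart()/onResume() never run and the activity
//     // is destroyed without ever becoming resumed.
//   }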
| 421 |
15,577 | <reponame>pdv-ru/ClickHouse<filename>src/Functions/JSONPath/Generator/IGenerator_fwd.h
#pragma once
#include <Functions/JSONPath/Generator/IVisitor.h>
namespace DB
{
template <typename JSONParser>
class IGenerator;
template <typename JSONParser>
using IVisitorPtr = std::shared_ptr<IVisitor<JSONParser>>;
template <typename JSONParser>
using VisitorList = std::vector<IVisitorPtr<JSONParser>>;
}
| 146 |
984 | <gh_stars>100-1000
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.phoenix.compile;
import java.util.List;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.phoenix.expression.Expression;
import org.apache.phoenix.query.KeyRange;
import org.apache.phoenix.schema.PColumn;
import org.apache.phoenix.schema.PTable;
/**
*
* Interface that determines how a key part contributes to
* the forming of the key (start/stop of scan and SkipScanFilter)
* for each part of a multi-part primary key. It acts as the glue
* between a built-in function and the setting of the scan key
* during query compilation.
*
*/
public interface KeyPart {
/**
* Calculate the key range given an operator and the key on
* the RHS of an expression. For example, given the expression
* SUBSTR(foo,1,3) = 'bar', the key range would be ['bar','bas'),
* and if foo was fixed length, the upper and lower key range
* bytes would be filled out to the fixed length.
* @param op comparison operator {@code (=, <=, <, >=, >, !=) }
* @param rhs the constant on the RHS of an expression.
* @return the key range that encompasses the range for the
* expression for which this keyPart is associated
*
* @see org.apache.phoenix.expression.function.ScalarFunction#newKeyPart(KeyPart)
*/
public KeyRange getKeyRange(CompareOp op, Expression rhs);
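    // Worked note on the example above (explanatory addition, not an API guarantee):
    // for SUBSTR(foo, 1, 3) = 'bar' the range ['bar', 'bas') is used because
    // incrementing the last byte of the prefix gives the smallest exclusive upper
    // bound that still contains every key beginning with 'bar'.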
/**
* Determines whether an expression gets extracted from the
* WHERE clause if it contributes toward the building of the
* scan key. For example, the SUBSTR built-in function may
* be extracted, since it may be completely represented
* through a key range. However, the REGEXP_SUBSTR must be
* left in the WHERE clause, since only the constant prefix
* part of the evaluation can be represented through a key
* range (i.e. rows may pass through that will fail when
* the REGEXP_SUBSTR is evaluated).
*
* @return an empty list if the expression should remain in
     * the WHERE clause for post filtering or a singleton list
* containing the expression if it should be removed.
*/
public List<Expression> getExtractNodes();
/**
* Gets the primary key column associated with the start of this key part.
* @return the primary key column for this key part
*/
public PColumn getColumn();
/**
* Gets the table metadata object associated with this key part
* @return the table for this key part
*/
public PTable getTable();
} | 1,003 |
1,040 | <reponame>Ybalrid/orbiter
/*
* Copyright (C) 1998 Microsoft Corporation. All Rights Reserved.
*
* File: rmfull.h
*
*/
#ifndef __RMFULL_H__
#define __RMFULL_H__
#include <d3drm.h>
#include <time.h>
#include "d3dapp.h" /* prototypes for D3D helper functions */
#include "resource.h" /* defines constants used in rmfull.rc */
#define START_WIN_SIZE 320 /* initial size of the window */
#define RELEASE(x) if (x != NULL) {x->Release(); x = NULL;}
#ifdef __cplusplus
extern "C" {
#endif
typedef struct tagrmfullglobals {
HWND hWndMain; /* application window handle */
HINSTANCE hInstApp; /* application instance for dialog boxes */
LPSTR lpCmdLine; /* command line parameters */
LPDIRECT3DRMDEVICE3 dev; /* Direct3DRM device */
LPDIRECT3DRMVIEWPORT2 view; /* Direct3DRM viewport through which we view
the scene */
LPDIRECT3DRMFRAME3 scene; /* Master frame in which others are placed */
LPDIRECT3DRMFRAME3 camera; /* Frame describing the users POV */
BOOL bSingleStepMode; /* render one frame at a time */
BOOL bDrawAFrame; /* render on this pass of the main loop */
BOOL bShowFrameRate; /* show the frame rate at the top */
BOOL bShowInfo; /* show window information at the bottom */
BOOL bResized; /* the window has resized or some other drastic change, the
entire client area should be cleared */
BOOL bQuit; /* program is about to terminate */
BOOL bNoTextures; /* this sample doesn't use any textures */
BOOL bConstRenderQuality; /* this sample is not constructed with
MeshBuilders and so the RenderQuality
cannot be changed */
D3DRMRENDERQUALITY RenderQuality; /* current shade mode, fill mode and
lighting state */
D3DRMTEXTUREQUALITY TextureQuality; /* current texture interpolation */
BOOL bDithering; /* is dithering on? */
BOOL bAntialiasing; /* is antialiasing on? */
int mouse_buttons; /* mouse button state */
int mouse_x; /* mouse cursor x position */
int mouse_y; /* mouse cursor y position */
int CurrDDDriver; /* Current DirectDraw driver */
int NumDDDrivers; /* Number of DirectDraw drivers */
D3DAppDDDriver DDDriver[D3DAPP_MAXDDDRIVERS]; /* DirectDraw drivers information */
LPDIRECTDRAWSURFACE lpFrameRateBuffer; /* frame rate surface */
LPDIRECTDRAWSURFACE lpInfoBuffer; /* window info surface */
} rmfullglobals;
void __cdecl Msg( LPSTR fmt, ... );
/*
* STATS.CPP FUNCTION PROTOTYPES
*/
BOOL InitFontAndTextBuffers(void);
BOOL WriteInfoBuffer(void);
BOOL WriteFrameRateBuffer(float fps, long tps);
void ResetFrameRate(void);
BOOL CalculateFrameRate();
BOOL DisplayFrameRate(int* count, LPD3DRECT lpExtents );
#ifdef __cplusplus
};
#endif
#endif // __RMFULL_H__
| 1,307 |
410 | <reponame>redchew-fork/BlueshiftEngine
// Copyright(c) 2017 POLYGONTEK
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "Platform/Intrinsics.h"
BE_FORCE_INLINE avxf set_256ps(float a, float b, float c, float d, float e, float f, float g, float h) {
return _mm256_set_ps(h, g, f, e, d, c, b, a);
}
BE_FORCE_INLINE avxf set1_256ps(float a) {
return _mm256_set1_ps(a);
}
BE_FORCE_INLINE avxf set2x128_256ps(const __m128 &a, const __m128 &b) {
return _mm256_set_m128(b, a);
}
BE_FORCE_INLINE avxf setzero_256ps() {
return _mm256_setzero_ps();
}
BE_FORCE_INLINE avxf load_256ps(const float *a) {
return _mm256_load_ps(a);
}
BE_FORCE_INLINE avxf loadu_256ps(const float *a) {
return _mm256_loadu_ps(a);
}
BE_FORCE_INLINE avxf broadcast_256ss(const float *a) {
return _mm256_broadcast_ss(a);
}
BE_FORCE_INLINE avxf broadcast_256ps(const ssef *a) {
return _mm256_broadcast_ps(&a->m128);
}
BE_FORCE_INLINE void store_256ps(const avxf &a, float *dst) {
_mm256_store_ps(dst, a);
}
BE_FORCE_INLINE void storeu_256ps(const avxf &a, float *dst) {
_mm256_storeu_ps(dst, a);
}
BE_FORCE_INLINE void storent_256ps(const avxf &a, float *dst) {
_mm256_stream_ps(dst, a);
}
BE_FORCE_INLINE avxf epi32_to_256ps(const __m256i &a) {
return _mm256_cvtepi32_ps(a);
}
BE_FORCE_INLINE avxf abs_256ps(const avxf &a) {
return _mm256_and_ps(a, _mm256_castsi256_ps(_mm256_set1_epi32(0x7fffffff)));
}
BE_FORCE_INLINE avxf sqr_256ps(const avxf &a) {
return _mm256_mul_ps(a, a);
}
BE_FORCE_INLINE avxf sqrt_256ps(const avxf &a) {
return _mm256_sqrt_ps(a);
}
// Returns reciprocal with 12 bits of precision.
BE_FORCE_INLINE avxf rcp12_256ps(const avxf &a) {
return _mm256_rcp_ps(a);
}
// Returns reciprocal with at least 16 bits precision.
BE_FORCE_INLINE avxf rcp16_256ps(const avxf &a) {
avxf r = _mm256_rcp_ps(a);
// Newton-Raphson approximation to improve precision.
return _mm256_mul_ps(r, _mm256_sub_ps(_mm256_set1_ps(2.0f), _mm256_mul_ps(a, r)));
}
// Returns reciprocal with close to full precision.
BE_FORCE_INLINE avxf rcp32_256ps(const avxf &a) {
avxf r = _mm256_rcp_ps(a);
// Newton-Raphson approximation to improve precision.
r = _mm256_mul_ps(r, _mm256_sub_ps(_mm256_set1_ps(2.0f), _mm256_mul_ps(a, r)));
return _mm256_mul_ps(r, _mm256_sub_ps(_mm256_set1_ps(2.0f), _mm256_mul_ps(a, r)));
}
// Divides with at least 12 bits precision.
BE_FORCE_INLINE avxf div12_256ps(const avxf &a, const avxf &b) {
return _mm256_mul_ps(a, rcp12_256ps(b));
}
// Divides with at least 16 bits precision.
BE_FORCE_INLINE avxf div16_256ps(const avxf &a, const avxf &b) {
return _mm256_mul_ps(a, rcp16_256ps(b));
}
// Divides with close to full precision.
BE_FORCE_INLINE avxf div32_256ps(const avxf &a, const avxf &b) {
return _mm256_mul_ps(a, rcp32_256ps(b));
}
// Returns reciprocal square root with 12 bits of precision.
BE_FORCE_INLINE avxf rsqrt12_256ps(const avxf &a) {
return _mm256_rsqrt_ps(a);
}
// Returns reciprocal square root with at least 16 bits precision.
BE_FORCE_INLINE avxf rsqrt16_256ps(const avxf &a) {
avxf r = _mm256_rsqrt_ps(a);
// Newton-Raphson approximation to improve precision.
return _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(1.5f), r), _mm256_mul_ps(_mm256_mul_ps(_mm256_mul_ps(a, _mm256_set1_ps(-0.5f)), r), _mm256_mul_ps(r, r)));
}
// Returns reciprocal square root with close to full precision.
BE_FORCE_INLINE avxf rsqrt32_256ps(const avxf &a) {
avxf r = _mm256_rsqrt_ps(a);
// Newton-Raphson approximation to improve precision.
r = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(1.5f), r), _mm256_mul_ps(_mm256_mul_ps(_mm256_mul_ps(a, _mm256_set1_ps(-0.5f)), r), _mm256_mul_ps(r, r)));
return _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(1.5f), r), _mm256_mul_ps(_mm256_mul_ps(_mm256_mul_ps(a, _mm256_set1_ps(-0.5f)), r), _mm256_mul_ps(r, r)));
}
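// Note on the refinement steps above (explanatory sketch): the *_16 and *_32 variants
// apply one and two Newton-Raphson iterations, respectively, to the hardware estimate.
// For the reciprocal of a the update is r' = r * (2 - a * r) (Newton's method on
// f(r) = 1/r - a); for the reciprocal square root it is r' = r * (1.5 - 0.5 * a * r * r)
// (Newton's method on f(r) = 1/r^2 - a). Each iteration roughly doubles the number of
// correct bits.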
BE_FORCE_INLINE avxf operator+(const avxf &a) { return a; }
BE_FORCE_INLINE avxf operator-(const avxf &a) { return bx_mm256_neg_ps(a.m256); }
BE_FORCE_INLINE avxf operator+(const avxf &a, const avxf &b) { return _mm256_add_ps(a, b); }
BE_FORCE_INLINE avxf operator+(const avxf &a, const float &b) { return a + set1_256ps(b); }
BE_FORCE_INLINE avxf operator+(const float &a, const avxf &b) { return set1_256ps(a) + b; }
BE_FORCE_INLINE avxf operator-(const avxf &a, const avxf &b) { return _mm256_sub_ps(a, b); }
BE_FORCE_INLINE avxf operator-(const avxf &a, const float &b) { return a - set1_256ps(b); }
BE_FORCE_INLINE avxf operator-(const float &a, const avxf &b) { return set1_256ps(a) - b; }
BE_FORCE_INLINE avxf operator*(const avxf &a, const avxf &b) { return _mm256_mul_ps(a, b); }
BE_FORCE_INLINE avxf operator*(const avxf &a, const float &b) { return a * set1_256ps(b); }
BE_FORCE_INLINE avxf operator*(const float &a, const avxf &b) { return set1_256ps(a) * b; }
BE_FORCE_INLINE avxf operator/(const avxf &a, const avxf &b) { return a * rcp32_256ps(b); }
BE_FORCE_INLINE avxf operator/(const avxf &a, const float &b) { return a * rcp32_256ps(set1_256ps(b)); }
BE_FORCE_INLINE avxf operator/(const float &a, const avxf &b) { return a * rcp32_256ps(b); }
BE_FORCE_INLINE avxf operator&(const avxf &a, const avxf &b) { return _mm256_and_ps(a, b); }
BE_FORCE_INLINE avxf operator&(const avxf &a, const avxi &b) { return _mm256_and_ps(a, _mm256_castsi256_ps(b)); }
BE_FORCE_INLINE avxf operator|(const avxf &a, const avxf &b) { return _mm256_or_ps(a, b); }
BE_FORCE_INLINE avxf operator|(const avxf &a, const avxi &b) { return _mm256_or_ps(a, _mm256_castsi256_ps(b)); }
BE_FORCE_INLINE avxf operator^(const avxf &a, const avxf &b) { return _mm256_xor_ps(a, b); }
BE_FORCE_INLINE avxf operator^(const avxf &a, const avxi &b) { return _mm256_xor_ps(a, _mm256_castsi256_ps(b)); }
BE_FORCE_INLINE avxf operator==(const avxf &a, const avxf &b) { return _mm256_cmp_ps(a, b, _CMP_EQ_UQ); }
BE_FORCE_INLINE avxf operator==(const avxf &a, const float &b) { return a == set1_256ps(b); }
BE_FORCE_INLINE avxf operator==(const float &a, const avxf &b) { return set1_256ps(a) == b; }
BE_FORCE_INLINE avxf operator!=(const avxf &a, const avxf &b) { return _mm256_cmp_ps(a, b, _CMP_NEQ_UQ); }
BE_FORCE_INLINE avxf operator!=(const avxf &a, const float &b) { return a != set1_256ps(b); }
BE_FORCE_INLINE avxf operator!=(const float &a, const avxf &b) { return set1_256ps(a) != b; }
BE_FORCE_INLINE avxf operator<(const avxf &a, const avxf &b) { return _mm256_cmp_ps(a, b, _CMP_NGE_UQ); }
BE_FORCE_INLINE avxf operator<(const avxf &a, const float &b) { return a < set1_256ps(b); }
BE_FORCE_INLINE avxf operator<(const float &a, const avxf &b) { return set1_256ps(a) < b; }
BE_FORCE_INLINE avxf operator>(const avxf &a, const avxf &b) { return _mm256_cmp_ps(a, b, _CMP_NLE_UQ); }
BE_FORCE_INLINE avxf operator>(const avxf &a, const float &b) { return a > set1_256ps(b); }
BE_FORCE_INLINE avxf operator>(const float &a, const avxf &b) { return set1_256ps(a) > b; }
BE_FORCE_INLINE avxf operator>=(const avxf &a, const avxf &b) { return _mm256_cmp_ps(a, b, _CMP_NLT_UQ); }
BE_FORCE_INLINE avxf operator>=(const avxf &a, const float &b) { return a >= set1_256ps(b); }
BE_FORCE_INLINE avxf operator>=(const float &a, const avxf &b) { return set1_256ps(a) >= b; }
BE_FORCE_INLINE avxf operator<=(const avxf &a, const avxf &b) { return _mm256_cmp_ps(a, b, _CMP_NGT_UQ); }
BE_FORCE_INLINE avxf operator<=(const avxf &a, const float &b) { return a <= set1_256ps(b); }
BE_FORCE_INLINE avxf operator<=(const float &a, const avxf &b) { return set1_256ps(a) <= b; }
BE_FORCE_INLINE avxf &operator+=(avxf &a, const avxf &b) { return a = a + b; }
BE_FORCE_INLINE avxf &operator+=(avxf &a, const float &b) { return a = a + b; }
BE_FORCE_INLINE avxf &operator-=(avxf &a, const avxf &b) { return a = a - b; }
BE_FORCE_INLINE avxf &operator-=(avxf &a, const float &b) { return a = a - b; }
BE_FORCE_INLINE avxf &operator*=(avxf &a, const avxf &b) { return a = a * b; }
BE_FORCE_INLINE avxf &operator*=(avxf &a, const float &b) { return a = a * b; }
BE_FORCE_INLINE avxf &operator/=(avxf &a, const avxf &b) { return a = a / b; }
BE_FORCE_INLINE avxf &operator/=(avxf &a, const float &b) { return a = a / b; }
BE_FORCE_INLINE avxf &operator&=(avxf &a, const avxf &b) { return a = a & b; }
BE_FORCE_INLINE avxf &operator&=(avxf &a, const avxi &b) { return a = a & b; }
BE_FORCE_INLINE avxf &operator|=(avxf &a, const avxf &b) { return a = a | b; }
BE_FORCE_INLINE avxf &operator|=(avxf &a, const avxi &b) { return a = a | b; }
BE_FORCE_INLINE avxf &operator^=(avxf &a, const avxf &b) { return a = a ^ b; }
BE_FORCE_INLINE avxf &operator^=(avxf &a, const avxi &b) { return a = a ^ b; }
// dst = a * b + c
BE_FORCE_INLINE avxf madd_256ps(const avxf &a, const avxf &b, const avxf &c) { return bx_mm256_madd_ps(a.m256, b.m256, c.m256); }
// dst = a * b - c
BE_FORCE_INLINE avxf msub_256ps(const avxf &a, const avxf &b, const avxf &c) { return bx_mm256_msub_ps(a.m256, b.m256, c.m256); }
// dst = -(a * b) + c
BE_FORCE_INLINE avxf nmadd_256ps(const avxf &a, const avxf &b, const avxf &c) { return bx_mm256_nmadd_ps(a.m256, b.m256, c.m256); }
// dst = -(a * b) - c
BE_FORCE_INLINE avxf nmsub_256ps(const avxf &a, const avxf &b, const avxf &c) { return bx_mm256_nmsub_ps(a.m256, b.m256, c.m256); }
// dst = (a0 + a1, a2 + a3, b0 + b1, b2 + b3, a4 + a5, a6 + a7, b4 + b5, b6 + b7)
BE_FORCE_INLINE avxf hadd_256ps(const avxf &a, const avxf &b) { return _mm256_hadd_ps(a, b); }
BE_FORCE_INLINE avxf floor_256ps(const avxf &a) { return _mm256_round_ps(a, _MM_FROUND_TO_NEG_INF); }
BE_FORCE_INLINE avxf ceil_256ps(const avxf &a) { return _mm256_round_ps(a, _MM_FROUND_TO_POS_INF); }
BE_FORCE_INLINE avxf trunc_256ps(const avxf &a) { return _mm256_round_ps(a, _MM_FROUND_TO_ZERO); }
BE_FORCE_INLINE avxf round_256ps(const avxf &a) { return _mm256_round_ps(a, _MM_FROUND_TO_NEAREST_INT); }
BE_FORCE_INLINE avxf frac_256ps(const avxf &a) { return a - floor_256ps(a); }
// Unpacks to (a0, b0, a1, b1, a4, b4, a5, b5).
BE_FORCE_INLINE avxf unpacklo_256ps(const avxf &a, const avxf &b) { return _mm256_unpacklo_ps(a.m256, b.m256); }
// Unpacks to (a2, b2, a3, b3, a6, b6, a7, b7).
BE_FORCE_INLINE avxf unpackhi_256ps(const avxf &a, const avxf &b) { return _mm256_unpackhi_ps(a.m256, b.m256); }
// Shuffles 2x128 bits packed floats using template parameters. ix = [0(2), 1(3)].
template <size_t i0, size_t i1>
BE_FORCE_INLINE avxf shuffle_256ps(const avxf &a) {
return _mm256_permute2f128_ps(a, a, (i1 << 4) | (i0));
}
// Shuffles two 2x128 bits packed floats using template parameters. ix = [0, 3].
template <size_t i0, size_t i1>
BE_FORCE_INLINE avxf shuffle_256ps(const avxf &a, const avxf &b) {
return _mm256_permute2f128_ps(a, b, (i1 << 4) | (i0));
}
// Shuffles 4x32 bits floats for each 128 bits data using template parameters. ix = [0, 3].
template <size_t i0, size_t i1, size_t i2, size_t i3>
BE_FORCE_INLINE avxf shuffle_256ps(const avxf &b) {
return _mm256_permute_ps(b, _MM_SHUFFLE(i3, i2, i1, i0));
}
template <size_t i0>
BE_FORCE_INLINE avxf shuffle_256ps(const avxf &a) {
return _mm256_permute_ps(a, _MM_SHUFFLE(i0, i0, i0, i0));
}
// Shuffles two 4x32 bits floats for each 128 bits data using template parameters. ix = [0, 3].
template <size_t i0, size_t i1, size_t i2, size_t i3>
BE_FORCE_INLINE avxf shuffle_256ps(const avxf &a, const avxf &b) {
return _mm256_shuffle_ps(a, b, _MM_SHUFFLE(i3, i2, i1, i0));
}
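// The specializations below map these common shuffle patterns onto cheaper
// single-instruction forms (moveldup / movehdup / movedup).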
template <>
BE_FORCE_INLINE avxf shuffle_256ps<0, 0, 2, 2>(const avxf &a) { return _mm256_moveldup_ps(a); }
template <>
BE_FORCE_INLINE avxf shuffle_256ps<1, 1, 3, 3>(const avxf &a) { return _mm256_movehdup_ps(a); }
template <>
BE_FORCE_INLINE avxf shuffle_256ps<0, 1, 0, 1>(const avxf &a) { return _mm256_castpd_ps(_mm256_movedup_pd(_mm256_castps_pd(a))); }
// Extracts 128 bits value with the given index. i = [0, 1].
template <size_t i>
BE_FORCE_INLINE ssef extract_256ps(const avxf &a) { return _mm256_extractf128_ps(a, i); }
// Inserts lower 128 bits of b to a in [128*dst, 128*dst+127] bits. dst = [0, 1].
template <size_t dst>
BE_FORCE_INLINE avxf insert_256ps(const avxf &a, const ssef &b) { return _mm256_insertf128_ps(a, b, dst); }
// Selects 8x32 bits floats using mask: takes b where a mask lane is set, a otherwise.
BE_FORCE_INLINE avxf select_256ps(const avxf &a, const avxf &b, const avxb &mask) { return _mm256_blendv_ps(a, b, mask); }
BE_FORCE_INLINE avxf min_256ps(const avxf &a, const avxf &b) { return _mm256_min_ps(a.m256, b.m256); }
BE_FORCE_INLINE avxf min_256ps(const avxf &a, const float &b) { return _mm256_min_ps(a.m256, set1_256ps(b)); }
BE_FORCE_INLINE avxf min_256ps(const float &a, const avxf &b) { return _mm256_min_ps(set1_256ps(a), b.m256); }
BE_FORCE_INLINE avxf max_256ps(const avxf &a, const avxf &b) { return _mm256_max_ps(a.m256, b.m256); }
BE_FORCE_INLINE avxf max_256ps(const avxf &a, const float &b) { return _mm256_max_ps(a.m256, set1_256ps(b)); }
BE_FORCE_INLINE avxf max_256ps(const float &a, const avxf &b) { return _mm256_max_ps(set1_256ps(a), b.m256); }
// (m01, m01, m23, m23, m45, m45, m67, m67)
BE_FORCE_INLINE avxf vreduce_min2_256ps(const avxf &a) { return min_256ps(shuffle_256ps<1, 0, 3, 2>(a), a); }
// (m0123, m0123, m0123, m0123, m4567, m4567, m4567, m4567)
BE_FORCE_INLINE avxf vreduce_min4_256ps(const avxf &a) { avxf h = vreduce_min2_256ps(a); return min_256ps(shuffle_256ps<2, 3, 0, 1>(h), h); }
// (m01234567, m01234567, m01234567, m01234567, m01234567, m01234567, m01234567, m01234567)
BE_FORCE_INLINE avxf vreduce_min_256ps(const avxf &a) { avxf h = vreduce_min4_256ps(a); return min_256ps(shuffle_256ps<1, 0>(h), h); }
// (m01, m01, m23, m23, m45, m45, m67, m67)
BE_FORCE_INLINE avxf vreduce_max2_256ps(const avxf &a) { return max_256ps(shuffle_256ps<1, 0, 3, 2>(a), a); }
// (m0123, m0123, m0123, m0123, m4567, m4567, m4567, m4567)
BE_FORCE_INLINE avxf vreduce_max4_256ps(const avxf &a) { avxf h = vreduce_max2_256ps(a); return max_256ps(shuffle_256ps<2, 3, 0, 1>(h), h); }
// (m01234567, m01234567, m01234567, m01234567, m01234567, m01234567, m01234567, m01234567)
BE_FORCE_INLINE avxf vreduce_max_256ps(const avxf &a) { avxf h = vreduce_max4_256ps(a); return max_256ps(shuffle_256ps<1, 0>(h), h); }
// Returns minimum value of all 8 components.
BE_FORCE_INLINE float reduce_min_256ps(const avxf &a) { return _mm_cvtss_f32(extract_256ps<0>(vreduce_min_256ps(a))); }
// Returns maximum value of all 8 components.
BE_FORCE_INLINE float reduce_max_256ps(const avxf &a) { return _mm_cvtss_f32(extract_256ps<0>(vreduce_max_256ps(a))); }
// Returns index of minimum component.
BE_FORCE_INLINE size_t select_min_256ps(const avxf &a) { return CountTrailingZeros(_mm256_movemask_ps(a == vreduce_min_256ps(a))); }
// Returns index of maximum component.
BE_FORCE_INLINE size_t select_max_256ps(const avxf &a) { return CountTrailingZeros(_mm256_movemask_ps(a == vreduce_max_256ps(a))); }
// Returns index of minimum component with valid index mask.
BE_FORCE_INLINE size_t select_min_256ps(const avxf &a, const avxb &validmask) {
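    // Invalid lanes are forced to +infinity so they can never win the minimum.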
const avxf v = select_256ps(set1_256ps(FLT_INFINITY), a, validmask);
return CountTrailingZeros(_mm256_movemask_ps(_mm256_and_ps(validmask, (v == vreduce_min_256ps(v)))));
}
// Returns index of maximum component with valid index mask.
BE_FORCE_INLINE size_t select_max_256ps(const avxf &a, const avxb &validmask) {
const avxf v = select_256ps(set1_256ps(-FLT_INFINITY), a, validmask);
return CountTrailingZeros(_mm256_movemask_ps(_mm256_and_ps(validmask, (v == vreduce_max_256ps(v)))));
}
// Broadcasts sums of all components for each 128 bits packed floats.
BE_FORCE_INLINE avxf sum_256ps(const avxf &a) {
__m256 hadd = _mm256_hadd_ps(a, a); // (x1 + y1, z1 + w1, x1 + y1, z1 + w1, x2 + y2, z2 + w2, x2 + y2, z2 + w2)
return _mm256_hadd_ps(hadd, hadd); // (x1 + y1 + z1 + w1, x1 + y1 + z1 + w1, x2 + y2 + z2 + w2, x2 + y2 + z2 + w2)
}
| 7,241 |
819 | /*
Copyright 2017 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS-IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "seurat/image/inpainting.h"
#include <array>
#include <random>
#include "gtest/gtest.h"
#include "seurat/base/array2d.h"
#include "seurat/base/color.h"
#include "seurat/testing/ion_test_utils.h"
namespace seurat {
namespace image {
namespace {
using base::Array2D;
using base::Color4f;
using ion::math::Point2i;
constexpr float kEpsilon = 1.0e-6f;
TEST(InpaintingTest, TinyImages) {
// Test with 0x0 and 1x1 images.
Image4f image;
Array2D<bool> mask;
image.Resize(0, 0);
mask.Resize(0, 0);
// This should not crash.
InpaintSmooth(mask, &image);
image.Resize(1, 1);
mask.Resize(1, 1);
mask.At(0, 0) = false;
image.At(0, 0) = {1.0f, 0.75f, 0.0f, 0.5f};
InpaintSmooth(mask, &image);
EXPECT_EQ(Color4f(1.0f, 0.75f, 0.0f, 0.5f), image.At(0, 0));
image.Resize(1, 1);
mask.Resize(1, 1);
mask.At(0, 0) = true;
image.At(0, 0) = {1.0f, 0.75f, 0.0f, 0.5f};
InpaintSmooth(mask, &image);
EXPECT_EQ(Color4f(1.0f, 0.75f, 0.0f, 0.5f), image.At(0, 0));
}
Image4f MakeTesting2x2Image() {
Image4f image(2, 2);
image.At(0, 0) = {1.0f, 0.0f, 0.0f, 0.0f};
image.At(1, 0) = {0.0f, 1.0f, 0.0f, 0.0f};
image.At(0, 1) = {0.0f, 0.0f, 1.0f, 0.0f};
image.At(1, 1) = {0.0f, 0.0f, 0.0f, 1.0f};
return image;
}
TEST(InpaintingTest, Inpaint2x2) {
const Image4f kOriginalImage = MakeTesting2x2Image();
{
Image4f image = kOriginalImage;
Array2D<bool> mask(2, 2);
mask.Fill(false);
mask.At(0, 0) = true;
InpaintSmooth(mask, &image);
const Color4f expected_color =
(kOriginalImage.At(1, 0) + kOriginalImage.At(0, 1)) * 0.5f;
EXPECT_VECTOR_NEAR(expected_color, image.At(0, 0), kEpsilon);
EXPECT_VECTOR_NEAR(kOriginalImage.At(1, 0), image.At(1, 0), kEpsilon);
EXPECT_VECTOR_NEAR(kOriginalImage.At(0, 1), image.At(0, 1), kEpsilon);
EXPECT_VECTOR_NEAR(kOriginalImage.At(1, 1), image.At(1, 1), kEpsilon);
}
{
Image4f image = kOriginalImage;
Array2D<bool> mask(2, 2);
mask.Fill(false);
mask.At(0, 1) = true;
InpaintSmooth(mask, &image);
const Color4f expected_color =
(kOriginalImage.At(0, 0) + kOriginalImage.At(1, 1)) * 0.5f;
EXPECT_VECTOR_NEAR(kOriginalImage.At(0, 0), image.At(0, 0), kEpsilon);
EXPECT_VECTOR_NEAR(kOriginalImage.At(1, 0), image.At(1, 0), kEpsilon);
EXPECT_VECTOR_NEAR(expected_color, image.At(0, 1), kEpsilon);
EXPECT_VECTOR_NEAR(kOriginalImage.At(1, 1), image.At(1, 1), kEpsilon);
}
}
TEST(InpaintingTest, InpaintLargeImageWithIsolatedPixels) {
// Large enough to perform several recursive iterations.
Image4f large_image(13, 16);
std::mt19937 random;
std::uniform_real_distribution<float> dist(0.0f, 1.0f);
for (auto& color : large_image) {
color = {dist(random), dist(random), dist(random), dist(random)};
}
Image4f inpainted_image = large_image;
Array2D<bool> mask(large_image.GetSize());
std::array<Point2i, 3> masked_pixels = {{{4, 7}, {8, 8}, {12, 0}}};
for (const auto& coord : masked_pixels) {
mask.At(coord) = true;
}
InpaintSmooth(mask, &inpainted_image);
// Test that the mask was respected and all unmasked pixels were left
// unmodified.
for (int y = 0; y < large_image.Height(); ++y) {
for (int x = 0; x < large_image.Width(); ++x) {
if (!mask.At(x, y)) {
EXPECT_EQ(large_image.At(x, y), inpainted_image.At(x, y));
}
}
}
}
TEST(InpaintingTest, InpaintLargeImageWithLinearGradient) {
// Large enough to perform several recursive iterations.
Image4f large_image(13, 16);
std::mt19937 random;
std::uniform_real_distribution<float> dist(0.0f, 1.0f);
for (auto& color : large_image) {
color = {dist(random), dist(random), dist(random), dist(random)};
}
const Color4f kTopColor(1.0f, 0.0f, 1.0f, 0.0f);
const Color4f kBottomColor(0.0f, 1.0f, 1.0f, 1.0f);
Array2D<bool> mask(large_image.GetSize());
mask.Fill(true);
// Set the top & bottom rows to a fixed color & inpaint everything between.
for (int x = 0; x < large_image.Width(); ++x) {
large_image.At(x, 0) = kBottomColor;
large_image.At(x, large_image.Height() - 1) = kTopColor;
mask.At(x, 0) = false;
mask.At(x, large_image.Height() - 1) = false;
}
Image4f inpainted_image = large_image;
InpaintSmooth(mask, &inpainted_image);
// All rows should be the same.
for (int y = 0; y < large_image.Height(); ++y) {
for (int x = 0; x < large_image.Width(); ++x) {
EXPECT_VECTOR_NEAR(inpainted_image.At(0, y), inpainted_image.At(x, y),
1e-3f);
}
}
// Verify that a vertical gradient is generated.
for (int y = 1; y < large_image.Height(); ++y) {
for (int x = 0; x < large_image.Width(); ++x) {
Color4f above_color = inpainted_image.At(x, y);
Color4f below_color = inpainted_image.At(x, y - 1);
EXPECT_GT(above_color[0], below_color[0]);
EXPECT_LT(above_color[1], below_color[1]);
EXPECT_EQ(above_color[2], below_color[2]);
EXPECT_LT(above_color[3], below_color[3]);
}
}
}
TEST(InpaintingTest, Inpaint_NaN_LargeImageWithLinearGradient) {
// Large enough to perform several recursive iterations.
Image4f large_image(13, 16);
// Start with a NaN image.
Color4f nan_color;
nan_color.Fill(std::numeric_limits<float>::quiet_NaN());
large_image.Fill(nan_color);
const Color4f kTopColor(1.0f, 0.0f, 1.0f, 0.0f);
const Color4f kBottomColor(0.0f, 1.0f, 1.0f, 1.0f);
Array2D<bool> mask(large_image.GetSize());
mask.Fill(true);
// Set the top & bottom rows to a fixed color & inpaint everything between.
for (int x = 0; x < large_image.Width(); ++x) {
large_image.At(x, 0) = kBottomColor;
large_image.At(x, large_image.Height() - 1) = kTopColor;
mask.At(x, 0) = false;
mask.At(x, large_image.Height() - 1) = false;
}
Image4f inpainted_image = large_image;
InpaintSmooth(mask, &inpainted_image);
// All rows should be the same.
for (int y = 0; y < large_image.Height(); ++y) {
for (int x = 0; x < large_image.Width(); ++x) {
EXPECT_VECTOR_NEAR(inpainted_image.At(0, y), inpainted_image.At(x, y),
1e-3f);
}
}
// Verify that a vertical gradient is generated.
for (int y = 1; y < large_image.Height(); ++y) {
for (int x = 0; x < large_image.Width(); ++x) {
Color4f above_color = inpainted_image.At(x, y);
Color4f below_color = inpainted_image.At(x, y - 1);
EXPECT_GT(above_color[0], below_color[0]);
EXPECT_LT(above_color[1], below_color[1]);
EXPECT_EQ(above_color[2], below_color[2]);
EXPECT_LT(above_color[3], below_color[3]);
}
}
}
} // namespace
} // namespace image
} // namespace seurat
| 3,125 |
4,537 | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <mesos/module/container_logger.hpp>
#include <mesos/slave/container_logger.hpp>
#include <stout/error.hpp>
#include <stout/nothing.hpp>
#include <stout/option.hpp>
#include <stout/try.hpp>
#include "module/manager.hpp"
#include "slave/container_loggers/sandbox.hpp"
using std::string;
namespace mesos {
namespace slave {
Try<ContainerLogger*> ContainerLogger::create(const Option<string>& type)
{
ContainerLogger* logger = nullptr;
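  // Default to the sandbox container logger when no module type is specified.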
if (type.isNone()) {
logger = new internal::slave::SandboxContainerLogger();
} else {
// Try to load container logger from module.
Try<ContainerLogger*> module =
modules::ModuleManager::create<ContainerLogger>(type.get());
if (module.isError()) {
return Error(
"Failed to create container logger module '" + type.get() +
"': " + module.error());
}
logger = module.get();
}
// Initialize the module.
Try<Nothing> initialize = logger->initialize();
if (initialize.isError()) {
delete logger;
return Error(
"Failed to initialize container logger module: " + initialize.error());
}
return logger;
}
} // namespace slave {
} // namespace mesos {
| 616 |
14,668 | // Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "components/autofill_assistant/browser/mock_client.h"
#include "components/autofill_assistant/browser/mock_personal_data_manager.h"
namespace autofill_assistant {
MockClient::MockClient() = default;
MockClient::~MockClient() = default;
} // namespace autofill_assistant
| 141 |
981 | <reponame>Alexmitter/box64
/*******************************************************************
* File automatically generated by rebuild_wrappers.py (v2.1.0.16) *
*******************************************************************/
#ifndef __wrappedsecret1DEFS_H_
#define __wrappedsecret1DEFS_H_
#endif // __wrappedsecret1DEFS_H_
| 87 |
1,761 | # Structural Similarity (SSIM) Example
#
# Note: You will need an SD card to run this example.
#
# This example shows off how to use the SSIM algorithm on your OpenMV Cam
# to detect differences between two images. The SSIM algorithm compares
# 8x8 blocks of pixels between the two images to determine a similarity
# score.
import sensor, image, pyb, os, time
# The image has likely changed if the sim.min() is lower than this.
MIN_TRIGGER_THRESHOLD = -0.4
sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.
clock = time.clock() # Tracks FPS.
if not "temp" in os.listdir(): os.mkdir("temp") # Make a temp directory
print("About to save background image...")
sensor.skip_frames(time = 2000) # Give the user time to get ready.
sensor.snapshot().save("temp/bg.bmp")
print("Saved background image!")
while(True):
clock.tick() # Track elapsed milliseconds between snapshots().
img = sensor.snapshot() # Take a picture and return the image.
sim = img.get_similarity("temp/bg.bmp")
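    # The similarity object reports statistics over the per-8x8-block scores;
    # min() is the score of the worst-matching block.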
change = "- Change -" if sim.min() < MIN_TRIGGER_THRESHOLD else "- No Change -"
print(clock.fps(), change, sim)
| 420 |
2,151 | <gh_stars>1000+
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "components/exo/data_source.h"
#include "base/bind.h"
#include "base/files/file_util.h"
#include "base/test/scoped_task_environment.h"
#include "components/exo/data_source_delegate.h"
#include "components/exo/test/exo_test_base.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace exo {
namespace {
constexpr char kTestData[] = "Test Data";
class DataSourceTest : public testing::Test {
protected:
base::test::ScopedTaskEnvironment scoped_task_environment_ = {
base::test::ScopedTaskEnvironment::MainThreadType::DEFAULT,
base::test::ScopedTaskEnvironment::ExecutionMode::QUEUED};
};
class TestDataSourceDelegate : public DataSourceDelegate {
public:
TestDataSourceDelegate() {}
~TestDataSourceDelegate() override {}
// Overridden from DataSourceDelegate:
void OnDataSourceDestroying(DataSource* source) override {}
void OnTarget(const std::string& mime_type) override {}
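  // Writes the canned test payload into the given fd so DataSource::ReadData
  // can read it back.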
void OnSend(const std::string& mime_type, base::ScopedFD fd) override {
ASSERT_TRUE(
base::WriteFileDescriptor(fd.get(), kTestData, strlen(kTestData)));
}
void OnCancelled() override {}
void OnDndDropPerformed() override {}
void OnDndFinished() override {}
void OnAction(DndAction dnd_action) override {}
};
TEST_F(DataSourceTest, ReadData) {
TestDataSourceDelegate delegate;
DataSource data_source(&delegate);
data_source.Offer("text/plain;charset=utf-8");
data_source.ReadData(base::BindOnce([](const std::vector<uint8_t>& data) {
std::string string_data(data.begin(), data.end());
EXPECT_EQ(std::string(kTestData), string_data);
}));
scoped_task_environment_.RunUntilIdle();
}
TEST_F(DataSourceTest, ReadData_UnknownMimeType) {
TestDataSourceDelegate delegate;
DataSource data_source(&delegate);
data_source.Offer("text/unknown");
data_source.ReadData(base::BindOnce([](const std::vector<uint8_t>& data) {
FAIL()
<< "Callback should not be invoked when known mimetype is not offerred";
}));
scoped_task_environment_.RunUntilIdle();
}
TEST_F(DataSourceTest, ReadData_Destroyed) {
TestDataSourceDelegate delegate;
{
DataSource data_source(&delegate);
data_source.Offer("text/plain;charset=utf-8");
data_source.ReadData(base::BindOnce([](const std::vector<uint8_t>& data) {
FAIL() << "Callback should not be invoked after data source is destroyed";
}));
}
scoped_task_environment_.RunUntilIdle();
}
TEST_F(DataSourceTest, ReadData_Cancelled) {
TestDataSourceDelegate delegate;
DataSource data_source(&delegate);
data_source.Offer("text/plain;charset=utf-8");
data_source.ReadData(base::BindOnce([](const std::vector<uint8_t>& data) {
FAIL() << "Callback should not be invoked after cancelled";
}));
data_source.Cancelled();
scoped_task_environment_.RunUntilIdle();
}
} // namespace
} // namespace exo
| 1,054 |
933 | /*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/trace_processor/sqlite/query_constraints.h"
#include "perfetto/base/logging.h"
#include "test/gtest_and_gmock.h"
using testing::ElementsAreArray;
using testing::Field;
using testing::Matcher;
using testing::Matches;
using testing::Pointwise;
namespace perfetto {
namespace trace_processor {
namespace {
class QueryConstraintsTest : public ::testing::Test {
public:
QueryConstraintsTest() { PERFETTO_CHECK(sqlite3_initialize() == SQLITE_OK); }
};
TEST_F(QueryConstraintsTest, ConvertToAndFromSqlString) {
QueryConstraints qc;
qc.AddConstraint(12, 0, 0);
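  // The serialized form is "C<n>" followed by each constraint's column and op,
  // then "O<m>" followed by each order-by's column and desc flag.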
QueryConstraints::SqliteString only_constraint = qc.ToNewSqlite3String();
ASSERT_TRUE(strcmp(only_constraint.get(), "C1,12,0,O0") == 0);
QueryConstraints qc_constraint =
QueryConstraints::FromString(only_constraint.get());
ASSERT_EQ(qc, qc_constraint);
qc.AddOrderBy(1, false);
qc.AddOrderBy(21, true);
QueryConstraints::SqliteString result = qc.ToNewSqlite3String();
ASSERT_TRUE(strcmp(result.get(), "C1,12,0,O2,1,0,21,1") == 0);
QueryConstraints qc_result = QueryConstraints::FromString(result.get());
ASSERT_EQ(qc, qc_result);
}
TEST_F(QueryConstraintsTest, CheckEmptyConstraints) {
QueryConstraints qc;
QueryConstraints::SqliteString string_result = qc.ToNewSqlite3String();
ASSERT_TRUE(strcmp(string_result.get(), "C0,O0") == 0);
QueryConstraints qc_result =
QueryConstraints::FromString(string_result.get());
ASSERT_EQ(qc_result.constraints().size(), 0u);
ASSERT_EQ(qc_result.order_by().size(), 0u);
}
TEST_F(QueryConstraintsTest, OnlyOrderBy) {
QueryConstraints qc;
qc.AddOrderBy(3, true);
QueryConstraints::SqliteString string_result = qc.ToNewSqlite3String();
ASSERT_TRUE(strcmp(string_result.get(), "C0,O1,3,1") == 0);
QueryConstraints qc_result =
QueryConstraints::FromString(string_result.get());
ASSERT_EQ(qc, qc_result);
}
} // namespace
} // namespace trace_processor
} // namespace perfetto
| 935 |
2,073 | <gh_stars>1000+
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.util;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.jms.*;
import java.util.concurrent.CountDownLatch;
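/**
 * Helper thread that consumes messages from a destination until the expected
 * message count is reached, optionally committing or acknowledging in batches.
 */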
public class ConsumerThread extends Thread {
private static final Logger LOG = LoggerFactory.getLogger(ConsumerThread.class);
int messageCount = 1000;
int receiveTimeOut = 3000;
Destination destination;
Session session;
boolean durable;
boolean breakOnNull = true;
int sleep;
int batchSize;
int received = 0;
int transactions = 0;
boolean running = false;
CountDownLatch finished;
boolean bytesAsText;
public ConsumerThread(Session session, Destination destination) {
this.destination = destination;
this.session = session;
}
@Override
public void run() {
running = true;
MessageConsumer consumer = null;
String threadName = Thread.currentThread().getName();
LOG.info(threadName + " wait until " + messageCount + " messages are consumed");
try {
if (durable && destination instanceof Topic) {
consumer = session.createDurableSubscriber((Topic) destination, getName());
} else {
consumer = session.createConsumer(destination);
}
while (running && received < messageCount) {
Message msg = consumer.receive(receiveTimeOut);
if (msg != null) {
LOG.info(threadName + " Received " + (msg instanceof TextMessage ? ((TextMessage) msg).getText() : msg.getJMSMessageID()));
if (bytesAsText && (msg instanceof BytesMessage)) {
long length = ((BytesMessage) msg).getBodyLength();
byte[] bytes = new byte[(int) length];
((BytesMessage) msg).readBytes(bytes);
LOG.info("BytesMessage as text string: " + new String(bytes));
}
received++;
} else {
if (breakOnNull) {
break;
}
}
if (session.getTransacted()) {
if (batchSize > 0 && received > 0 && received % batchSize == 0) {
LOG.info(threadName + " Committing transaction: " + transactions++);
session.commit();
}
} else if (session.getAcknowledgeMode() == Session.CLIENT_ACKNOWLEDGE) {
if (batchSize > 0 && received > 0 && received % batchSize == 0) {
LOG.info("Acknowledging last " + batchSize + " messages; messages so far = " + received);
msg.acknowledge();
}
}
if (sleep > 0) {
Thread.sleep(sleep);
}
}
} catch (Exception e) {
e.printStackTrace();
} finally {
if (finished != null) {
finished.countDown();
}
if (consumer != null) {
LOG.info(threadName + " Consumed: " + this.getReceived() + " messages");
try {
consumer.close();
} catch (JMSException e) {
e.printStackTrace();
}
}
}
LOG.info(threadName + " Consumer thread finished");
}
public int getReceived() {
return received;
}
public boolean isDurable() {
return durable;
}
public void setDurable(boolean durable) {
this.durable = durable;
}
public void setMessageCount(int messageCount) {
this.messageCount = messageCount;
}
public void setBreakOnNull(boolean breakOnNull) {
this.breakOnNull = breakOnNull;
}
public int getBatchSize() {
return batchSize;
}
public void setBatchSize(int batchSize) {
this.batchSize = batchSize;
}
public int getMessageCount() {
return messageCount;
}
public boolean isBreakOnNull() {
return breakOnNull;
}
public int getReceiveTimeOut() {
return receiveTimeOut;
}
public void setReceiveTimeOut(int receiveTimeOut) {
this.receiveTimeOut = receiveTimeOut;
}
public boolean isRunning() {
return running;
}
public void setRunning(boolean running) {
this.running = running;
}
public int getSleep() {
return sleep;
}
public void setSleep(int sleep) {
this.sleep = sleep;
}
public CountDownLatch getFinished() {
return finished;
}
public void setFinished(CountDownLatch finished) {
this.finished = finished;
}
public boolean isBytesAsText() {
return bytesAsText;
}
public void setBytesAsText(boolean bytesAsText) {
this.bytesAsText = bytesAsText;
}
}
| 2,498 |
846 | <reponame>Winner2015/compileflow
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.alibaba.compileflow.engine.definition.bpmn;
import com.alibaba.compileflow.engine.common.CompileFlowException;
import com.alibaba.compileflow.engine.definition.common.AbstractFlowModel;
import com.alibaba.compileflow.engine.definition.common.TransitionNode;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Objects;
import java.util.stream.Collectors;
/**
* @author yusu
*/
public class BpmnModel extends AbstractFlowModel<FlowNode> {
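    // Process definitions contained in this model; getProcess() returns the first one.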
private List<Process> processes = new ArrayList<>(1);
public void addProcesses(Process process) {
processes.add(process);
}
public Process getProcess() {
return processes.get(0);
}
public <T extends FlowElement> T getFlowElement(String id) {
return (T)processes.stream().map(process -> process.getElement(id))
.filter(Objects::nonNull).findFirst()
.orElseThrow(() -> new CompileFlowException("Undefined element, element id is " + id));
}
@Override
public List<TransitionNode> getTransitionNodes() {
return processes.stream().map(Process::getFlowElements).flatMap(Collection::stream)
.filter(flowElement -> flowElement instanceof TransitionNode)
.map(flowElement -> (TransitionNode)flowElement)
.collect(Collectors.toList());
}
@Override
public List<FlowNode> getAllNodes() {
return processes.stream().map(Process::getAllNodes)
.flatMap(Collection::stream).collect(Collectors.toList());
}
@Override
public List<FlowNode> getRuntimeNodes() {
return getAllNodes();
}
@Override
public void addNode(FlowNode node) {
getAllNodes().add(node);
}
@Override
public FlowNode getNode(String id) {
return getAllNodes().stream().filter(node -> id.equals(node.getId())).findFirst()
.orElseThrow(() -> new CompileFlowException("Undefined node, node id is " + id));
}
public FlowNode getNodeByTag(String tag) {
return getAllNodes().stream().filter(node -> tag.equals(node.getTag())).findFirst()
.orElseThrow(() -> new CompileFlowException("Undefined node, node tag is " + tag));
}
@Override
public FlowNode getStartNode() {
return getAllNodes().stream().filter(node -> node instanceof StartEvent).findFirst()
.orElseThrow(() -> new CompileFlowException("No start node found"));
}
@Override
public FlowNode getEndNode() {
return getAllNodes().stream().filter(node -> node instanceof EndEvent).findFirst()
.orElseThrow(() -> new CompileFlowException("No end node found"));
}
}
| 1,199 |
1,532 | {
"args": ["PREROUTING", "SNAT"],
"kwargs": {
"to_source": "8.8.8.8"
},
"exception": {
"name": "OperationError",
"message": "iptables only supports to_source on the nat table and the SNAT jump (table=filter, jump=SNAT)"
}
}
| 124 |
839 | <filename>rt/bindings/soap/src/main/java/org/apache/cxf/binding/soap/interceptor/SoapActionInInterceptor.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cxf.binding.soap.interceptor;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.logging.Logger;
import org.apache.cxf.attachment.AttachmentDeserializer;
import org.apache.cxf.binding.soap.Soap11;
import org.apache.cxf.binding.soap.Soap12;
import org.apache.cxf.binding.soap.SoapBindingConstants;
import org.apache.cxf.binding.soap.SoapMessage;
import org.apache.cxf.binding.soap.jms.interceptor.SoapJMSInInterceptor;
import org.apache.cxf.binding.soap.model.SoapOperationInfo;
import org.apache.cxf.common.logging.LogUtils;
import org.apache.cxf.common.util.StringUtils;
import org.apache.cxf.endpoint.Endpoint;
import org.apache.cxf.helpers.CastUtils;
import org.apache.cxf.interceptor.Fault;
import org.apache.cxf.message.Exchange;
import org.apache.cxf.message.Message;
import org.apache.cxf.message.MessageUtils;
import org.apache.cxf.phase.Phase;
import org.apache.cxf.service.model.BindingOperationInfo;
import org.apache.cxf.service.model.OperationInfo;
import org.apache.cxf.ws.addressing.JAXWSAConstants;
public class SoapActionInInterceptor extends AbstractSoapInterceptor {
private static final Logger LOG = LogUtils.getL7dLogger(SoapActionInInterceptor.class);
private static final String ALLOW_NON_MATCHING_TO_DEFAULT = "allowNonMatchingToDefaultSoapAction";
private static final String CALCULATED_WSA_ACTION = SoapActionInInterceptor.class.getName() + ".ACTION";
public SoapActionInInterceptor() {
super(Phase.READ);
addAfter(ReadHeadersInterceptor.class.getName());
addAfter(EndpointSelectionInterceptor.class.getName());
}
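    /**
     * Extracts the SOAPAction for a message: the SOAPAction header for SOAP 1.1,
     * the "action" parameter of the Content-Type for SOAP 1.2, or the JMS SOAP
     * action property when present.
     */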
public static String getSoapAction(Message m) {
if (!(m instanceof SoapMessage)) {
return null;
}
SoapMessage message = (SoapMessage)m;
if (message.getVersion() instanceof Soap11) {
Map<String, List<String>> headers
= CastUtils.cast((Map<?, ?>)message.get(Message.PROTOCOL_HEADERS));
if (headers != null) {
List<String> sa = headers.get(SoapBindingConstants.SOAP_ACTION);
if (sa != null && !sa.isEmpty()) {
String action = sa.get(0);
if (action.startsWith("\"") || action.startsWith("\'")) {
action = action.substring(1, action.length() - 1);
}
return action;
}
}
} else if (message.getVersion() instanceof Soap12) {
String ct = (String) message.get(Message.CONTENT_TYPE);
if (ct == null) {
return null;
}
int start = ct.indexOf("action=");
if (start == -1 && ct.indexOf("multipart/related") == 0 && ct.indexOf("start-info") == -1) {
// the action property may not be found at the package's content-type for non-mtom multipart message
// but skip searching if the start-info property is set
List<String> cts = CastUtils.cast((List<?>)(((Map<?, ?>)
message.get(AttachmentDeserializer.ATTACHMENT_PART_HEADERS)).get(Message.CONTENT_TYPE)));
if (cts != null && !cts.isEmpty()) {
ct = cts.get(0);
start = ct.indexOf("action=");
}
}
if (start != -1) {
int end;
char c = ct.charAt(start + 7);
// handle the extraction robustly
if (c == '\"') {
start += 8;
end = ct.indexOf('\"', start);
} else if (c == '\\' && ct.charAt(start + 8) == '\"') {
start += 9;
end = ct.indexOf('\\', start);
} else {
start += 7;
end = ct.indexOf(';', start);
if (end == -1) {
end = ct.length();
}
}
return ct.substring(start, end);
}
}
// Return the Soap Action for the JMS Case
if (message.containsKey(SoapJMSInInterceptor.JMS_SOAP_ACTION_VALUE)) {
return (String)message.get(SoapJMSInInterceptor.JMS_SOAP_ACTION_VALUE);
}
return null;
}
public void handleMessage(SoapMessage message) throws Fault {
if (isRequestor(message)) {
return;
}
String action = getSoapAction(message);
if (!StringUtils.isEmpty(action)) {
getAndSetOperation(message, action);
message.put(SoapBindingConstants.SOAP_ACTION, action);
}
}
public static void getAndSetOperation(SoapMessage message, String action) {
getAndSetOperation(message, action, true);
}
public static void getAndSetOperation(SoapMessage message, String action, boolean strict) {
if (StringUtils.isEmpty(action)) {
return;
}
Exchange ex = message.getExchange();
Endpoint ep = ex.getEndpoint();
if (ep == null) {
return;
}
BindingOperationInfo bindingOp = null;
Collection<BindingOperationInfo> bops = ep.getEndpointInfo()
.getBinding().getOperations();
if (bops != null) {
for (BindingOperationInfo boi : bops) {
if (isActionMatch(message, boi, action)) {
if (bindingOp != null) {
//more than one op with the same action, will need to parse normally
return;
}
bindingOp = boi;
}
if (matchWSAAction(boi, action)) {
if (bindingOp != null && bindingOp != boi) {
//more than one op with the same action, will need to parse normally
return;
}
bindingOp = boi;
}
}
}
if (bindingOp == null) {
if (strict) {
                //we didn't match an operation, we'll try again later to make
//sure the incoming message did end up matching an operation.
//This could occur in some cases like WS-RM and WS-SecConv that will
//intercept the message with a new endpoint/operation
message.getInterceptorChain().add(new SoapActionInAttemptTwoInterceptor(action));
}
return;
}
ex.put(BindingOperationInfo.class, bindingOp);
}
private static boolean matchWSAAction(BindingOperationInfo boi, String action) {
Object o = getWSAAction(boi);
if (o != null) {
String oa = o.toString();
if (action.equals(oa)
|| action.equals(oa + "Request")
|| oa.equals(action + "Request")) {
return true;
}
}
return false;
}
private static String getWSAAction(BindingOperationInfo boi) {
Object o = boi.getOperationInfo().getInput().getProperty(CALCULATED_WSA_ACTION);
if (o == null) {
o = boi.getOperationInfo().getInput().getExtensionAttribute(JAXWSAConstants.WSAM_ACTION_QNAME);
if (o == null) {
o = boi.getOperationInfo().getInput().getExtensionAttribute(JAXWSAConstants.WSAW_ACTION_QNAME);
}
if (o == null) {
String start = getActionBaseUri(boi.getOperationInfo());
if (null == boi.getOperationInfo().getInputName()) {
o = addPath(start, boi.getOperationInfo().getName().getLocalPart());
} else {
o = addPath(start, boi.getOperationInfo().getInputName());
}
}
if (o != null) {
boi.getOperationInfo().getInput().setProperty(CALCULATED_WSA_ACTION, o);
}
}
return o.toString();
}
private static String getActionBaseUri(final OperationInfo operation) {
String interfaceName = operation.getInterface().getName().getLocalPart();
return addPath(operation.getName().getNamespaceURI(), interfaceName);
}
private static String getDelimiter(String uri) {
if (uri.startsWith("urn")) {
return ":";
}
return "/";
}
private static String addPath(String uri, String path) {
StringBuilder buffer = new StringBuilder();
buffer.append(uri);
String delimiter = getDelimiter(uri);
if (!uri.endsWith(delimiter) && !path.startsWith(delimiter)) {
buffer.append(delimiter);
}
buffer.append(path);
return buffer.toString();
}
public static class SoapActionInAttemptTwoInterceptor extends AbstractSoapInterceptor {
final String action;
public SoapActionInAttemptTwoInterceptor(String action) {
super(action, Phase.PRE_LOGICAL);
this.action = action;
}
public void handleMessage(SoapMessage message) throws Fault {
BindingOperationInfo boi = message.getExchange().getBindingOperationInfo();
if (boi == null) {
return;
}
if (StringUtils.isEmpty(action)) {
return;
}
if (isActionMatch(message, boi, action)) {
return;
}
if (matchWSAAction(boi, action)) {
return;
}
boolean synthetic = Boolean.TRUE.equals(boi.getProperty("operation.is.synthetic"));
if (!synthetic) {
throw new Fault("SOAP_ACTION_MISMATCH", LOG, null, action);
}
}
}
private static boolean isActionMatch(SoapMessage message, BindingOperationInfo boi, String action) {
SoapOperationInfo soi = boi.getExtensor(SoapOperationInfo.class);
if (soi == null) {
return false;
}
boolean allowNoMatchingToDefault = MessageUtils.getContextualBoolean(message,
ALLOW_NON_MATCHING_TO_DEFAULT,
false);
return action.equals(soi.getAction())
|| (allowNoMatchingToDefault && StringUtils.isEmpty(soi.getAction())
|| (message.getVersion() instanceof Soap12) && StringUtils.isEmpty(soi.getAction()));
}
}
| 5,326 |
679 | /**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
package org.openoffice.xmerge.converter.xml.sxw.wordsmith;
import java.io.IOException;
import org.w3c.dom.NodeList;
import org.w3c.dom.Node;
import org.w3c.dom.NamedNodeMap;
import org.w3c.dom.Element;
import org.openoffice.xmerge.Document;
import org.openoffice.xmerge.ConverterCapabilities;
import org.openoffice.xmerge.converter.xml.OfficeDocument;
import org.openoffice.xmerge.converter.xml.sxw.SxwDocument;
import org.openoffice.xmerge.converter.xml.*;
/**
* This class represents a paragraph in a WordSmith document.
* (A paragraph is "5" followed by 12 bytes of attributes.)
*
* @author <NAME>
*/
class WsePara extends Wse {
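    // The fields below hold the paragraph's attribute bytes that follow the leading "5" (see getBytes()).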
private byte spaceBefore = 0;
private byte spaceAfter = 0;
private byte leftIndent = 0;
private byte firstIndent = 0;
private byte rightIndent = 0;
private byte misc = 0;
private byte style = 0;
private byte lineSpace = 0;
private byte outline = 0;
private byte reserved = 0;
private static final byte LS_EXACTLY = (byte)0xC0;
private static final byte LS_ATLEAST = (byte)0x80;
private static final byte LS_MULTIPLE = (byte)0x40;
private static final byte LS_VALUEMASK = (byte)0x3F;
private static final byte ALIGN_RIGHT = (byte)2;
private static final byte ALIGN_LEFT = (byte)0;
private static final byte ALIGN_CENTER = (byte)1;
private static final byte ALIGN_JUST = (byte)3;
private StyleCatalog sc = null;
/**
* Constructor for use when going from DOM to WordSmith.
*
* @param p The paragraph style.
* @param sc The <code>StyleCatalog</code>.
*/
public WsePara(ParaStyle p, StyleCatalog sc) {
this.sc = sc;
ParaStyle ps = (ParaStyle)p.getResolved();
if (ps.isAttributeSet(ParaStyle.MARGIN_LEFT)) {
double temp = ps.getAttribute(ParaStyle.MARGIN_LEFT) * 1.6 / 100;
leftIndent = (byte) temp;
if ((temp - leftIndent) > 0.5) leftIndent++;
}
if (ps.isAttributeSet(ParaStyle.MARGIN_RIGHT)) {
double temp = ps.getAttribute(ParaStyle.MARGIN_RIGHT) * 1.6 / 100;
rightIndent = (byte) temp;
if ((temp - rightIndent) > 0.5) rightIndent++;
}
if (ps.isAttributeSet(ParaStyle.TEXT_INDENT)) {
double temp = ps.getAttribute(ParaStyle.TEXT_INDENT) * 1.6 / 100;
firstIndent = (byte) temp;
if ((temp - firstIndent) > 0.5) firstIndent++;
}
if (ps.isAttributeSet(ParaStyle.MARGIN_TOP)) {
double temp = ps.getAttribute(ParaStyle.MARGIN_TOP) * 1.6 / 100;
spaceBefore = (byte) temp;
if ((temp - spaceBefore) > 0.5) spaceBefore++;
}
if (ps.isAttributeSet(ParaStyle.MARGIN_BOTTOM)) {
double temp = ps.getAttribute(ParaStyle.MARGIN_BOTTOM) * 1.6 / 100;
spaceAfter = (byte) temp;
if ((temp - spaceAfter) > 0.5) spaceAfter++;
}
if (ps.isAttributeSet(ParaStyle.LINE_HEIGHT)) {
int lh = ps.getAttribute(ParaStyle.LINE_HEIGHT);
if ((lh & ~ParaStyle.LH_VALUEMASK) == 0)
lineSpace = (byte)(LS_MULTIPLE | (lh * 2));
else if ((lh & ParaStyle.LH_PCT) != 0) {
lh = (lh & ParaStyle.LH_VALUEMASK) / 100;
lineSpace = (byte)(LS_MULTIPLE | (lh * 2));
}
// DJP: handle other cases....
}
if (ps.isAttributeSet(ParaStyle.TEXT_ALIGN)) {
int val = ps.getAttribute(ParaStyle.TEXT_ALIGN);
switch (val) {
case ParaStyle.ALIGN_RIGHT:
misc = ALIGN_RIGHT;
break;
case ParaStyle.ALIGN_LEFT:
misc = ALIGN_LEFT;
break;
case ParaStyle.ALIGN_CENTER:
misc = ALIGN_CENTER;
break;
case ParaStyle.ALIGN_JUST:
misc = ALIGN_JUST;
break;
}
}
}
/**
* Constructor for use when going from WordSmith to DOM.
* Assumes <code>dataArray[startIndex]</code> is the first
* <code>byte</code> of a valid WordSmith paragraph descriptor.
*
* @param dataArray <code>byte</code> array.
* @param startIndex The start index.
*/
public WsePara(byte dataArray[], int startIndex) {
spaceBefore = dataArray[startIndex + 1];
spaceAfter = dataArray[startIndex + 2];
leftIndent = dataArray[startIndex + 3];
firstIndent = dataArray[startIndex + 4];
rightIndent = dataArray[startIndex + 5];
misc = dataArray[startIndex + 6];
style = dataArray[startIndex + 7];
lineSpace = dataArray[startIndex + 8];
outline = dataArray[startIndex + 9];
}
/**
* Compute the index of the first <code>byte</code> following the
* paragraph descriptor, assuming that
* <code>dataArray[startIndex]</code> is the beginning of a valid
* paragraph descriptor.
*
* @param dataArray <code>byte</code> array.
* @param startIndex The start index.
*
* @return The index of the first <code>byte</code> following the
* paragraph description.
*/
static int computeNewIndex(byte dataArray[], int startIndex) {
return startIndex + 13;
}
/**
* Return true if <code>dataArray[startIndex]</code> is the start
* of a valid paragraph descriptor.
*
* @param dataArray <code>byte</code> array.
* @param startIndex The start index.
*
* @return true if <code>dataArray[startIndex]</code> is the start
* of a valid paragraph descriptor, false otherwise.
*/
static boolean isValid(byte dataArray[], int startIndex) {
return (dataArray[startIndex] == 5);
}
/**
* Return the number of bytes needed to represent this paragraph.
*
* @return The number of bytes needed to represent this paragraph.
*/
int getByteCount() {
return 13;
}
/**
* Return an <code>byte</code> array representing this paragraph.
*
* @return An <code>byte</code> array representing this paragraph.
*/
byte[] getBytes() {
byte b[] = new byte[13];
b[0] = 5;
b[1] = spaceBefore;
b[2] = spaceAfter;
b[3] = leftIndent;
b[4] = firstIndent;
b[5] = rightIndent;
b[6] = misc;
b[7] = style;
b[8] = lineSpace;
b[9] = outline;
b[10] = reserved;
b[11] = 0;
b[12] = 0;
return b;
}
/**
* Return a <code>ParaStyle</code> that reflects the formatting of
* this run.
*
* @return A <code>ParaStyle</code> that reflects the formatting
* of this run.
*/
ParaStyle makeStyle() {
/* Csaba: Commented out the LINE_HEIGHT syle, because there was no
incoming data for that style. It was resulting a zero line
height in the xml document, ie. the doc looked empty.
*/
int attrs[] = { ParaStyle.MARGIN_LEFT, ParaStyle.MARGIN_RIGHT,
ParaStyle.TEXT_INDENT, //ParaStyle.LINE_HEIGHT,
ParaStyle.MARGIN_TOP, ParaStyle.MARGIN_BOTTOM,
ParaStyle.TEXT_ALIGN };
String values[] = new String[attrs.length];
double temp;
temp = leftIndent / 1.6;
values[0] = (new Double(temp)).toString() + "mm";
temp = rightIndent / 1.6;
values[1] = (new Double(temp)).toString() + "mm";
temp = firstIndent / 1.6;
values[2] = (new Double(temp)).toString() + "mm";
/* if ((lineSpace & LS_MULTIPLE) != 0) {
temp = (lineSpace & LS_VALUEMASK) / 2;
temp *= 100;
values[3] = (new Double(temp)).toString() + "%";
} else {
values[3] = (new Double(temp)).toString() + "mm";
// DJP: handle other cases
}
*/
temp = spaceBefore / 1.6;
// values[4] = (new Double(temp)).toString() + "mm";
values[3] = (new Double(temp)).toString() + "mm";
temp = spaceAfter / 1.6;
// values[5] = (new Double(temp)).toString() + "mm";
values[4] = (new Double(temp)).toString() + "mm";
switch (misc) {
// case ALIGN_RIGHT: values[6] = "right"; break;
// case ALIGN_LEFT: values[6] = "left"; break;
// case ALIGN_CENTER:values[6] = "center"; break;
// case ALIGN_JUST: values[6] = "justified"; break;
case ALIGN_RIGHT: values[5] = "right"; break;
case ALIGN_LEFT: values[5] = "left"; break;
case ALIGN_CENTER:values[5] = "center"; break;
case ALIGN_JUST: values[5] = "justified"; break;
}
ParaStyle x = new ParaStyle(null, "paragraph", null, attrs,
values, sc);
return x;
}
}
| 4,599 |
388 | // Copyright 2020-2021 CesiumGS, Inc. and Contributors
#pragma once
#include "CoreMinimal.h" // for TArray / TWeakObjectPtr
class UObject;
class UTexture;
class CesiumLifetime {
public:
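  // Destroys the given object, deferring final cleanup to a later pass if it
  // cannot be destroyed immediately (tracked via the pending lists below).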
static void destroy(UObject* pObject);
private:
static bool runDestruction(UObject* pObject);
static void addToPending(UObject* pObject);
static void processPending();
static void finalizeDestroy(UObject* pObject);
static TArray<TWeakObjectPtr<UObject>> _pending;
static TArray<TWeakObjectPtr<UObject>> _nextPending;
static bool _isScheduled;
};
| 162 |
10,016 | /*
* Zed Attack Proxy (ZAP) and its related class files.
*
* ZAP is an HTTP/HTTPS proxy for assessing web application security.
*
* Copyright 2021 The ZAP Development Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.zaproxy.zap.extension.alert;
import java.awt.Dialog;
import java.awt.GridBagConstraints;
import java.awt.GridBagLayout;
import java.awt.Insets;
import javax.swing.JLabel;
import javax.swing.JOptionPane;
import javax.swing.JPanel;
import javax.swing.JScrollPane;
import javax.swing.event.DocumentEvent;
import javax.swing.event.DocumentListener;
import org.parosproxy.paros.Constant;
import org.zaproxy.zap.utils.ZapTextArea;
import org.zaproxy.zap.utils.ZapTextField;
import org.zaproxy.zap.view.AbstractFormDialog;
import org.zaproxy.zap.view.LayoutHelper;
public class DialogAddAlertTag extends AbstractFormDialog {
private static final long serialVersionUID = 1L;
private static final String DIALOG_TITLE =
Constant.messages.getString("alert.tags.dialog.add.title");
private static final String CONFIRM_BUTTON_LABEL =
Constant.messages.getString("alert.tags.dialog.add.button.confirm");
private static final String KEY_FIELD_LABEL =
Constant.messages.getString("alert.tags.dialog.add.key");
private static final String VALUE_FIELD_LABEL =
Constant.messages.getString("alert.tags.dialog.add.value");
private static final String REPEATED_TAG_KEY_TITLE =
Constant.messages.getString("alert.tags.dialog.warning.title.repeated.key");
private static final String REPEATED_TAG_KEY_BODY =
Constant.messages.getString("alert.tags.dialog.warning.body.repeated.key");
protected static final int MAX_KEY_LENGTH = 1024;
protected static final int MAX_VALUE_LENGTH = 4000;
private ZapTextField keyTextField;
private ZapTextArea valueTextArea;
private JScrollPane valueTextAreaScrollPane;
private ConfirmButtonValidatorDocListener confirmButtonValidatorDocListener;
protected AlertTagsTableModel model;
public DialogAddAlertTag(Dialog owner, AlertTagsTableModel model) {
this(owner, model, DIALOG_TITLE);
}
protected DialogAddAlertTag(Dialog owner, AlertTagsTableModel model, String title) {
super(owner, title);
this.model = model;
}
@Override
protected JPanel getFieldsPanel() {
JPanel fieldsPanel = new JPanel(new GridBagLayout());
JLabel keyLabel = new JLabel(KEY_FIELD_LABEL);
JLabel valueLabel = new JLabel(VALUE_FIELD_LABEL);
int gbcRow = 0;
fieldsPanel.add(
keyLabel,
LayoutHelper.getGBC(
0,
gbcRow,
1,
0,
0.1,
GridBagConstraints.HORIZONTAL,
new Insets(2, 2, 2, 2)));
fieldsPanel.add(
getKeyTextField(),
LayoutHelper.getGBC(
1,
gbcRow,
1,
1,
0.1,
GridBagConstraints.HORIZONTAL,
new Insets(2, 2, 2, 2)));
gbcRow++;
fieldsPanel.add(
valueLabel,
LayoutHelper.getGBC(
0,
gbcRow,
1,
0,
0.1,
GridBagConstraints.HORIZONTAL,
new Insets(2, 2, 2, 2)));
gbcRow++;
fieldsPanel.add(
getValueTextAreaScrollPane(),
LayoutHelper.getGBC(
0, gbcRow, 2, 1, 0.5, GridBagConstraints.BOTH, new Insets(2, 2, 2, 2)));
return fieldsPanel;
}
@Override
protected String getConfirmButtonLabel() {
return CONFIRM_BUTTON_LABEL;
}
@Override
protected void init() {
getKeyTextField().setText("");
getValueTextArea().setText("");
}
@Override
protected boolean validateFields() {
return validateKey() && validateValue();
}
protected boolean validateKey() {
String key = getKeyTextField().getText().trim();
if (model.getTags().containsKey(key)) {
JOptionPane.showMessageDialog(
this,
REPEATED_TAG_KEY_BODY,
REPEATED_TAG_KEY_TITLE,
JOptionPane.INFORMATION_MESSAGE);
getKeyTextField().requestFocusInWindow();
return false;
}
int len = getKeyTextField().getDocument().getLength();
return len > 0 && len <= MAX_KEY_LENGTH;
}
protected boolean validateValue() {
return getValueTextArea().getDocument().getLength() <= MAX_VALUE_LENGTH;
}
@Override
protected void performAction() {
model.addTag(getKeyTextField().getText(), getValueTextArea().getText());
}
@Override
protected void clearFields() {
getKeyTextField().setText("");
getKeyTextField().discardAllEdits();
getValueTextArea().setText("");
getValueTextArea().discardAllEdits();
}
protected ZapTextField getKeyTextField() {
if (keyTextField == null) {
keyTextField = new ZapTextField(20);
keyTextField.getDocument().addDocumentListener(getConfirmButtonValidatorDocListener());
}
return keyTextField;
}
protected ZapTextArea getValueTextArea() {
if (valueTextArea == null) {
valueTextArea = new ZapTextArea(5, 20);
valueTextArea.setLineWrap(true);
valueTextArea.getDocument().addDocumentListener(getConfirmButtonValidatorDocListener());
}
return valueTextArea;
}
protected JScrollPane getValueTextAreaScrollPane() {
if (valueTextAreaScrollPane == null) {
valueTextAreaScrollPane = new JScrollPane(getValueTextArea());
valueTextAreaScrollPane.setHorizontalScrollBarPolicy(
JScrollPane.HORIZONTAL_SCROLLBAR_NEVER);
valueTextAreaScrollPane.setVerticalScrollBarPolicy(
JScrollPane.VERTICAL_SCROLLBAR_AS_NEEDED);
}
return valueTextAreaScrollPane;
}
private ConfirmButtonValidatorDocListener getConfirmButtonValidatorDocListener() {
if (confirmButtonValidatorDocListener == null) {
confirmButtonValidatorDocListener = new ConfirmButtonValidatorDocListener();
}
return confirmButtonValidatorDocListener;
}
private class ConfirmButtonValidatorDocListener implements DocumentListener {
@Override
public void insertUpdate(DocumentEvent e) {
checkAndEnableConfirmButton();
}
@Override
public void removeUpdate(DocumentEvent e) {
checkAndEnableConfirmButton();
}
@Override
public void changedUpdate(DocumentEvent e) {
checkAndEnableConfirmButton();
}
private void checkAndEnableConfirmButton() {
int keyLen = getKeyTextField().getDocument().getLength();
int valueLen = getValueTextArea().getDocument().getLength();
boolean enabled =
keyLen > 0 && keyLen <= MAX_KEY_LENGTH && valueLen <= MAX_VALUE_LENGTH;
setConfirmButtonEnabled(enabled);
}
}
}
| 3,551 |
868 | <gh_stars>100-1000
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.artemis.cli.commands.user;
import javax.json.JsonArray;
import javax.json.JsonObject;
import io.airlift.airline.Command;
import org.apache.activemq.artemis.api.core.JsonUtil;
import org.apache.activemq.artemis.api.core.client.ClientMessage;
import org.apache.activemq.artemis.api.core.management.ManagementHelper;
import org.apache.activemq.artemis.cli.commands.AbstractAction;
import org.apache.activemq.artemis.cli.commands.ActionContext;
/**
* list existing users, example:
* ./artemis user list --user guest
*/
@Command(name = "list", description = "List existing user(s)")
public class ListUser extends UserAction {
@Override
public Object execute(ActionContext context) throws Exception {
super.execute(context);
list();
return null;
}
/**
* List a single user or all users if username is not specified
*
* @throws Exception if communication with the broker fails
*/
private void list() throws Exception {
StringBuilder logMessage = new StringBuilder("--- \"user\"(roles) ---\n");
int userCount = 0;
final String[] result = new String[1];
performCoreManagement(new AbstractAction.ManagementCallback<ClientMessage>() {
@Override
public void setUpInvocation(ClientMessage message) throws Exception {
ManagementHelper.putOperationInvocation(message, "broker", "listUser", userCommandUser);
}
@Override
public void requestSuccessful(ClientMessage reply) throws Exception {
result[0] = (String) ManagementHelper.getResult(reply, String.class);
}
@Override
public void requestFailed(ClientMessage reply) throws Exception {
String errMsg = (String) ManagementHelper.getResult(reply, String.class);
context.err.println("Failed to list user " + userCommandUser + ". Reason: " + errMsg);
}
});
// process the JSON results from the broker
JsonArray array = JsonUtil.readJsonArray(result[0]);
for (JsonObject object : array.getValuesAs(JsonObject.class)) {
logMessage.append("\"").append(object.getString("username")).append("\"").append("(");
JsonArray roles = object.getJsonArray("roles");
for (int i = 0; i < roles.size(); i++) {
logMessage.append(roles.getString(i));
if ((i + 1) < roles.size()) {
logMessage.append(",");
}
}
logMessage.append(")\n");
userCount++;
}
logMessage.append("\n Total: ").append(userCount);
context.out.println(logMessage);
}
}
| 1,195 |
348 | {"nom":"Lézignan-la-Cèbe","circ":"5ème circonscription","dpt":"Hérault","inscrits":1100,"abs":606,"votants":494,"blancs":27,"nuls":11,"exp":456,"res":[{"nuance":"FN","nom":"<NAME>","voix":229},{"nuance":"REM","nom":"M. <NAME>","voix":227}]} | 100 |
3,081 |
import numpy as np
import pandas as pd
from itertools import product
from tensortrade.feed import Stream
from tests.utils.ops import assert_op
configurations = [
{"min_periods": 0},
{"min_periods": 2}
]
arrays = [
[1, 2, 3, 4, 5, 6, 7],
[1, np.nan, 3, 4, 5, 6, np.nan, 7]
]
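# Illustrative shape of these tests (a sketch; `assert_op` from tests.utils.ops is
# assumed to push the source values through the stream and compare the outputs):
#   s = Stream.source([1, 2, 3], dtype="float")
#   w = s.expanding(min_periods=0).sum().rename("w")
#   expected = list(pd.Series([1, 2, 3]).expanding(min_periods=0).sum())  # [1.0, 3.0, 6.0]
#   assert_op([w], expected)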
def test_expanding_count():
for array, config in product(arrays, configurations):
s = Stream.source(array, dtype="float")
        w = s.expanding(**config).count().rename("w")
        expected = list(pd.Series(array).expanding(**config).count())
assert_op([w], expected)
def test_expanding_sum():
for array, config in product(arrays, configurations):
s = Stream.source(array, dtype="float")
w = s.expanding(**config).sum().rename("w")
expected = list(pd.Series(array).expanding(**config).sum())
assert_op([w], expected)
def test_expanding_mean():
for array, config in product(arrays, configurations):
s = Stream.source(array, dtype="float")
w = s.expanding(**config).mean().rename("w")
expected = list(pd.Series(array).expanding(**config).mean())
assert_op([w], expected)
def test_expanding_var():
for array, config in product(arrays, configurations):
s = Stream.source(array, dtype="float")
w = s.expanding(**config).var().rename("w")
expected = list(pd.Series(array).expanding(**config).var())
assert_op([w], expected)
def test_expanding_median():
for array, config in product(arrays, configurations):
s = Stream.source(array, dtype="float")
w = s.expanding(**config).median().rename("w")
expected = list(pd.Series(array).expanding(**config).median())
assert_op([w], expected)
def test_expanding_std():
for array, config in product(arrays, configurations):
s = Stream.source(array, dtype="float")
w = s.expanding(**config).std().rename("w")
expected = list(pd.Series(array).expanding(**config).std())
assert_op([w], expected)
def test_expanding_min():
for array, config in product(arrays, configurations):
s = Stream.source(array, dtype="float")
w = s.expanding(**config).min().rename("w")
expected = list(pd.Series(array).expanding(**config).min())
assert_op([w], expected)
def test_expanding_max():
for array, config in product(arrays, configurations):
s = Stream.source(array, dtype="float")
w = s.expanding(**config).max().rename("w")
expected = list(pd.Series(array).expanding(**config).max())
assert_op([w], expected)
| 1,043 |
2,542 | // ------------------------------------------------------------
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License (MIT). See License.txt in the repo root for license information.
// ------------------------------------------------------------
#include "stdafx.h"
using namespace std;
using namespace Common;
using namespace ServiceModel;
using namespace Reliability;
StringLiteral const FabricClientSource("FabricClient");
PartitionedServiceDescWrapper::PartitionedServiceDescWrapper()
: serviceKind_(FABRIC_SERVICE_KIND_INVALID)
, partitionDescription_()
, instanceCount_(0)
, targetReplicaSetSize_(0)
, minReplicaSetSize_(0)
, hasPersistedState_(false)
, flags_(FABRIC_STATEFUL_SERVICE_SETTINGS_NONE)
, isServiceGroup_(false)
, defaultMoveCost_(FABRIC_MOVE_COST_LOW)
, isDefaultMoveCostSpecified_(false)
, servicePackageActivationMode_(ServicePackageActivationMode::SharedProcess)
, scalingPolicies_()
{
}
PartitionedServiceDescWrapper::PartitionedServiceDescWrapper(
FABRIC_SERVICE_KIND serviceKind,
wstring const& applicationName,
wstring const& serviceName,
wstring const& serviceTypeName,
ByteBuffer const& initializationData,
PartitionSchemeDescription::Enum partScheme,
uint partitionCount,
__int64 lowKeyInt64,
__int64 highKeyInt64,
vector<wstring> const& partitionNames,
LONG instanceCount,
LONG targetReplicaSize,
LONG minReplicaSize,
bool hasPersistedState,
wstring const& placementConstraints,
vector<Reliability::ServiceCorrelationDescription> const& correlations,
vector<Reliability::ServiceLoadMetricDescription> const& metrics,
vector<ServicePlacementPolicyDescription> const& placementPolicies,
FABRIC_STATEFUL_SERVICE_FAILOVER_SETTINGS_FLAGS flags,
DWORD replicaRestartWaitDurationSeconds,
DWORD quorumLossWaitDurationSeconds,
DWORD standByReplicaKeepDurationSeconds,
FABRIC_MOVE_COST defaultMoveCost,
ServicePackageActivationMode::Enum const servicePackageActivationMode,
std::wstring const& serviceDnsName,
std::vector<Reliability::ServiceScalingPolicyDescription> const & scalingPolicies)
: serviceKind_(serviceKind)
, applicationName_(applicationName)
, serviceName_(serviceName)
, serviceTypeName_(serviceTypeName)
, initializationData_(initializationData)
, partitionDescription_(partScheme, partitionCount, lowKeyInt64, highKeyInt64, partitionNames)
, instanceCount_(instanceCount)
, targetReplicaSetSize_(targetReplicaSize)
, minReplicaSetSize_(minReplicaSize)
, hasPersistedState_(hasPersistedState)
, placementConstraints_(placementConstraints)
, correlations_(correlations)
, metrics_(metrics)
, placementPolicies_(placementPolicies)
, flags_(flags)
, replicaRestartWaitDurationSeconds_(replicaRestartWaitDurationSeconds)
, quorumLossWaitDurationSeconds_(quorumLossWaitDurationSeconds)
, standByReplicaKeepDurationSeconds_(standByReplicaKeepDurationSeconds)
, isServiceGroup_(false)
, defaultMoveCost_(defaultMoveCost)
, isDefaultMoveCostSpecified_(true)
, servicePackageActivationMode_(servicePackageActivationMode)
, serviceDnsName_(serviceDnsName)
, scalingPolicies_(scalingPolicies)
{
StringUtility::ToLower(serviceDnsName_);
}
ErrorCode PartitionedServiceDescWrapper::FromPublicApi(
__in FABRIC_SERVICE_DESCRIPTION const & serviceDescription)
{
ServiceTypeIdentifier typeIdentifier;
if (serviceDescription.Kind == FABRIC_SERVICE_DESCRIPTION_KIND_STATEFUL)
{
serviceKind_ = FABRIC_SERVICE_KIND_STATEFUL;
auto stateful = reinterpret_cast<FABRIC_STATEFUL_SERVICE_DESCRIPTION*>(serviceDescription.Value);
auto hr = StringUtility::LpcwstrToWstring(stateful->ServiceName, false /*acceptNull*/, serviceName_);
if (FAILED(hr)) { return ErrorCode::FromHResult(hr); }
hr = StringUtility::LpcwstrToWstring(stateful->ApplicationName, true /*acceptNull*/, applicationName_);
if (FAILED(hr)) { return ErrorCode::FromHResult(hr); }
hr = StringUtility::LpcwstrToWstring(stateful->ServiceTypeName, false /*acceptNull*/, serviceTypeName_);
if (FAILED(hr)) { return ErrorCode::FromHResult(hr); }
ErrorCode error = ServiceTypeIdentifier::FromString(serviceTypeName_, typeIdentifier);
if (!error.IsSuccess())
{
Trace.WriteWarning(FabricClientSource, "Could not parse ServiceTypeName '{0}'", stateful->ServiceTypeName);
return error;
}
if (stateful->CorrelationCount > 0 && stateful->Correlations == NULL)
{
return TraceNullArgumentAndGetErrorDetails(wformatString("{0} {1}", GET_CM_RC(Invalid_Null_Pointer), "FABRIC_STATEFUL_SERVICE_DESCRIPTION->Correlations"));
}
for(ULONG i = 0; i < stateful->CorrelationCount; i++)
{
wstring correlationServiceName;
hr = StringUtility::LpcwstrToWstring(stateful->Correlations[i].ServiceName, true /*acceptNull*/, correlationServiceName);
if (FAILED(hr)) { return ErrorCode::FromHResult(hr); }
Reliability::ServiceCorrelationDescription correlationDescription(correlationServiceName, stateful->Correlations[i].Scheme);
correlations_.push_back(correlationDescription);
}
hr = StringUtility::LpcwstrToWstring(stateful->PlacementConstraints, true /*acceptNull*/, placementConstraints_);
if (FAILED(hr)) { return ErrorCode::FromHResult(hr); }
if (stateful->MetricCount > 0 && stateful->Metrics == NULL)
{
return TraceNullArgumentAndGetErrorDetails(wformatString("{0} {1}", GET_CM_RC(Invalid_Null_Pointer), "FABRIC_STATEFUL_SERVICE_DESCRIPTION->Metrics"));
}
for(ULONG i = 0; i < stateful->MetricCount; i++)
{
wstring metricsName;
hr = StringUtility::LpcwstrToWstring(stateful->Metrics[i].Name, true /*acceptNull*/, metricsName);
if (FAILED(hr)) { return ErrorCode::FromHResult(hr); }
metrics_.push_back(Reliability::ServiceLoadMetricDescription(
move(metricsName),
stateful->Metrics[i].Weight,
stateful->Metrics[i].PrimaryDefaultLoad,
stateful->Metrics[i].SecondaryDefaultLoad));
}
initializationData_ = std::vector<byte>(
stateful->InitializationData,
stateful->InitializationData + stateful->InitializationDataSize);
targetReplicaSetSize_ = stateful->TargetReplicaSetSize;
minReplicaSetSize_ = stateful->MinReplicaSetSize;
hasPersistedState_ = stateful->HasPersistedState ? true : false;
error = this->InitializePartitionDescription(stateful->PartitionScheme, stateful->PartitionSchemeDescription, typeIdentifier);
if (!error.IsSuccess())
{
return error;
}
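        // Newer public-API fields arrive through a chain of versioned extension structs
        // hung off each Reserved pointer (EX1 -> EX2 -> EX3 -> EX4); a NULL Reserved at
        // any level means the caller stopped at an older version of the description.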
if (stateful->Reserved == NULL)
{
return ErrorCodeValue::Success;
}
auto statefulEx1 = reinterpret_cast<FABRIC_STATEFUL_SERVICE_DESCRIPTION_EX1*>(stateful->Reserved);
if (statefulEx1->PolicyList != NULL)
{
auto pList = reinterpret_cast<FABRIC_SERVICE_PLACEMENT_POLICY_LIST*>(statefulEx1->PolicyList);
if (pList->PolicyCount > 0 && pList->Policies == NULL)
{
return TraceNullArgumentAndGetErrorDetails(wformatString("{0} {1}", GET_CM_RC(Invalid_Null_Pointer), "FABRIC_STATEFUL_SERVICE_DESCRIPTION_EX1->PolicyList->Policies"));
}
for (ULONG i = 0; i < pList->PolicyCount; i++)
{
std::wstring domainName;
FABRIC_SERVICE_PLACEMENT_POLICY_DESCRIPTION & policyDesc = pList->Policies[i];
ServicePlacementPolicyHelper::PolicyDescriptionToDomainName(policyDesc, domainName);
placementPolicies_.push_back(ServiceModel::ServicePlacementPolicyDescription(move(domainName), policyDesc.Type));
}
}
if (statefulEx1->FailoverSettings != NULL)
{
auto failoverSettings = statefulEx1->FailoverSettings;
if ((failoverSettings->Flags & FABRIC_STATEFUL_SERVICE_SETTINGS_REPLICA_RESTART_WAIT_DURATION) != 0)
{
flags_ = (FABRIC_STATEFUL_SERVICE_FAILOVER_SETTINGS_FLAGS)(flags_ | FABRIC_STATEFUL_SERVICE_SETTINGS_REPLICA_RESTART_WAIT_DURATION);
replicaRestartWaitDurationSeconds_ = failoverSettings->ReplicaRestartWaitDurationSeconds;
}
if ((failoverSettings->Flags & FABRIC_STATEFUL_SERVICE_SETTINGS_QUORUM_LOSS_WAIT_DURATION) != 0)
{
flags_ = (FABRIC_STATEFUL_SERVICE_FAILOVER_SETTINGS_FLAGS)(flags_ | FABRIC_STATEFUL_SERVICE_SETTINGS_QUORUM_LOSS_WAIT_DURATION);
quorumLossWaitDurationSeconds_ = failoverSettings->QuorumLossWaitDurationSeconds;
}
if (failoverSettings->Reserved != NULL)
{
auto failoverSettingsEx1 = reinterpret_cast<FABRIC_STATEFUL_SERVICE_FAILOVER_SETTINGS_EX1*>(failoverSettings->Reserved);
if (failoverSettingsEx1 == NULL)
{
return TraceNullArgumentAndGetErrorDetails(
wformatString("{0} {1}", GET_CM_RC(Invalid_Null_Pointer), "FABRIC_STATEFUL_SERVICE_DESCRIPTION_EX1->FailoverSettings->FABRIC_STATEFUL_SERVICE_FAILOVER_SETTINGS_EX1"));
}
if ((failoverSettings->Flags & FABRIC_STATEFUL_SERVICE_SETTINGS_STANDBY_REPLICA_KEEP_DURATION) != 0)
{
flags_ = (FABRIC_STATEFUL_SERVICE_FAILOVER_SETTINGS_FLAGS)(flags_ | FABRIC_STATEFUL_SERVICE_SETTINGS_STANDBY_REPLICA_KEEP_DURATION);
standByReplicaKeepDurationSeconds_ = failoverSettingsEx1->StandByReplicaKeepDurationSeconds;
}
}
}
if (statefulEx1->Reserved == NULL)
{
isDefaultMoveCostSpecified_ = false;
return ErrorCodeValue::Success;
}
auto statefulEx2 = reinterpret_cast<FABRIC_STATEFUL_SERVICE_DESCRIPTION_EX2*>(statefulEx1->Reserved);
isDefaultMoveCostSpecified_ = statefulEx2->IsDefaultMoveCostSpecified == TRUE ? true : false;
defaultMoveCost_ = statefulEx2->DefaultMoveCost;
if (statefulEx2->Reserved == NULL)
{
servicePackageActivationMode_ = ServicePackageActivationMode::SharedProcess;
return ErrorCodeValue::Success;
}
auto statefulEx3 = reinterpret_cast<FABRIC_STATEFUL_SERVICE_DESCRIPTION_EX3*>(statefulEx2->Reserved);
auto err = ServicePackageActivationMode::FromPublicApi(statefulEx3->ServicePackageActivationMode, servicePackageActivationMode_);
if (!err.IsSuccess())
{
            return this->TraceAndGetErrorDetails(err.ReadValue(), L"Invalid value of FABRIC_SERVICE_PACKAGE_ACTIVATION_MODE provided.");
}
hr = StringUtility::LpcwstrToWstring(statefulEx3->ServiceDnsName, true /*acceptNull*/, serviceDnsName_);
if (FAILED(hr)) { return ErrorCode::FromHResult(hr); }
StringUtility::ToLower(serviceDnsName_);
if (statefulEx3->Reserved == NULL)
{
return ErrorCodeValue::Success;
}
auto statefulEx4 = reinterpret_cast<FABRIC_STATEFUL_SERVICE_DESCRIPTION_EX4*>(statefulEx3->Reserved);
if (statefulEx4->ScalingPolicyCount > 1)
{
// Currently, only one scaling policy is allowed per service.
            // The vector is there for future use (when services may have multiple scaling policies).
return TraceAndGetErrorDetails(ErrorCodeValue::InvalidServiceScalingPolicy, wformatString(GET_NS_RC(ScalingPolicy_Scaling_Count), statefulEx4->ScalingPolicyCount));
}
for (ULONG i = 0; i < statefulEx4->ScalingPolicyCount; i++)
{
Reliability::ServiceScalingPolicyDescription scalingDescription;
auto scalingError = scalingDescription.FromPublicApi(statefulEx4->ServiceScalingPolicies[i]);
if (!scalingError.IsSuccess())
{
return scalingError;
}
scalingPolicies_.push_back(move(scalingDescription));
}
return err;
}
else if (serviceDescription.Kind == FABRIC_SERVICE_DESCRIPTION_KIND_STATELESS)
{
serviceKind_ = FABRIC_SERVICE_KIND_STATELESS;
auto stateless = reinterpret_cast<FABRIC_STATELESS_SERVICE_DESCRIPTION*>(
serviceDescription.Value);
HRESULT hr = StringUtility::LpcwstrToWstring(stateless->ServiceName, false /*acceptNull*/, serviceName_);
if (FAILED(hr)) { return ErrorCode::FromHResult(hr); }
hr = StringUtility::LpcwstrToWstring(stateless->ApplicationName, true /*acceptNull*/, applicationName_);
if (FAILED(hr)) { return ErrorCode::FromHResult(hr); }
hr = StringUtility::LpcwstrToWstring(stateless->ServiceTypeName, false /*acceptNull*/, serviceTypeName_);
if (FAILED(hr)) { return ErrorCode::FromHResult(hr); }
ErrorCode error = ServiceTypeIdentifier::FromString(serviceTypeName_, typeIdentifier);
if (!error.IsSuccess())
{
Trace.WriteWarning(FabricClientSource, "Could not parse ServiceTypeName '{0}'", stateless->ServiceTypeName);
return error;
}
if (stateless->CorrelationCount > 0 && stateless->Correlations == NULL)
{
return TraceNullArgumentAndGetErrorDetails(
                wformatString("{0} {1}", GET_CM_RC(Invalid_Null_Pointer), "FABRIC_STATELESS_SERVICE_DESCRIPTION->Correlations"));
}
for (ULONG i = 0; i < stateless->CorrelationCount; i++)
{
wstring correlationServiceName;
hr = StringUtility::LpcwstrToWstring(stateless->Correlations[i].ServiceName, true /*acceptNull*/, correlationServiceName);
if (FAILED(hr)) { return ErrorCode::FromHResult(hr); }
Reliability::ServiceCorrelationDescription correlationDescription(correlationServiceName, stateless->Correlations[i].Scheme);
correlations_.push_back(correlationDescription);
}
hr = StringUtility::LpcwstrToWstring(stateless->PlacementConstraints, true /*acceptNull*/, placementConstraints_);
if (FAILED(hr)) { return ErrorCode::FromHResult(hr); }
if (stateless->MetricCount > 0 && stateless->Metrics == NULL)
{
return TraceNullArgumentAndGetErrorDetails(
                wformatString("{0} {1}", GET_CM_RC(Invalid_Null_Pointer), "FABRIC_STATELESS_SERVICE_DESCRIPTION->Metrics"));
}
for (ULONG i = 0; i < stateless->MetricCount; i++)
{
wstring metricsName;
hr = StringUtility::LpcwstrToWstring(stateless->Metrics[i].Name, true /*acceptNull*/, metricsName);
if (FAILED(hr)) { return ErrorCode::FromHResult(hr); }
metrics_.push_back(Reliability::ServiceLoadMetricDescription(
move(metricsName),
stateless->Metrics[i].Weight,
stateless->Metrics[i].PrimaryDefaultLoad,
stateless->Metrics[i].SecondaryDefaultLoad));
}
initializationData_ = std::vector<byte>(
stateless->InitializationData,
stateless->InitializationData + stateless->InitializationDataSize);
instanceCount_ = stateless->InstanceCount;
error = this->InitializePartitionDescription(stateless->PartitionScheme, stateless->PartitionSchemeDescription, typeIdentifier);
if (!error.IsSuccess())
{
return error;
}
if (stateless->Reserved == NULL)
{
return ErrorCodeValue::Success;
}
auto statelessEx1 = reinterpret_cast<FABRIC_STATELESS_SERVICE_DESCRIPTION_EX1*>(stateless->Reserved);
if (statelessEx1->PolicyList != NULL)
{
auto pList = reinterpret_cast<FABRIC_SERVICE_PLACEMENT_POLICY_LIST*>(statelessEx1->PolicyList);
if (pList->PolicyCount > 0 && pList->Policies == NULL)
{
return TraceNullArgumentAndGetErrorDetails(
                    wformatString("{0} {1}", GET_CM_RC(Invalid_Null_Pointer), "FABRIC_STATELESS_SERVICE_DESCRIPTION_EX1->PolicyList->Policies"));
}
for (ULONG i = 0; i < pList->PolicyCount; i++)
{
std::wstring domainName;
FABRIC_SERVICE_PLACEMENT_POLICY_DESCRIPTION & policyDesc = pList->Policies[i];
ServicePlacementPolicyHelper::PolicyDescriptionToDomainName(policyDesc, domainName);
placementPolicies_.push_back(ServiceModel::ServicePlacementPolicyDescription(move(domainName), policyDesc.Type));
}
}
if (statelessEx1->Reserved == NULL)
{
isDefaultMoveCostSpecified_ = false;
return ErrorCodeValue::Success;
}
auto statelessEx2 = reinterpret_cast<FABRIC_STATELESS_SERVICE_DESCRIPTION_EX2*>(statelessEx1->Reserved);
isDefaultMoveCostSpecified_ = statelessEx2->IsDefaultMoveCostSpecified == TRUE ? true : false;
defaultMoveCost_ = statelessEx2->DefaultMoveCost;
if (statelessEx2->Reserved == NULL)
{
servicePackageActivationMode_ = ServicePackageActivationMode::SharedProcess;
return ErrorCodeValue::Success;
}
auto statelessEx3 = reinterpret_cast<FABRIC_STATELESS_SERVICE_DESCRIPTION_EX3*>(statelessEx2->Reserved);
auto err = ServicePackageActivationMode::FromPublicApi(statelessEx3->ServicePackageActivationMode, servicePackageActivationMode_);
if (!err.IsSuccess())
{
            return this->TraceAndGetErrorDetails(err.ReadValue(), L"Invalid value of FABRIC_SERVICE_PACKAGE_ACTIVATION_MODE provided.");
}
hr = StringUtility::LpcwstrToWstring(statelessEx3->ServiceDnsName, true /*acceptNull*/, serviceDnsName_);
if (FAILED(hr)) { return ErrorCode::FromHResult(hr); }
StringUtility::ToLower(serviceDnsName_);
if (statelessEx3->Reserved == NULL)
{
return ErrorCodeValue::Success;
}
auto statelessEx4 = reinterpret_cast<FABRIC_STATELESS_SERVICE_DESCRIPTION_EX4*>(statelessEx3->Reserved);
if (statelessEx4->ScalingPolicyCount > 1)
{
// Currently, only one scaling policy is allowed per service.
            // The vector is there for future use (when services may have multiple scaling policies).
return TraceAndGetErrorDetails(ErrorCodeValue::InvalidServiceScalingPolicy, wformatString(GET_NS_RC(ScalingPolicy_Scaling_Count), statelessEx4->ScalingPolicyCount));
}
for (ULONG i = 0; i < statelessEx4->ScalingPolicyCount; i++)
{
Reliability::ServiceScalingPolicyDescription scalingDescription;
auto scalingError = scalingDescription.FromPublicApi(statelessEx4->ServiceScalingPolicies[i]);
if (!scalingError.IsSuccess())
{
return scalingError;
}
scalingPolicies_.push_back(move(scalingDescription));
}
return err;
}
return ErrorCodeValue::InvalidArgument;
}
ErrorCode PartitionedServiceDescWrapper::InitializePartitionDescription(
::FABRIC_PARTITION_SCHEME partitionScheme,
void * partitionDescription,
ServiceTypeIdentifier const & typeIdentifier)
{
switch (partitionScheme)
{
case FABRIC_PARTITION_SCHEME_SINGLETON:
partitionDescription_.Scheme = PartitionSchemeDescription::Singleton;
partitionDescription_.PartitionCount = 1;
break;
case FABRIC_PARTITION_SCHEME_UNIFORM_INT64_RANGE:
{
if (partitionDescription == NULL)
{
Trace.WriteWarning(
FabricClientSource,
"Partition description cannot be NULL for service: type = {0} name = {1}",
typeIdentifier,
serviceName_);
return TraceNullArgumentAndGetErrorDetails(
wformatString("{0} {1}", GET_CM_RC(Invalid_Null_Pointer), "PartitionDescription"));
}
auto d = reinterpret_cast<FABRIC_UNIFORM_INT64_RANGE_PARTITION_SCHEME_DESCRIPTION*>(partitionDescription);
partitionDescription_.Scheme = PartitionSchemeDescription::UniformInt64Range;
partitionDescription_.PartitionCount = d->PartitionCount;
partitionDescription_.LowKey = d->LowKey;
partitionDescription_.HighKey = d->HighKey;
break;
}
case FABRIC_PARTITION_SCHEME_NAMED:
{
if (partitionDescription == NULL)
{
Trace.WriteWarning(
FabricClientSource,
"Partition description cannot be NULL for service: type = {0} name = {1}",
typeIdentifier,
serviceName_);
return TraceNullArgumentAndGetErrorDetails(
wformatString("{0} {1}", GET_CM_RC(Invalid_Null_Pointer), "PartitionDescription"));
}
auto d = reinterpret_cast<FABRIC_NAMED_PARTITION_SCHEME_DESCRIPTION*>(partitionDescription);
partitionDescription_.Scheme = PartitionSchemeDescription::Named;
partitionDescription_.PartitionCount = d->PartitionCount;
auto hr = StringUtility::FromLPCWSTRArray(d->PartitionCount, d->Names, partitionDescription_.PartitionNames);
if (FAILED(hr)) { return ErrorCode::FromHResult(hr); }
break;
}
default:
if (partitionDescription == NULL)
{
return TraceNullArgumentAndGetErrorDetails(
wformatString("{0} {1}", GET_CM_RC(Invalid_Null_Pointer), "PartitionDescription"));
}
else
{
return ErrorCode(ErrorCodeValue::InvalidArgument);
}
}
return ErrorCode(ErrorCodeValue::Success);
}
void PartitionedServiceDescWrapper::ToPublicApi(__in ScopedHeap &heap, __in FABRIC_SERVICE_DESCRIPTION & serviceDescription) const
{
ULONG const initDataSize = static_cast<const ULONG>(initializationData_.size());
auto initData = heap.AddArray<BYTE>(initDataSize);
for (size_t i = 0; i < initDataSize; i++)
{
initData[i] = initializationData_[i];
}
if (serviceKind_ == FABRIC_SERVICE_KIND_STATEFUL)
{
auto statefulDescription = heap.AddItem<FABRIC_STATEFUL_SERVICE_DESCRIPTION>();
serviceDescription.Kind = FABRIC_SERVICE_DESCRIPTION_KIND_STATEFUL;
serviceDescription.Value = statefulDescription.GetRawPointer();
statefulDescription->ApplicationName = heap.AddString(applicationName_);
statefulDescription->ServiceName = heap.AddString(serviceName_);
statefulDescription->ServiceTypeName = heap.AddString(serviceTypeName_);
statefulDescription->InitializationData = initData.GetRawArray();
statefulDescription->InitializationDataSize = initDataSize;
statefulDescription->TargetReplicaSetSize = targetReplicaSetSize_;
statefulDescription->MinReplicaSetSize = minReplicaSetSize_;
statefulDescription->PlacementConstraints = heap.AddString(placementConstraints_);
size_t correlationCount = correlations_.size();
statefulDescription->CorrelationCount = static_cast<ULONG>(correlationCount);
if (correlationCount > 0)
{
auto correlations = heap.AddArray<FABRIC_SERVICE_CORRELATION_DESCRIPTION>(correlationCount);
statefulDescription->Correlations = correlations.GetRawArray();
for (size_t correlationIndex = 0; correlationIndex < correlationCount; ++correlationIndex)
{
correlations[correlationIndex].ServiceName = heap.AddString(correlations_[correlationIndex].ServiceName);
correlations[correlationIndex].Scheme = correlations_[correlationIndex].Scheme;
}
}
else
{
statefulDescription->Correlations = nullptr;
}
statefulDescription->HasPersistedState = hasPersistedState_;
if (partitionDescription_.Scheme == PartitionSchemeDescription::UniformInt64Range)
{
statefulDescription->PartitionScheme = FABRIC_PARTITION_SCHEME_UNIFORM_INT64_RANGE;
auto partitionDescription = heap.AddItem<FABRIC_UNIFORM_INT64_RANGE_PARTITION_SCHEME_DESCRIPTION>();
partitionDescription->PartitionCount = partitionDescription_.PartitionCount;
partitionDescription->LowKey = partitionDescription_.LowKey;
partitionDescription->HighKey = partitionDescription_.HighKey;
partitionDescription->Reserved = NULL;
statefulDescription->PartitionSchemeDescription = partitionDescription.GetRawPointer();
}
else if (partitionDescription_.Scheme == PartitionSchemeDescription::Named)
{
auto partitionDescription = heap.AddItem<FABRIC_NAMED_PARTITION_SCHEME_DESCRIPTION>();
statefulDescription->PartitionScheme = FABRIC_PARTITION_SCHEME_NAMED;
partitionDescription->PartitionCount = partitionDescription_.PartitionCount;
auto pNames = heap.AddArray<LPCWSTR>(partitionDescription_.PartitionCount);
for (size_t i = 0; i < static_cast<size_t>(partitionDescription_.PartitionCount); ++i)
{
pNames[i] = heap.AddString(partitionDescription_.PartitionNames[i]);
}
partitionDescription->Names = pNames.GetRawArray();
partitionDescription->Reserved = NULL;
statefulDescription->PartitionSchemeDescription = partitionDescription.GetRawPointer();
}
else
{
statefulDescription->PartitionScheme = FABRIC_PARTITION_SCHEME_SINGLETON;
statefulDescription->PartitionSchemeDescription = NULL;
}
size_t metricCount = metrics_.size();
statefulDescription->MetricCount = static_cast<ULONG>(metricCount);
if (metricCount > 0)
{
auto metrics = heap.AddArray<FABRIC_SERVICE_LOAD_METRIC_DESCRIPTION>(metricCount);
statefulDescription->Metrics = metrics.GetRawArray();
for (size_t metricIndex = 0; metricIndex < metricCount; ++metricIndex)
{
metrics[metricIndex].Name = heap.AddString(metrics_[metricIndex].Name);
metrics[metricIndex].Weight = metrics_[metricIndex].Weight;
metrics[metricIndex].PrimaryDefaultLoad = metrics_[metricIndex].PrimaryDefaultLoad;
metrics[metricIndex].SecondaryDefaultLoad = metrics_[metricIndex].SecondaryDefaultLoad;
}
}
else
{
statefulDescription->Metrics = nullptr;
}
auto statefulDescriptionEx1 = heap.AddItem<FABRIC_STATEFUL_SERVICE_DESCRIPTION_EX1>();
statefulDescription->Reserved = statefulDescriptionEx1.GetRawPointer();
// policy description
size_t policyCount = placementPolicies_.size();
if (policyCount > 0)
{
auto policyDescription = heap.AddItem<FABRIC_SERVICE_PLACEMENT_POLICY_LIST>();
statefulDescriptionEx1->PolicyList = policyDescription.GetRawPointer();
policyDescription->PolicyCount = static_cast<ULONG>(policyCount);
auto policies = heap.AddArray<FABRIC_SERVICE_PLACEMENT_POLICY_DESCRIPTION>(policyCount);
policyDescription->Policies = policies.GetRawArray();
for (size_t policyIndex = 0; policyIndex < policyCount; ++policyIndex)
{
placementPolicies_[policyIndex].ToPublicApi(heap, policies[policyIndex]);
}
}
else
{
statefulDescriptionEx1->PolicyList = nullptr;
}
auto failoverSettings = heap.AddItem<FABRIC_STATEFUL_SERVICE_FAILOVER_SETTINGS>();
auto failoverSettingsEx1 = heap.AddItem<FABRIC_STATEFUL_SERVICE_FAILOVER_SETTINGS_EX1>();
failoverSettings->Reserved = failoverSettingsEx1.GetRawPointer();
statefulDescriptionEx1->FailoverSettings = failoverSettings.GetRawPointer();
failoverSettings->Flags |= FABRIC_STATEFUL_SERVICE_SETTINGS_REPLICA_RESTART_WAIT_DURATION;
failoverSettings->ReplicaRestartWaitDurationSeconds = replicaRestartWaitDurationSeconds_;
failoverSettings->Flags |= FABRIC_STATEFUL_SERVICE_SETTINGS_QUORUM_LOSS_WAIT_DURATION;
failoverSettings->QuorumLossWaitDurationSeconds = quorumLossWaitDurationSeconds_;
failoverSettings->Flags |= FABRIC_STATEFUL_SERVICE_SETTINGS_STANDBY_REPLICA_KEEP_DURATION;
failoverSettingsEx1->StandByReplicaKeepDurationSeconds = standByReplicaKeepDurationSeconds_;
auto statefulDescriptionEx2 = heap.AddItem<FABRIC_STATEFUL_SERVICE_DESCRIPTION_EX2>();
statefulDescriptionEx1->Reserved = statefulDescriptionEx2.GetRawPointer();
statefulDescriptionEx2->IsDefaultMoveCostSpecified = isDefaultMoveCostSpecified_;
statefulDescriptionEx2->DefaultMoveCost = defaultMoveCost_;
auto statefulDescriptionEx3 = heap.AddItem<FABRIC_STATEFUL_SERVICE_DESCRIPTION_EX3>();
statefulDescriptionEx2->Reserved = statefulDescriptionEx3.GetRawPointer();
statefulDescriptionEx3->ServicePackageActivationMode = ServicePackageActivationMode::ToPublicApi(servicePackageActivationMode_);
statefulDescriptionEx3->ServiceDnsName = heap.AddString(serviceDnsName_);
auto statefulDescriptionEx4 = heap.AddItem<FABRIC_STATEFUL_SERVICE_DESCRIPTION_EX4>();
statefulDescriptionEx3->Reserved = statefulDescriptionEx4.GetRawPointer();
size_t scalingPolicyCount = scalingPolicies_.size();
if (scalingPolicyCount > 0)
{
statefulDescriptionEx4->ScalingPolicyCount = static_cast<ULONG>(scalingPolicyCount);
auto spArray = heap.AddArray<FABRIC_SERVICE_SCALING_POLICY>(scalingPolicyCount);
statefulDescriptionEx4->ServiceScalingPolicies = spArray.GetRawArray();
for (size_t spIndex = 0; spIndex < scalingPolicyCount; ++spIndex)
{
scalingPolicies_[spIndex].ToPublicApi(heap, spArray[spIndex]);
}
}
else
{
statefulDescriptionEx4->ScalingPolicyCount = 0;
statefulDescriptionEx4->ServiceScalingPolicies = nullptr;
}
}
else
{
auto statelessDescription = heap.AddItem<FABRIC_STATELESS_SERVICE_DESCRIPTION>();
serviceDescription.Kind = FABRIC_SERVICE_DESCRIPTION_KIND_STATELESS;
serviceDescription.Value = statelessDescription.GetRawPointer();
statelessDescription->ApplicationName = heap.AddString(applicationName_);
statelessDescription->ServiceName = heap.AddString(serviceName_);
statelessDescription->ServiceTypeName = heap.AddString(serviceTypeName_);
statelessDescription->InitializationData = initData.GetRawArray();
statelessDescription->InitializationDataSize = initDataSize;
statelessDescription->InstanceCount = instanceCount_;
statelessDescription->PlacementConstraints = heap.AddString(placementConstraints_);
statelessDescription->Reserved = NULL;
size_t correlationCount = correlations_.size();
statelessDescription->CorrelationCount = static_cast<ULONG>(correlationCount);
if (correlationCount > 0)
{
auto correlations = heap.AddArray<FABRIC_SERVICE_CORRELATION_DESCRIPTION>(correlationCount);
statelessDescription->Correlations = correlations.GetRawArray();
for (size_t correlationIndex = 0; correlationIndex < correlationCount; ++correlationIndex)
{
correlations[correlationIndex].ServiceName = heap.AddString(correlations_[correlationIndex].ServiceName);
correlations[correlationIndex].Scheme = correlations_[correlationIndex].Scheme;
}
}
else
{
statelessDescription->Correlations = nullptr;
}
if (partitionDescription_.Scheme == PartitionSchemeDescription::UniformInt64Range)
{
statelessDescription->PartitionScheme = FABRIC_PARTITION_SCHEME_UNIFORM_INT64_RANGE;
auto partitionDescription = heap.AddItem<FABRIC_UNIFORM_INT64_RANGE_PARTITION_SCHEME_DESCRIPTION>();
partitionDescription->PartitionCount = partitionDescription_.PartitionCount;
partitionDescription->LowKey = partitionDescription_.LowKey;
partitionDescription->HighKey = partitionDescription_.HighKey;
statelessDescription->PartitionSchemeDescription = partitionDescription.GetRawPointer();
}
else if (partitionDescription_.Scheme == PartitionSchemeDescription::Named)
{
statelessDescription->PartitionScheme = FABRIC_PARTITION_SCHEME_NAMED;
auto partitionDescription = heap.AddItem<FABRIC_NAMED_PARTITION_SCHEME_DESCRIPTION>();
partitionDescription->PartitionCount = partitionDescription_.PartitionCount;
auto pNames = heap.AddArray<LPCWSTR>(partitionDescription_.PartitionCount);
for (size_t i = 0; i < static_cast<size_t>(partitionDescription_.PartitionCount); ++i)
{
pNames[i] = heap.AddString(partitionDescription_.PartitionNames[i]);
}
partitionDescription->Names = pNames.GetRawArray();
statelessDescription->PartitionSchemeDescription = partitionDescription.GetRawPointer();
}
else
{
statelessDescription->PartitionScheme = FABRIC_PARTITION_SCHEME_SINGLETON;
statelessDescription->PartitionSchemeDescription = NULL;
}
size_t metricCount = metrics_.size();
statelessDescription->MetricCount = static_cast<ULONG>(metricCount);
if (metricCount > 0)
{
auto metrics = heap.AddArray<FABRIC_SERVICE_LOAD_METRIC_DESCRIPTION>(metricCount);
statelessDescription->Metrics = metrics.GetRawArray();
for (size_t metricIndex = 0; metricIndex < metricCount; ++metricIndex)
{
metrics[metricIndex].Name = heap.AddString(metrics_[metricIndex].Name);
metrics[metricIndex].Weight = metrics_[metricIndex].Weight;
metrics[metricIndex].PrimaryDefaultLoad = metrics_[metricIndex].PrimaryDefaultLoad;
metrics[metricIndex].SecondaryDefaultLoad = metrics_[metricIndex].SecondaryDefaultLoad;
}
}
else
{
statelessDescription->Metrics = nullptr;
}
auto statelessDescriptionEx1 = heap.AddItem<FABRIC_STATELESS_SERVICE_DESCRIPTION_EX1>();
statelessDescription->Reserved = statelessDescriptionEx1.GetRawPointer();
// policy description
size_t policyCount = placementPolicies_.size();
if (policyCount > 0)
{
auto policyDescription = heap.AddItem<FABRIC_SERVICE_PLACEMENT_POLICY_LIST>();
statelessDescriptionEx1->PolicyList = policyDescription.GetRawPointer();
policyDescription->PolicyCount = static_cast<ULONG>(policyCount);
auto policies = heap.AddArray<FABRIC_SERVICE_PLACEMENT_POLICY_DESCRIPTION>(policyCount);
policyDescription->Policies = policies.GetRawArray();
for (size_t policyIndex = 0; policyIndex < policyCount; ++policyIndex)
{
placementPolicies_[policyIndex].ToPublicApi(heap, policies[policyIndex]);
}
}
else
{
statelessDescriptionEx1->PolicyList = nullptr;
}
auto statelessDescriptionEx2 = heap.AddItem<FABRIC_STATELESS_SERVICE_DESCRIPTION_EX2>();
statelessDescriptionEx1->Reserved = statelessDescriptionEx2.GetRawPointer();
statelessDescriptionEx2->IsDefaultMoveCostSpecified = isDefaultMoveCostSpecified_;
statelessDescriptionEx2->DefaultMoveCost = defaultMoveCost_;
auto statelessDescriptionEx3 = heap.AddItem<FABRIC_STATELESS_SERVICE_DESCRIPTION_EX3>();
statelessDescriptionEx2->Reserved = statelessDescriptionEx3.GetRawPointer();
statelessDescriptionEx3->ServicePackageActivationMode = ServicePackageActivationMode::ToPublicApi(servicePackageActivationMode_);
statelessDescriptionEx3->ServiceDnsName = heap.AddString(serviceDnsName_);
auto statelessDescriptionEx4 = heap.AddItem<FABRIC_STATELESS_SERVICE_DESCRIPTION_EX4>();
statelessDescriptionEx3->Reserved = statelessDescriptionEx4.GetRawPointer();
size_t scalingPolicyCount = scalingPolicies_.size();
if (scalingPolicyCount > 0)
{
statelessDescriptionEx4->ScalingPolicyCount = static_cast<ULONG>(scalingPolicyCount);
            auto spArray = heap.AddArray<FABRIC_SERVICE_SCALING_POLICY>(scalingPolicyCount);
statelessDescriptionEx4->ServiceScalingPolicies = spArray.GetRawArray();
for (size_t spIndex = 0; spIndex < scalingPolicyCount; ++spIndex)
{
scalingPolicies_[spIndex].ToPublicApi(heap, spArray[spIndex]);
}
}
else
{
statelessDescriptionEx4->ScalingPolicyCount = 0;
statelessDescriptionEx4->ServiceScalingPolicies = nullptr;
}
}
}
ErrorCode PartitionedServiceDescWrapper::TraceAndGetErrorDetails(ErrorCodeValue::Enum errorCode, std::wstring && msg)
{
Trace.WriteWarning(FabricClientSource, "{0}", msg);
return ErrorCode(errorCode, move(msg));
}
ErrorCode PartitionedServiceDescWrapper::TraceNullArgumentAndGetErrorDetails(std::wstring && msg)
{
return TraceAndGetErrorDetails(ErrorCode::FromHResult(E_POINTER).ReadValue(), move(msg));
} | 15,372 |
645 | <gh_stars>100-1000
// Copyright 2011 Viewfinder. All rights reserved.
// Author: <NAME>.
#ifndef VIEWFINDER_TIMER_H
#define VIEWFINDER_TIMER_H
#include <algorithm>
#include "Logging.h"
#include "WallTime.h"
using std::min;
class WallTimer {
public:
WallTimer()
: total_time_(0),
start_time_(WallTime_Now()) {
}
void Reset() {
total_time_ = 0;
start_time_ = 0;
}
void Start() {
start_time_ = WallTime_Now();
}
void Restart() {
total_time_ = 0;
Start();
}
void Stop() {
if (start_time_ > 0) {
total_time_ += WallTime_Now() - start_time_;
start_time_ = 0;
}
}
WallTime Get() const {
WallTime r = total_time_;
if (start_time_ > 0) {
r += WallTime_Now() - start_time_;
}
return r;
}
double Milliseconds() const {
return 1000 * Get();
}
private:
WallTime total_time_;
WallTime start_time_;
};
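// Illustrative usage of WallTimer (a sketch):
//   WallTimer t;                       // timing starts at construction
//   DoWork();
//   LOG("work: %0.3f sec", t.Get());   // elapsed wall time so far
//   t.Stop();                          // freeze the accumulated total until Start()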
class ScopedTimer {
public:
ScopedTimer(const string& n)
: name_(n) {
timer_.Start();
}
~ScopedTimer() {
LOG("%s: %0.3f sec", name_.c_str(), timer_.Get());
}
private:
const string name_;
WallTimer timer_;
};
class AverageTimer {
public:
AverageTimer(int n)
: size_(n),
count_(0),
average_(0.0) {
}
void SetSize(int n) {
size_ = n;
int new_count = min(count_, size_);
if (new_count > 0) {
average_ = (average_ * count_) / new_count;
}
count_ = new_count;
}
void Add(WallTime value) {
if (count_ < size_) {
++count_;
}
average_ = (average_ * (count_ - 1) + value) / count_;
}
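  // Example (illustrative): with size_ >= 2, Add(1.0) followed by Add(3.0)
  // leaves Get() == 2.0, i.e. an incremental mean over at most size_ samples.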
WallTime Get() const {
return average_;
}
double Milliseconds() const {
return 1000 * Get();
}
private:
int size_;
int count_;
WallTime average_;
};
#endif // VIEWFINDER_TIMER_H
| 785 |
476 | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.prestosql.jdbc;
import com.google.common.collect.ImmutableMap;
import io.airlift.log.Logging;
import io.airlift.security.pem.PemReader;
import io.jsonwebtoken.Jwts;
import io.jsonwebtoken.SignatureAlgorithm;
import io.prestosql.plugin.tpch.TpchPlugin;
import io.prestosql.server.testing.TestingPrestoServer;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import java.io.File;
import java.net.URL;
import java.security.PrivateKey;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Base64;
import java.util.Map;
import java.util.Optional;
import java.util.Properties;
import static com.google.common.io.Files.asCharSource;
import static com.google.common.io.Resources.getResource;
import static io.jsonwebtoken.JwsHeader.KEY_ID;
import static io.prestosql.jdbc.TestPrestoDriver.closeQuietly;
import static io.prestosql.jdbc.TestPrestoDriver.waitForNodeRefresh;
import static java.lang.String.format;
import static java.nio.charset.StandardCharsets.US_ASCII;
import static java.util.Base64.getMimeDecoder;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertFalse;
import static org.testng.Assert.assertNotNull;
import static org.testng.Assert.assertTrue;
public class TestPrestoDriverAuth
{
private static final String TEST_CATALOG = "test_catalog";
private TestingPrestoServer server;
private byte[] defaultKey;
private byte[] hmac222;
private PrivateKey privateKey33;
@BeforeClass
public void setup()
throws Exception
{
Logging.initialize();
URL resource = getClass().getClassLoader().getResource("33.privateKey");
assertNotNull(resource, "key directory not found");
File keyDir = new File(resource.getFile()).getAbsoluteFile().getParentFile();
defaultKey = getMimeDecoder().decode(asCharSource(new File(keyDir, "default-key.key"), US_ASCII).read().getBytes(US_ASCII));
hmac222 = getMimeDecoder().decode(asCharSource(new File(keyDir, "222.key"), US_ASCII).read().getBytes(US_ASCII));
privateKey33 = PemReader.loadPrivateKey(new File(keyDir, "33.privateKey"), Optional.empty());
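        // The "${KID}" placeholder in the key-file path is resolved from the key ID in the
        // JWT header: tokens signed with KEY_ID "222" are checked against 222.key and
        // KEY_ID "33" against 33.key (the public half of the 33.privateKey used for signing
        // here); default-key.key appears to back tokens without a key ID (testSuccessDefaultKey).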
server = new TestingPrestoServer(ImmutableMap.<String, String>builder()
.put("http-server.authentication.type", "JWT")
.put("http.authentication.jwt.key-file", new File(keyDir, "${KID}.key").toString())
.put("http-server.https.enabled", "true")
.put("http-server.https.keystore.path", getResource("localhost.keystore").getPath())
.put("http-server.https.keystore.key", "changeit")
.build());
server.installPlugin(new TpchPlugin());
server.createCatalog(TEST_CATALOG, "tpch");
waitForNodeRefresh(server);
}
@AfterClass(alwaysRun = true)
public void teardown()
{
closeQuietly(server);
}
@Test
public void testSuccessDefaultKey()
throws Exception
{
String accessToken = Jwts.builder()
.setSubject("test")
.signWith(SignatureAlgorithm.HS512, defaultKey)
.compact();
try (Connection connection = createConnection(ImmutableMap.of("accessToken", accessToken))) {
try (Statement statement = connection.createStatement()) {
assertTrue(statement.execute("SELECT 123"));
ResultSet rs = statement.getResultSet();
assertTrue(rs.next());
assertEquals(rs.getLong(1), 123);
assertFalse(rs.next());
}
}
}
@Test
public void testSuccessHmac()
throws Exception
{
String accessToken = Jwts.builder()
.setSubject("test")
.setHeaderParam(KEY_ID, "222")
.signWith(SignatureAlgorithm.HS512, hmac222)
.compact();
try (Connection connection = createConnection(ImmutableMap.of("accessToken", accessToken))) {
try (Statement statement = connection.createStatement()) {
assertTrue(statement.execute("SELECT 123"));
ResultSet rs = statement.getResultSet();
assertTrue(rs.next());
assertEquals(rs.getLong(1), 123);
assertFalse(rs.next());
}
}
}
@Test
public void testSuccessPublicKey()
throws Exception
{
String accessToken = Jwts.builder()
.setSubject("test")
.setHeaderParam(KEY_ID, "33")
.signWith(SignatureAlgorithm.RS256, privateKey33)
.compact();
try (Connection connection = createConnection(ImmutableMap.of("accessToken", accessToken))) {
try (Statement statement = connection.createStatement()) {
assertTrue(statement.execute("SELECT 123"));
ResultSet rs = statement.getResultSet();
assertTrue(rs.next());
assertEquals(rs.getLong(1), 123);
assertFalse(rs.next());
}
}
}
@Test(expectedExceptions = SQLException.class, expectedExceptionsMessageRegExp = "Authentication failed: Unauthorized")
public void testFailedNoToken()
throws Exception
{
try (Connection connection = createConnection(ImmutableMap.of())) {
try (Statement statement = connection.createStatement()) {
statement.execute("SELECT 123");
}
}
}
@Test(expectedExceptions = SQLException.class, expectedExceptionsMessageRegExp = "Authentication failed: Unauthorized")
public void testFailedUnsigned()
throws Exception
{
String accessToken = Jwts.builder()
.setSubject("test")
.compact();
try (Connection connection = createConnection(ImmutableMap.of("accessToken", accessToken))) {
try (Statement statement = connection.createStatement()) {
statement.execute("SELECT 123");
}
}
}
@Test(expectedExceptions = SQLException.class, expectedExceptionsMessageRegExp = "Authentication failed: Unauthorized")
public void testFailedBadHmacSignature()
throws Exception
{
String accessToken = Jwts.builder()
.setSubject("test")
.signWith(SignatureAlgorithm.HS512, Base64.getEncoder().encodeToString("bad-key".getBytes(US_ASCII)))
.compact();
try (Connection connection = createConnection(ImmutableMap.of("accessToken", accessToken))) {
try (Statement statement = connection.createStatement()) {
statement.execute("SELECT 123");
}
}
}
@Test(expectedExceptions = SQLException.class, expectedExceptionsMessageRegExp = "Authentication failed: Unauthorized")
public void testFailedWrongPublicKey()
throws Exception
{
String accessToken = Jwts.builder()
.setSubject("test")
.setHeaderParam(KEY_ID, "42")
.signWith(SignatureAlgorithm.RS256, privateKey33)
.compact();
try (Connection connection = createConnection(ImmutableMap.of("accessToken", accessToken))) {
try (Statement statement = connection.createStatement()) {
statement.execute("SELECT 123");
}
}
}
@Test(expectedExceptions = SQLException.class, expectedExceptionsMessageRegExp = "Authentication failed: Unauthorized")
public void testFailedUnknownPublicKey()
throws Exception
{
String accessToken = Jwts.builder()
.setSubject("test")
.setHeaderParam(KEY_ID, "unknown")
.signWith(SignatureAlgorithm.RS256, privateKey33)
.compact();
try (Connection connection = createConnection(ImmutableMap.of("accessToken", accessToken))) {
try (Statement statement = connection.createStatement()) {
statement.execute("SELECT 123");
}
}
}
private Connection createConnection(Map<String, String> additionalProperties)
throws SQLException
{
String url = format("jdbc:presto://localhost:%s", server.getHttpsAddress().getPort());
Properties properties = new Properties();
properties.setProperty("user", "test");
properties.setProperty("SSL", "true");
properties.setProperty("SSLTrustStorePath", getResource("localhost.truststore").getPath());
properties.setProperty("SSLTrustStorePassword", "<PASSWORD>");
properties.putAll(additionalProperties);
return DriverManager.getConnection(url, properties);
}
}
| 3,827 |
717 | <reponame>vvirag/openpose
#ifndef OPENPOSE_NET_NMS_BASE_HPP
#define OPENPOSE_NET_NMS_BASE_HPP
#include <openpose/core/common.hpp>
namespace op
{
template <typename T>
OP_API void nmsCpu(T* targetPtr, int* kernelPtr, const T* const sourcePtr, const T threshold,
const std::array<int, 4>& targetSize, const std::array<int, 4>& sourceSize);
template <typename T>
OP_API void nmsGpu(T* targetPtr, int* kernelPtr, const T* const sourcePtr, const T threshold,
const std::array<int, 4>& targetSize, const std::array<int, 4>& sourceSize);
template <typename T>
OP_API void nmsOcl(T* targetPtr, int* kernelPtr, const T* const sourcePtr, const T threshold,
const std::array<int, 4>& targetSize, const std::array<int, 4>& sourceSize,
const int gpuID = 0);
}
#endif // OPENPOSE_NET_NMS_BASE_HPP
| 401 |
412 | <reponame>ParkerJX/datart
package datart.server.base.params;
import lombok.Data;
@Data
public class BaseUpdateParam {
private String id;
}
| 55 |
2,542 | <reponame>AnthonyM/service-fabric
// ------------------------------------------------------------
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License (MIT). See License.txt in the repo root for license information.
// ------------------------------------------------------------
#pragma once
namespace Hosting2
{
class RuntimeRegistration
{
DENY_COPY(RuntimeRegistration)
public:
RuntimeRegistration(FabricRuntimeContext const & runtimeContext);
__declspec(property(get=get_RuntimeContext)) FabricRuntimeContext const& RuntimeContext;
inline FabricRuntimeContext const& get_RuntimeContext() const {return runtimeContext_;};
void WriteTo(Common::TextWriter& w, Common::FormatOptions const&) const;
private:
FabricRuntimeContext runtimeContext_;
};
}
| 243 |
558 | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# ==============================================================================
import functools
import os
import posixpath
import random
import socket
import sys
import time
from urllib.parse import urlparse
import pytest
import tensorflow as tf
import tensorflow_io as tfio # pylint: disable=unused-import
# `ROOT_PREFIX` shouldn't be used directly in tests.
ROOT_PREFIX = f"tf-io-root-{int(time.time())}/"
# This is the number of attributes each filesystem should return in `*_fs`.
NUM_ATR_FS = 6
S3_URI = "s3"
AZ_URI = "az"
AZ_DSN_URI = "az_dsn"
HTTPS_URI = "https"
GCS_URI = "gs"
HDFS_URI = "hdfs"
def mock_patchs(monkeypatch, patchs):
if isinstance(patchs, dict):
for key, value in patchs.items():
if value is None:
monkeypatch.delenv(key, raising=False)
else:
monkeypatch.setenv(key, value)
elif callable(patchs):
patchs(monkeypatch)
else:
pass
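# Illustrative use of `mock_patchs` (a sketch; `monkeypatch` is the pytest fixture):
#   mock_patchs(monkeypatch, {"AWS_REGION": "us-east-1", "S3_ENDPOINT": None})
#     -> sets AWS_REGION and removes S3_ENDPOINT for the current test
#   mock_patchs(monkeypatch, lambda mp: mp.setattr(tf.io.gfile.GFile, "seekable", lambda _: False))
#     -> delegates the patching to the callable (as test_gfile_GFile_readable does)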
@pytest.fixture(autouse=True)
def reload_filesystem():
    # We still need a way to reload `tensorflow` or the filesystem plugins, since
    # the environment variables are only read the first time a filesystem is used.
pass
# Helper to check if we should skip tests for an `uri`.
def should_skip(uri, check_only=True):
message = None
if uri == S3_URI and sys.platform in ("win32", "darwin"):
message = "TODO: `s3` emulator not setup properly on macOS/Windows yet"
elif uri in (AZ_URI, AZ_DSN_URI) and sys.platform == "win32":
message = "TODO: `az` does not work on Windows yet"
elif uri == GCS_URI and sys.platform in ("win32", "darwin"):
message = "TODO: `gs` does not work on Windows yet"
elif uri == HDFS_URI and sys.platform in ("win32", "darwin"):
message = "TODO: `hdfs` does not work properly on macOS/Windows yet"
if message is not None:
if check_only:
return True
else:
pytest.skip(message)
else:
return False
@pytest.fixture(scope="module")
def s3_fs():
if should_skip(S3_URI):
yield [None] * NUM_ATR_FS
return
import boto3
monkeypatch = pytest.MonkeyPatch()
bucket_name = os.environ.get("S3_TEST_BUCKET")
client = None
    # This means we are running against the emulator.
if bucket_name is None:
endpoint_url = "http://localhost:4566"
monkeypatch.setenv("AWS_REGION", "us-east-1")
monkeypatch.setenv("AWS_ACCESS_KEY_ID", "TEST")
monkeypatch.setenv("AWS_SECRET_ACCESS_KEY", "TEST")
monkeypatch.setenv("S3_ENDPOINT", endpoint_url)
bucket_name = f"tf-io-bucket-s3-{int(time.time())}"
client = boto3.client("s3", endpoint_url=endpoint_url)
client.create_bucket(Bucket=bucket_name)
else:
# TODO(vnvo2409): Implement for testing against production scenario
pass
client.put_object(Bucket=bucket_name, Key=ROOT_PREFIX, Body="")
def parse(path):
res = urlparse(path, scheme=S3_URI, allow_fragments=False)
return res.netloc, res.path[1:]
def path_to(*args):
return f"{S3_URI}://{bucket_name}/{posixpath.join(ROOT_PREFIX, *args)}"
def read(path):
bucket_name, key_name = parse(path)
response = client.get_object(Bucket=bucket_name, Key=key_name)
return response["Body"].read()
def write(path, body):
bucket_name, key_name = parse(path)
client.put_object(Bucket=bucket_name, Key=key_name, Body=body)
def mkdirs(path):
if path[-1] != "/":
path += "/"
write(path, b"")
yield path_to, read, write, mkdirs, posixpath.join, (client, bucket_name)
monkeypatch.undo()
@pytest.fixture(scope="module")
def az_fs():
if should_skip(AZ_URI):
yield [None] * NUM_ATR_FS
return
from azure.storage.blob import ContainerClient
monkeypatch = pytest.MonkeyPatch()
container_name = os.environ.get("AZ_TEST_CONTAINER")
account = None
client = None
    # This means we are running against the emulator.
if container_name is None:
monkeypatch.setenv("TF_AZURE_USE_DEV_STORAGE", "1")
container_name = f"tf-io-bucket-az-{int(time.time())}"
account = "devstoreaccount1"
conn_str = (
"DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;"
"AccountKey=<KEY>"
"/K1SZFPTOtr/KBHBeksoGMGw==;"
"BlobEndpoint=http://127.0.0.1:10000/devstoreaccount1;"
)
client = ContainerClient.from_connection_string(conn_str, container_name)
client.create_container()
else:
# TODO(vnvo2409): Implement for testing against production scenario
pass
client.upload_blob(ROOT_PREFIX, b"")
def parse(path):
res = urlparse(path, scheme=AZ_URI, allow_fragments=False)
return res.path.split("/", 2)[2]
def path_to(*args):
return f"{AZ_URI}://{account}/{container_name}/{posixpath.join(ROOT_PREFIX, *args)}"
def read(path):
key_name = parse(path)
return client.download_blob(key_name).content_as_bytes()
def write(path, body):
key_name = parse(path)
client.upload_blob(key_name, body)
def mkdirs(path):
if path[-1] == "/":
write(path, b"")
yield path_to, read, write, mkdirs, posixpath.join, (
client,
container_name,
account,
)
monkeypatch.undo()
@pytest.fixture(scope="module")
def az_dsn_fs(az_fs):
if should_skip(AZ_DSN_URI):
yield [None] * NUM_ATR_FS
return
_, read, write, mkdirs, join, fs_internal = az_fs
_, container_name, account = fs_internal
def path_to_dsn(*args):
return f"{AZ_URI}://{account}.blob.core.windows.net/{container_name}/{posixpath.join(ROOT_PREFIX, *args)}"
yield path_to_dsn, read, write, mkdirs, join, fs_internal
@pytest.fixture(scope="module")
def https_fs():
if should_skip(HTTPS_URI):
yield [None] * NUM_ATR_FS
return
def path_to(*_):
return f"{HTTPS_URI}://www.apache.org/licenses/LICENSE-2.0.txt"
def read(_):
pass
def write(*_):
pass
def mkdirs(_):
pass
yield path_to, read, write, mkdirs, posixpath.join, None
@pytest.fixture(scope="module")
def gcs_fs():
if should_skip(GCS_URI):
yield [None] * NUM_ATR_FS
return
import tensorflow_io_gcs_filesystem
from google.cloud import storage
monkeypatch = pytest.MonkeyPatch()
bucket_name = os.environ.get("GCS_TEST_BUCKET")
bucket = None
    # This means we are running against the emulator.
if bucket_name is None:
monkeypatch.setenv("STORAGE_EMULATOR_HOST", "http://localhost:9099")
monkeypatch.setenv("CLOUD_STORAGE_EMULATOR_ENDPOINT", "http://localhost:9099")
bucket_name = f"tf-io-bucket-gs-{int(time.time())}"
client = storage.Client.create_anonymous_client()
client.project = "test_project"
bucket = client.create_bucket(bucket_name)
else:
# TODO(vnvo2409): Implement for testing against production scenario
pass
def parse(path):
res = urlparse(path, scheme=GCS_URI, allow_fragments=False)
return res.path[1:]
def path_to(*args):
return f"{GCS_URI}://{bucket_name}/{posixpath.join(ROOT_PREFIX, *args)}"
def read(path):
key_name = parse(path)
blob = bucket.get_blob(key_name)
return blob.download_as_bytes()
def write(path, body):
key_name = parse(path)
blob = bucket.blob(key_name)
blob.upload_from_string(body)
def mkdirs(path):
if path[-1] != "/":
path += "/"
write(path, b"")
yield path_to, read, write, mkdirs, posixpath.join, None
monkeypatch.undo()
@pytest.fixture(scope="module")
def hdfs_fs():
if should_skip(HDFS_URI):
yield [None] * NUM_ATR_FS
return
from pyarrow.fs import HadoopFileSystem
monkeypatch = pytest.MonkeyPatch()
hdfs_host = os.environ.get("HDFS_HOST")
hdfs_port = int(os.environ.get("HDFS_PORT", 9000))
if hdfs_host is None:
hdfs_host = socket.gethostbyname(socket.gethostname())
hdfs = HadoopFileSystem(hdfs_host, hdfs_port)
def path_to(*args):
return (
f"{HDFS_URI}://{hdfs_host}:{hdfs_port}/{posixpath.join(ROOT_PREFIX, *args)}"
)
def read(path):
f = hdfs.open_input_stream(path)
return f.readall()
def write(path, body):
with hdfs.open_output_stream(path) as f:
f.write(body)
def mkdirs(path):
hdfs.create_dir(path, recursive=True)
yield path_to, read, write, mkdirs, posixpath.join, None
monkeypatch.undo()
@pytest.fixture
def fs(request, s3_fs, az_fs, az_dsn_fs, https_fs, gcs_fs, hdfs_fs):
path_to, read, write, mkdirs, join, internal = [None] * NUM_ATR_FS
test_fs_uri = request.param
real_uri = test_fs_uri
should_skip(test_fs_uri, check_only=False)
if test_fs_uri == S3_URI:
path_to, read, write, mkdirs, join, internal = s3_fs
elif test_fs_uri == AZ_URI:
path_to, read, write, mkdirs, join, internal = az_fs
elif test_fs_uri == AZ_DSN_URI:
real_uri = AZ_URI
path_to, read, write, mkdirs, join, internal = az_dsn_fs
elif test_fs_uri == HTTPS_URI:
path_to, read, write, mkdirs, join, internal = https_fs
elif test_fs_uri == GCS_URI:
path_to, read, write, mkdirs, join, internal = gcs_fs
elif test_fs_uri == HDFS_URI:
path_to, read, write, mkdirs, join, internal = hdfs_fs
path_to_rand = None
test_patchs = request.getfixturevalue("patchs")
if (test_fs_uri, test_patchs) in fs.path_to_rand_cache:
path_to_rand = fs.path_to_rand_cache[(test_fs_uri, test_patchs)]
else:
path_to_rand = functools.partial(path_to, str(random.getrandbits(32)))
mkdirs(path_to_rand(""))
fs.path_to_rand_cache[(test_fs_uri, test_patchs)] = path_to_rand
yield real_uri, path_to_rand, read, write, mkdirs, join, internal
fs.path_to_rand_cache = {}
@pytest.mark.parametrize(
"fs, patchs",
[
(S3_URI, None),
(AZ_URI, None),
(AZ_DSN_URI, None),
(GCS_URI, None),
(HDFS_URI, None),
],
indirect=["fs"],
)
def test_init(fs, patchs, monkeypatch):
_, path_to, _, _, _, _, _ = fs
mock_patchs(monkeypatch, patchs)
assert tf.io.gfile.exists(path_to("")) is True
@pytest.mark.parametrize(
"fs, patchs",
[
(S3_URI, None),
(AZ_URI, None),
(AZ_DSN_URI, None),
(GCS_URI, None),
(HDFS_URI, None),
],
indirect=["fs"],
)
def test_io_read_file(fs, patchs, monkeypatch):
_, path_to, _, write, _, _, _ = fs
mock_patchs(monkeypatch, patchs)
fname = path_to("test_io_read_file")
body = b"abcdefghijklmn"
write(fname, body)
assert tf.io.read_file(fname) == body
@pytest.mark.parametrize(
"fs, patchs",
[
(S3_URI, None),
(AZ_URI, None),
(AZ_DSN_URI, None),
(GCS_URI, None),
(HDFS_URI, None),
],
indirect=["fs"],
)
def test_io_write_file(fs, patchs, monkeypatch):
_, path_to, read, _, _, _, _ = fs
mock_patchs(monkeypatch, patchs)
fname = path_to("test_io_write_file")
assert tf.io.gfile.exists(fname) is False
body = b"abcdefghijklmn"
tf.io.write_file(fname, body)
assert read(fname) == body
def get_readable_body(uri):
if uri != HTTPS_URI:
num_lines = 10
base_body = b"abcdefghijklmn\n"
lines = [base_body] * num_lines
body = b"".join(lines)
return body
else:
local_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "test_http", "LICENSE-2.0.txt"
)
with open(local_path, "rb") as f:
return f.read()
@pytest.mark.parametrize(
"fs, patchs",
[
(
S3_URI,
# `use_multi_part_download` does not work with `seekable`.
lambda monkeypatch: monkeypatch.setattr(
tf.io.gfile.GFile, "seekable", lambda _: False
),
),
(AZ_URI, None),
(HTTPS_URI, None),
(GCS_URI, None),
(HDFS_URI, None),
],
indirect=["fs"],
)
def test_gfile_GFile_readable(fs, patchs, monkeypatch):
uri, path_to, _, write, _, _, _ = fs
mock_patchs(monkeypatch, patchs)
fname = path_to("test_gfile_GFile_readable")
body = get_readable_body(uri)
lines = body.splitlines(True)
write(fname, body)
# Simple
with tf.io.gfile.GFile(fname, "rb") as f:
file_read = f.read()
assert file_read == body
# Notfound
# TODO(vnvo2409): `az` should raise `tf.errors.NotFoundError`.
if uri != AZ_URI:
with pytest.raises(tf.errors.NotFoundError):
fname_not_found = fname + "_not_found"
with tf.io.gfile.GFile(fname_not_found, "rb") as f:
_ = f.read()
# Read length
with tf.io.gfile.GFile(fname, "rb") as f:
read_length = 10
file_read = f.read(read_length)
assert file_read == body[:read_length]
file_read = f.read()
assert file_read == body[read_length:]
# Readline
with tf.io.gfile.GFile(fname, "rb") as f:
line_count = 0
while True:
line = f.readline()
if not line:
break
line_count += 1
assert line == lines[line_count - 1]
assert line_count == len(lines)
# Readlines
with tf.io.gfile.GFile(fname, "rb") as f:
gfile_lines = f.readlines()
assert gfile_lines == lines
# Seek/Tell
with tf.io.gfile.GFile(fname, "rb") as f:
assert f.size() == len(body)
if f.seekable():
seek_size = 15
read_length = 10
f.seek(seek_size)
file_read = f.read(read_length)
assert f.tell() == seek_size + read_length
assert file_read == body[seek_size : seek_size + read_length]
@pytest.mark.parametrize(
"fs, patchs",
[
(S3_URI, None),
(AZ_URI, None),
(HTTPS_URI, None),
(GCS_URI, None),
(HDFS_URI, None),
],
indirect=["fs"],
)
def test_dataset_from_remote_filename(fs, patchs, monkeypatch):
uri, path_to, _, write, _, _, _ = fs
mock_patchs(monkeypatch, patchs)
fname = path_to("test_dataset_from_remote_filename")
body = get_readable_body(uri)
lines = body.splitlines(True)
write(fname, body)
# TextLineDataset
line_dataset = tf.data.TextLineDataset(fname)
count_line_dataset = 0
for line in line_dataset:
assert line == lines[count_line_dataset].rstrip()
count_line_dataset += 1
assert count_line_dataset == len(lines)
@pytest.mark.parametrize(
"fs, patchs",
[(S3_URI, None), (AZ_URI, None), (GCS_URI, None), (HDFS_URI, None)],
indirect=["fs"],
)
def test_gfile_GFile_writable(fs, patchs, monkeypatch):
uri, path_to, read, _, _, _, _ = fs
mock_patchs(monkeypatch, patchs)
fname = path_to("test_gfile_GFile_writable")
assert tf.io.gfile.exists(fname) is False
num_lines = 10
base_body = b"abcdefghijklmn\n"
body = base_body * num_lines
# Simple
with tf.io.gfile.GFile(fname, "wb") as f:
f.write(body)
f.flush()
assert read(fname) == body
# Append
# TODO(vnvo2409): implement `az` appendable file.
# TODO(vnvo2409): implement `hdfs` appendable file.
if uri != AZ_URI and uri != HDFS_URI:
with tf.io.gfile.GFile(fname, "ab") as f:
f.write(base_body)
f.flush()
assert read(fname) == body + base_body
@pytest.mark.parametrize(
"fs, patchs",
[(S3_URI, None), (AZ_URI, None), (GCS_URI, None), (HDFS_URI, None)],
indirect=["fs"],
)
def test_gfile_isdir(fs, patchs, monkeypatch):
_, path_to, _, write, mkdirs, join, _ = fs
mock_patchs(monkeypatch, patchs)
root_path = "test_gfile_isdir"
dname = path_to(root_path)
fname = join(dname, "fname")
mkdirs(dname)
write(fname, b"123456789")
assert tf.io.gfile.isdir(dname) is True
assert tf.io.gfile.isdir(fname) is False
@pytest.mark.parametrize(
"fs, patchs",
[(S3_URI, None), (AZ_URI, None), (GCS_URI, None), (HDFS_URI, None)],
indirect=["fs"],
)
def test_gfile_listdir(fs, patchs, monkeypatch):
uri, path_to, _, write, mkdirs, join, _ = fs
mock_patchs(monkeypatch, patchs)
root_path = "test_gfile_listdir"
dname = path_to(root_path)
mkdirs(dname)
num_childs = 5
childrens = [None] * num_childs
childrens[0] = join(dname, "subdir")
# TODO(vnvo2409): `gs` filesystem requires `/` at the end of directory's path.
    # Consider whether we could change this behavior to match the other filesystems.
if uri == GCS_URI:
childrens[0] += "/"
mkdirs(childrens[0])
body = b"123456789"
for i in range(1, num_childs):
childrens[i] = join(dname, f"child_{i}")
write(childrens[i], body)
write(join(childrens[0], f"subchild_{i}"), body)
entries = tf.io.gfile.listdir(dname)
assert sorted(childrens) == sorted([join(dname, entry) for entry in entries])
@pytest.mark.parametrize(
"fs, patchs",
[(S3_URI, None), (AZ_URI, None), (GCS_URI, None), (HDFS_URI, None)],
indirect=["fs"],
)
def test_gfile_makedirs(fs, patchs, monkeypatch):
_, path_to, _, write, _, join, _ = fs
mock_patchs(monkeypatch, patchs)
root_path = "test_gfile_makedirs/"
dname = path_to(root_path)
subdname = join(dname, "subdir_1")
assert tf.io.gfile.exists(dname) is False
assert tf.io.gfile.exists(subdname) is False
tf.io.gfile.mkdir(subdname)
write(join(subdname, "fname"), b"123456789")
assert tf.io.gfile.isdir(subdname) is True
@pytest.mark.parametrize(
"fs, patchs",
[(S3_URI, None), (AZ_URI, None), (GCS_URI, None), (HDFS_URI, None)],
indirect=["fs"],
)
def test_gfile_remove(fs, patchs, monkeypatch):
_, path_to, read, write, _, _, _ = fs
mock_patchs(monkeypatch, patchs)
fname = path_to("test_gfile_remove")
body = b"123456789"
write(fname, body)
tf.io.gfile.remove(fname)
assert tf.io.gfile.exists(fname) is False
with pytest.raises(Exception):
read(fname)
@pytest.mark.parametrize(
"fs, patchs",
[(S3_URI, None), (AZ_URI, None), (GCS_URI, None), (HDFS_URI, None)],
indirect=["fs"],
)
def test_gfile_rmtree(fs, patchs, monkeypatch):
_, path_to, _, write, mkdirs, join, _ = fs
mock_patchs(monkeypatch, patchs)
num_entries = 3
trees = [path_to("test_gfile_rmtree")] * num_entries
mkdirs(trees[0])
for i in range(1, num_entries - 1):
trees[i] = join(trees[i - 1], f"subdir_{i}")
mkdirs(trees[i])
trees[-1] = join(trees[-2], "fname")
write(trees[-1], b"123456789")
tf.io.gfile.rmtree(trees[0])
assert [tf.io.gfile.exists(entry) for entry in trees] == [False] * num_entries
# TODO(vnvo2409): `az` copy operations causes an infinite loop.
@pytest.mark.parametrize(
"fs, patchs", [(S3_URI, None), (GCS_URI, None), (HDFS_URI, None)], indirect=["fs"]
)
def test_gfile_copy(fs, patchs, monkeypatch):
_, path_to, read, write, _, _, _ = fs
mock_patchs(monkeypatch, patchs)
src = path_to("test_gfile_copy_src")
dst = path_to("test_gfile_copy_dst")
body = b"123456789"
write(src, body)
tf.io.gfile.copy(src, dst)
assert read(dst) == body
new_body = body + body.capitalize()
write(src, new_body)
with pytest.raises(tf.errors.AlreadyExistsError):
tf.io.gfile.copy(src, dst)
assert read(dst) == body
tf.io.gfile.copy(src, dst, overwrite=True)
assert read(dst) == new_body
@pytest.mark.parametrize(
"fs, patchs",
[(S3_URI, None), (AZ_URI, None), (GCS_URI, None), (HDFS_URI, None)],
indirect=["fs"],
)
def test_gfile_rename(fs, patchs, monkeypatch):
_, path_to, read, write, _, _, _ = fs
mock_patchs(monkeypatch, patchs)
src = path_to("test_gfile_rename_src")
dst = path_to("test_gfile_rename_dst")
body = b"123456789"
write(src, body)
tf.io.gfile.rename(src, dst)
assert read(dst) == body
assert tf.io.gfile.exists(src) is False
new_body = body + body.capitalize()
write(src, new_body)
with pytest.raises(tf.errors.AlreadyExistsError):
tf.io.gfile.rename(src, dst)
assert read(dst) == body
tf.io.gfile.rename(src, dst, overwrite=True)
assert read(dst) == new_body
@pytest.mark.parametrize(
"fs, patchs",
[(S3_URI, None), (AZ_URI, None), (GCS_URI, None), (HDFS_URI, None)],
indirect=["fs"],
)
def test_gfile_glob(fs, patchs, monkeypatch):
_, path_to, _, write, _, join, _ = fs
mock_patchs(monkeypatch, patchs)
dname = path_to("test_gfile_glob/")
num_items = 3
    childs = [None] * num_items
for ext in ["txt", "md"]:
for i in range(num_items):
fname = join(dname, f"{i}.{ext}")
if ext == "txt":
childs[i] = fname
write(fname, b"123456789")
txt_files = tf.io.gfile.glob(join(dname, "*.txt"))
assert sorted(txt_files) == sorted(childs)
| 10,093 |
320 | #include "HideWin10VolumeOSD.h"
static HWND hWndInject = 0;
bool HideWin10VolumeOSD::Init()
{
if (hWndInject != 0)
{
return true;
}
hWndInject = FindOSDWindow(true);
if (hWndInject == 0)
{
return false;
}
HideOSD();
return true;
}
HWND HideWin10VolumeOSD::FindOSDWindow(bool bSilent)
{
HWND hwndRet = 0;
HWND hwndHost = 0;
int pairCount = 0;
// search for all windows with class 'NativeHWNDHost'
while ((hwndHost = FindWindowEx(0, hwndHost, L"NativeHWNDHost", L"")) != 0)
{
// if this window has a child with class 'DirectUIHWND' it might be the volume OSD
if (FindWindowEx(hwndHost, 0, L"DirectUIHWND", L"") != 0)
{
// if this is the only pair we are sure
if (pairCount == 0)
{
hwndRet = hwndHost;
}
pairCount++;
            // if there are more pairs, the criterion has failed...
if (pairCount > 1)
{
throw(L"Severe error: Multiple pairs found!");
return 0;
}
}
}
// if no window found yet, there is no OSD window at all
if (hwndRet == 0 && !bSilent)
{
throw(L"Severe error: OSD window not found!");
}
return hwndRet;
}
void HideWin10VolumeOSD::HideOSD()
{
ShowWindow(hWndInject, 6); // SW_MINIMIZE
}
void HideWin10VolumeOSD::ShowOSD()
{
ShowWindow(hWndInject, 9); // SW_RESTORE
// show window on the screen
keybd_event(VK_VOLUME_UP, 0, 0, 0);
keybd_event(VK_VOLUME_DOWN, 0, 0, 0);
}
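// --- Usage sketch (not part of the original file) ---
// A minimal, hypothetical caller. It assumes Init/HideOSD/ShowOSD are declared as
// static members in HideWin10VolumeOSD.h; the header is not shown here, so this is
// only an inference from the file-scope hWndInject handle above.
int main()
{
    if (HideWin10VolumeOSD::Init()) // finds the volume OSD window and minimizes it
    {
        // ... application runs with the built-in volume OSD hidden ...
        HideWin10VolumeOSD::ShowOSD(); // restore the OSD before exiting
    }
    return 0;
}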
| 729 |
530 | /*******************************************************************************
* Copyright 2014 <NAME>.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.bladecoder.engine.i18n;
import java.util.Locale;
import java.util.ResourceBundle;
import com.bladecoder.engine.util.EngineLogger;
public class I18N {
public static final char PREFIX = '@';
public static final String ENCODING = "UTF-8";
// public static final String ENCODING = "ISO-8859-1";
private ResourceBundle i18nWorld;
private ResourceBundle i18nChapter;
private Locale locale = Locale.getDefault();
private String i18nChapterFilename = null;
private String i18nWorldFilename = null;
public void loadChapter(String i18nChapterFilename) {
try {
i18nChapter = getBundle(i18nChapterFilename, false);
this.i18nChapterFilename = i18nChapterFilename;
} catch (Exception e) {
EngineLogger.error("ERROR LOADING BUNDLE: " + i18nChapterFilename);
}
}
public void loadWorld(String i18nWorldFilename) {
try {
i18nWorld = getBundle(i18nWorldFilename, true);
this.i18nWorldFilename = i18nWorldFilename;
} catch (Exception e) {
EngineLogger.error("ERROR LOADING BUNDLE: " + i18nWorldFilename);
}
}
public ResourceBundle getBundle(String filename, boolean clearCache) {
ResourceBundle rb = null;
try {
if (clearCache)
ResourceBundle.clearCache();
rb = ResourceBundle.getBundle(filename, locale, new I18NControl(ENCODING));
} catch (Exception e) {
EngineLogger.error("ERROR LOADING BUNDLE: " + filename);
}
return rb;
}
public void setLocale(Locale l) {
locale = l;
// RELOAD TRANSLATIONS
if (i18nWorld != null) {
loadWorld(i18nWorldFilename);
}
if (i18nChapter != null) {
loadChapter(i18nChapterFilename);
}
}
public String getString(String key) {
try {
return i18nChapter.getString(key);
} catch (Exception e) {
try {
return i18nWorld.getString(key);
} catch (Exception e2) {
EngineLogger.error("MISSING TRANSLATION KEY: " + key);
return key;
}
}
}
public Locale getCurrentLocale() {
return locale;
}
}
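// --- Usage sketch (not part of the original file) ---
// The bundle base names and the message key below are hypothetical; the sketch only
// exercises the public API of I18N defined above (load bundles, switch locale, look up a key).
class I18NUsageSketch {
    public static void main(String[] args) {
        I18N i18n = new I18N();
        i18n.loadWorld("i18n/world");           // assumed bundle base name
        i18n.loadChapter("i18n/chapter1");      // assumed bundle base name
        i18n.setLocale(Locale.ENGLISH);         // reloads both bundles for the new locale
        // Looks in the chapter bundle first, then the world bundle, then falls back to the key itself.
        System.out.println(i18n.getString("ui.title"));
    }
}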
| 897 |
2,144 | <reponame>aashitk/pinot
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.pinot.controller.helix.core;
import java.io.IOException;
import java.net.URI;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Date;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TimeZone;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import javax.annotation.Nullable;
import org.apache.commons.lang3.StringUtils;
import org.apache.helix.AccessOption;
import org.apache.helix.HelixAdmin;
import org.apache.helix.ZNRecord;
import org.apache.helix.model.ExternalView;
import org.apache.helix.model.IdealState;
import org.apache.helix.store.zk.ZkHelixPropertyStore;
import org.apache.pinot.common.metadata.ZKMetadataProvider;
import org.apache.pinot.common.utils.SegmentName;
import org.apache.pinot.common.utils.URIUtils;
import org.apache.pinot.spi.config.table.SegmentsValidationAndRetentionConfig;
import org.apache.pinot.spi.config.table.TableConfig;
import org.apache.pinot.spi.filesystem.PinotFS;
import org.apache.pinot.spi.filesystem.PinotFSFactory;
import org.apache.pinot.spi.utils.TimeUtils;
import org.apache.pinot.spi.utils.builder.TableNameBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class SegmentDeletionManager {
private static final Logger LOGGER = LoggerFactory.getLogger(SegmentDeletionManager.class);
private static final long MAX_DELETION_DELAY_SECONDS = 300L; // Maximum of 5 minutes back-off to retry the deletion
private static final long DEFAULT_DELETION_DELAY_SECONDS = 2L;
  // The retention date is written as a suffix to deleted segment files under the `Deleted_Segments` folder. For example:
// `Deleted_Segments/myTable/myTable_mySegment_0__RETENTION_UNTIL__202202021200` to indicate that this segment
// file will be permanently deleted after Feb 2nd 2022 12PM.
private static final String DELETED_SEGMENTS = "Deleted_Segments";
private static final String RETENTION_UNTIL_SEPARATOR = "__RETENTION_UNTIL__";
private static final String RETENTION_DATE_FORMAT_STR = "yyyyMMddHHmm";
private static final SimpleDateFormat RETENTION_DATE_FORMAT;
static {
RETENTION_DATE_FORMAT = new SimpleDateFormat(RETENTION_DATE_FORMAT_STR);
RETENTION_DATE_FORMAT.setTimeZone(TimeZone.getTimeZone("UTC"));
}
private final ScheduledExecutorService _executorService;
private final String _dataDir;
private final String _helixClusterName;
private final HelixAdmin _helixAdmin;
private final ZkHelixPropertyStore<ZNRecord> _propertyStore;
private final long _defaultDeletedSegmentsRetentionMs;
public SegmentDeletionManager(String dataDir, HelixAdmin helixAdmin, String helixClusterName,
ZkHelixPropertyStore<ZNRecord> propertyStore, int deletedSegmentsRetentionInDays) {
_dataDir = dataDir;
_helixAdmin = helixAdmin;
_helixClusterName = helixClusterName;
_propertyStore = propertyStore;
_defaultDeletedSegmentsRetentionMs = TimeUnit.DAYS.toMillis(deletedSegmentsRetentionInDays);
_executorService = Executors.newSingleThreadScheduledExecutor(new ThreadFactory() {
@Override
public Thread newThread(Runnable runnable) {
Thread thread = new Thread(runnable);
thread.setName("PinotHelixResourceManagerExecutorService");
return thread;
}
});
}
public void stop() {
_executorService.shutdownNow();
}
public void deleteSegments(String tableName, Collection<String> segmentIds) {
deleteSegments(tableName, segmentIds, (Long) null);
}
public void deleteSegments(String tableName, Collection<String> segmentIds,
@Nullable TableConfig tableConfig) {
deleteSegments(tableName, segmentIds, getRetentionMsFromTableConfig(tableConfig));
}
public void deleteSegments(String tableName, Collection<String> segmentIds,
@Nullable Long deletedSegmentsRetentionMs) {
deleteSegmentsWithDelay(tableName, segmentIds, deletedSegmentsRetentionMs, DEFAULT_DELETION_DELAY_SECONDS);
}
protected void deleteSegmentsWithDelay(String tableName, Collection<String> segmentIds,
Long deletedSegmentsRetentionMs, long deletionDelaySeconds) {
_executorService.schedule(new Runnable() {
@Override
public void run() {
deleteSegmentFromPropertyStoreAndLocal(tableName, segmentIds, deletedSegmentsRetentionMs,
deletionDelaySeconds);
}
}, deletionDelaySeconds, TimeUnit.SECONDS);
}
protected synchronized void deleteSegmentFromPropertyStoreAndLocal(String tableName, Collection<String> segmentIds,
Long deletedSegmentsRetentionMs, long deletionDelay) {
// Check if segment got removed from ExternalView or IdealState
ExternalView externalView = _helixAdmin.getResourceExternalView(_helixClusterName, tableName);
IdealState idealState = _helixAdmin.getResourceIdealState(_helixClusterName, tableName);
if (externalView == null || idealState == null) {
LOGGER.warn("Resource: {} is not set up in idealState or ExternalView, won't do anything", tableName);
return;
}
List<String> segmentsToDelete = new ArrayList<>(segmentIds.size()); // Has the segments that will be deleted
Set<String> segmentsToRetryLater = new HashSet<>(segmentIds.size()); // List of segments that we need to retry
try {
for (String segmentId : segmentIds) {
Map<String, String> segmentToInstancesMapFromExternalView = externalView.getStateMap(segmentId);
Map<String, String> segmentToInstancesMapFromIdealStates = idealState.getInstanceStateMap(segmentId);
if ((segmentToInstancesMapFromExternalView == null || segmentToInstancesMapFromExternalView.isEmpty()) && (
segmentToInstancesMapFromIdealStates == null || segmentToInstancesMapFromIdealStates.isEmpty())) {
segmentsToDelete.add(segmentId);
} else {
segmentsToRetryLater.add(segmentId);
}
}
} catch (Exception e) {
LOGGER.warn("Caught exception while checking helix states for table {} " + tableName, e);
segmentsToDelete.clear();
segmentsToDelete.addAll(segmentIds);
segmentsToRetryLater.clear();
}
if (!segmentsToDelete.isEmpty()) {
List<String> propStorePathList = new ArrayList<>(segmentsToDelete.size());
for (String segmentId : segmentsToDelete) {
String segmentPropertyStorePath = ZKMetadataProvider.constructPropertyStorePathForSegment(tableName, segmentId);
propStorePathList.add(segmentPropertyStorePath);
}
boolean[] deleteSuccessful = _propertyStore.remove(propStorePathList, AccessOption.PERSISTENT);
List<String> propStoreFailedSegs = new ArrayList<>(segmentsToDelete.size());
for (int i = 0; i < deleteSuccessful.length; i++) {
final String segmentId = segmentsToDelete.get(i);
if (!deleteSuccessful[i]) {
// remove API can fail because the prop store entry did not exist, so check first.
if (_propertyStore.exists(propStorePathList.get(i), AccessOption.PERSISTENT)) {
LOGGER.info("Could not delete {} from propertystore", propStorePathList.get(i));
segmentsToRetryLater.add(segmentId);
propStoreFailedSegs.add(segmentId);
}
}
}
segmentsToDelete.removeAll(propStoreFailedSegs);
removeSegmentsFromStore(tableName, segmentsToDelete, deletedSegmentsRetentionMs);
}
LOGGER.info("Deleted {} segments from table {}:{}", segmentsToDelete.size(), tableName,
segmentsToDelete.size() <= 5 ? segmentsToDelete : "");
if (!segmentsToRetryLater.isEmpty()) {
long effectiveDeletionDelay = Math.min(deletionDelay * 2, MAX_DELETION_DELAY_SECONDS);
LOGGER.info("Postponing deletion of {} segments from table {}", segmentsToRetryLater.size(), tableName);
deleteSegmentsWithDelay(tableName, segmentsToRetryLater, deletedSegmentsRetentionMs, effectiveDeletionDelay);
return;
}
}
public void removeSegmentsFromStore(String tableNameWithType, List<String> segments) {
removeSegmentsFromStore(tableNameWithType, segments, null);
}
public void removeSegmentsFromStore(String tableNameWithType, List<String> segments,
@Nullable Long deletedSegmentsRetentionMs) {
for (String segment : segments) {
removeSegmentFromStore(tableNameWithType, segment, deletedSegmentsRetentionMs);
}
}
protected void removeSegmentFromStore(String tableNameWithType, String segmentId,
@Nullable Long deletedSegmentsRetentionMs) {
// Ignore HLC segments as they are not stored in Pinot FS
if (SegmentName.isHighLevelConsumerSegmentName(segmentId)) {
return;
}
if (_dataDir != null) {
long retentionMs = deletedSegmentsRetentionMs == null
? _defaultDeletedSegmentsRetentionMs : deletedSegmentsRetentionMs;
String rawTableName = TableNameBuilder.extractRawTableName(tableNameWithType);
URI fileToDeleteURI = URIUtils.getUri(_dataDir, rawTableName, URIUtils.encode(segmentId));
PinotFS pinotFS = PinotFSFactory.create(fileToDeleteURI.getScheme());
if (retentionMs <= 0) {
// delete the segment file instantly if retention is set to zero
try {
if (pinotFS.delete(fileToDeleteURI, true)) {
LOGGER.info("Deleted segment {} from {}", segmentId, fileToDeleteURI.toString());
} else {
LOGGER.warn("Failed to delete segment {} from {}", segmentId, fileToDeleteURI.toString());
}
} catch (IOException e) {
LOGGER.warn("Could not delete segment {} from {}", segmentId, fileToDeleteURI.toString(), e);
}
} else {
// move the segment file to deleted segments first and let retention manager handler the deletion
String deletedFileName = deletedSegmentsRetentionMs == null ? URIUtils.encode(segmentId)
: getDeletedSegmentFileName(URIUtils.encode(segmentId), deletedSegmentsRetentionMs);
URI deletedSegmentMoveDestURI = URIUtils.getUri(_dataDir, DELETED_SEGMENTS, rawTableName, deletedFileName);
try {
if (pinotFS.exists(fileToDeleteURI)) {
// Overwrites the file if it already exists in the target directory.
if (pinotFS.move(fileToDeleteURI, deletedSegmentMoveDestURI, true)) {
// Updates last modified.
// Touch is needed here so that removeAgedDeletedSegments() works correctly.
pinotFS.touch(deletedSegmentMoveDestURI);
LOGGER.info("Moved segment {} from {} to {}", segmentId, fileToDeleteURI.toString(),
deletedSegmentMoveDestURI.toString());
} else {
LOGGER.warn("Failed to move segment {} from {} to {}", segmentId, fileToDeleteURI.toString(),
deletedSegmentMoveDestURI.toString());
}
} else {
LOGGER.warn("Failed to find local segment file for segment {}", fileToDeleteURI.toString());
}
} catch (IOException e) {
LOGGER.warn("Could not move segment {} from {} to {}", segmentId, fileToDeleteURI.toString(),
deletedSegmentMoveDestURI.toString(), e);
}
}
} else {
LOGGER.info("dataDir is not configured, won't delete segment {} from disk", segmentId);
}
}
/**
* Removes aged deleted segments from the deleted directory
*/
public void removeAgedDeletedSegments() {
if (_dataDir != null) {
URI deletedDirURI = URIUtils.getUri(_dataDir, DELETED_SEGMENTS);
PinotFS pinotFS = PinotFSFactory.create(deletedDirURI.getScheme());
try {
// Directly return when the deleted directory does not exist (no segment deleted yet)
if (!pinotFS.exists(deletedDirURI)) {
return;
}
if (!pinotFS.isDirectory(deletedDirURI)) {
LOGGER.warn("Deleted segments URI: {} is not a directory", deletedDirURI);
return;
}
String[] tableNameDirs = pinotFS.listFiles(deletedDirURI, false);
if (tableNameDirs == null) {
LOGGER.warn("Failed to list files from the deleted segments directory: {}", deletedDirURI);
return;
}
for (String tableNameDir : tableNameDirs) {
URI tableNameURI = URIUtils.getUri(tableNameDir);
// Get files that are aged
final String[] targetFiles = pinotFS.listFiles(tableNameURI, false);
int numFilesDeleted = 0;
for (String targetFile : targetFiles) {
URI targetURI = URIUtils.getUri(targetFile);
long deletionTimeMs = getDeletionTimeMsFromFile(targetFile, pinotFS.lastModified(targetURI));
if (System.currentTimeMillis() >= deletionTimeMs) {
if (!pinotFS.delete(targetURI, true)) {
LOGGER.warn("Cannot remove file {} from deleted directory.", targetURI.toString());
} else {
numFilesDeleted++;
}
}
}
if (numFilesDeleted == targetFiles.length) {
// Delete directory if it's empty
if (!pinotFS.delete(tableNameURI, false)) {
LOGGER.warn("The directory {} cannot be removed.", tableNameDir);
}
}
}
} catch (IOException e) {
LOGGER.error("Had trouble deleting directories: {}", deletedDirURI.toString(), e);
}
} else {
LOGGER.info("dataDir is not configured, won't delete any expired segments from deleted directory.");
}
}
private String getDeletedSegmentFileName(String fileName, long deletedSegmentsRetentionMs) {
return fileName + RETENTION_UNTIL_SEPARATOR + RETENTION_DATE_FORMAT.format(new Date(
System.currentTimeMillis() + deletedSegmentsRetentionMs));
}
private long getDeletionTimeMsFromFile(String targetFile, long lastModifiedTime) {
String[] split = StringUtils.splitByWholeSeparator(targetFile, RETENTION_UNTIL_SEPARATOR);
if (split.length == 2) {
try {
return RETENTION_DATE_FORMAT.parse(split[1]).getTime();
} catch (Exception e) {
LOGGER.warn("No retention suffix found for file: {}", targetFile);
}
}
LOGGER.info("Fallback to using default cluster retention config: {} ms", _defaultDeletedSegmentsRetentionMs);
return lastModifiedTime + _defaultDeletedSegmentsRetentionMs;
}
@Nullable
private static Long getRetentionMsFromTableConfig(@Nullable TableConfig tableConfig) {
if (tableConfig != null) {
SegmentsValidationAndRetentionConfig validationConfig = tableConfig.getValidationConfig();
if (!StringUtils.isEmpty(validationConfig.getDeletedSegmentsRetentionPeriod())) {
try {
return TimeUtils.convertPeriodToMillis(validationConfig.getDeletedSegmentsRetentionPeriod());
} catch (Exception e) {
LOGGER.warn("Unable to parse deleted segment retention config for table {}", tableConfig.getTableName(), e);
}
}
}
return null;
}
}
| 5,730 |
4,262 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.xslt;
import java.io.IOException;
import java.io.StringReader;
import java.io.StringWriter;
import javax.xml.transform.Source;
import javax.xml.transform.Transformer;
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.stream.StreamResult;
import javax.xml.transform.stream.StreamSource;
import net.sf.saxon.TransformerFactoryImpl;
import org.apache.camel.support.ResourceHelper;
import org.apache.camel.test.junit5.CamelTestSupport;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.assertEquals;
public class SaxonUriResolverTest extends CamelTestSupport {
private static final String XSL_PATH = "org/apache/camel/component/xslt/transform_includes_data.xsl";
private static final String XML_DATA = "<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?><root>1</root>";
private static final String XML_RESP = "<?xml version=\"1.0\" encoding=\"UTF-8\"?><MyDate>February</MyDate>";
@Test
public void test() throws Exception {
StringWriter writer = new StringWriter();
StreamResult result = new StreamResult(writer);
Source xsl = fromClasspath(XSL_PATH);
xsl.setSystemId("classpath:/" + XSL_PATH);
Source xml = fromString(XML_DATA);
TransformerFactory factory = new TransformerFactoryImpl();
Transformer transformer = factory.newTransformer(xsl);
transformer.setURIResolver(new XsltUriResolver(context(), XSL_PATH));
transformer.transform(xml, result);
assertEquals(XML_RESP, writer.toString());
}
protected Source fromString(String data) {
return new StreamSource(new StringReader(data));
}
protected Source fromClasspath(String path) throws IOException {
return new StreamSource(
ResourceHelper.resolveMandatoryResourceAsInputStream(context(), path));
}
}
| 858 |
2,211 | // Copyright (c) 2016 Intel Corporation. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "xwalk/runtime/common/xwalk_resource_delegate.h"
#include "base/files/file_path.h"
#include "base/path_service.h"
#include "ui/gfx/image/image.h"
namespace xwalk {
XWalkResourceDelegate::XWalkResourceDelegate() {}
XWalkResourceDelegate::~XWalkResourceDelegate() {}
base::FilePath XWalkResourceDelegate::GetPathForResourcePack(
const base::FilePath& pack_path,
ui::ScaleFactor scale_factor) {
return pack_path;
}
base::FilePath XWalkResourceDelegate::GetPathForLocalePack(
const base::FilePath& pack_path,
const std::string& locale) {
base::FilePath product_dir;
if (!PathService::Get(base::DIR_MODULE, &product_dir)) {
NOTREACHED();
}
return product_dir.
Append(FILE_PATH_LITERAL("locales")).
Append(FILE_PATH_LITERAL("xwalk")).
AppendASCII(locale + ".pak");
}
gfx::Image XWalkResourceDelegate::GetImageNamed(int resource_id) {
return gfx::Image();
}
gfx::Image XWalkResourceDelegate::GetNativeImageNamed(int resource_id) {
return gfx::Image();
}
base::RefCountedStaticMemory* XWalkResourceDelegate::LoadDataResourceBytes(
int resource_id,
ui::ScaleFactor scale_factor) {
return nullptr;
}
bool XWalkResourceDelegate::GetRawDataResource(int resource_id,
ui::ScaleFactor scale_factor,
base::StringPiece* value) {
return false;
}
bool XWalkResourceDelegate::GetLocalizedString(int message_id,
base::string16* value) {
return false;
}
} // namespace xwalk
| 702 |
3,508 | package com.fishercoder.solutions;
import java.util.Deque;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.Set;
public class _353 {
public class SnakeGame {
        private Set<Integer> set; //holds every point occupied by the snake body, for O(1) lookup when checking whether the snake bites itself
        private Deque<Integer> body; //holds the same points in order, so the head can be added and the tail removed efficiently
int[][] food;
int score;
int foodIndex;
int width;
int height;
/**
* Initialize your data structure here.
*
* @param width - screen width
* @param height - screen height
* @param food - A list of food positions
* E.g food = [[1,1], [1,0]] means the first food is positioned at [1,1], the second is at [1,0].
*/
public SnakeGame(int width, int height, int[][] food) {
this.set = new HashSet();
set.add(0);//initially at [0][0]
this.body = new LinkedList<Integer>();
body.offerLast(0);
this.food = food;
this.width = width;
this.height = height;
}
/**
* Moves the snake.
*
* @param direction - 'U' = Up, 'L' = Left, 'R' = Right, 'D' = Down
* @return The game's score after the move. Return -1 if game over.
* Game over when snake crosses the screen boundary or bites its body.
*/
public int move(String direction) {
if (score == -1) {
return -1;
}
//compute head
int rowHead = body.peekFirst() / width;
int colHead = body.peekFirst() % width;
switch (direction) {
case "U":
rowHead--;
break;
case "D":
rowHead++;
break;
case "L":
colHead--;
break;
default:
colHead++;
}
int newHead = rowHead * width + colHead;
            set.remove(body.peekLast()); //temporarily remove the tail: that cell frees up on this move, so moving into it is allowed (it is re-added below if food is eaten)
//if it hits the boundary
if (set.contains(newHead) || rowHead < 0 || colHead < 0 || rowHead >= height || colHead >= width) {
return score = -1;
}
//add head for the following two normal cases:
set.add(newHead);
body.offerFirst(newHead);
//normal eat case: keep tail, add head
if (foodIndex < food.length && rowHead == food[foodIndex][0] && colHead == food[foodIndex][1]) {
set.add(body.peekLast());//old tail does not change, so add it back to set since we removed it earlier
foodIndex++;
return ++score;
}
//normal move case without eating: move head and remove tail
body.pollLast();
return score;
}
}
/**
* Your SnakeGame object will be instantiated and called as such:
* SnakeGame obj = new SnakeGame(width, height, food);
* int param_1 = obj.move(direction);
*/
}
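// --- Usage sketch (not part of the original file) ---
// A walk-through of the classic example, traced against the implementation above:
// a 3x2 board, food at [1,2] then [0,1], snake starting at [0,0].
class SnakeGameUsageSketch {
    public static void main(String[] args) {
        _353 outer = new _353();
        _353.SnakeGame game = outer.new SnakeGame(3, 2, new int[][] {{1, 2}, {0, 1}});
        System.out.println(game.move("R")); // 0  - head moves to [0,1]
        System.out.println(game.move("D")); // 0  - head moves to [1,1]
        System.out.println(game.move("R")); // 1  - eats the food at [1,2], snake grows to length 2
        System.out.println(game.move("U")); // 1  - head moves to [0,2]
        System.out.println(game.move("L")); // 2  - eats the food at [0,1]
        System.out.println(game.move("U")); // -1 - head crosses the top boundary: game over
    }
}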
| 1,582 |
678 | /**
* This header is generated by class-dump-z 0.2b.
*
* Source: /System/Library/PrivateFrameworks/iTunesStoreUI.framework/iTunesStoreUI
*/
#import <iTunesStoreUI/iTunesStoreUI-Structs.h>
#import <iTunesStoreUI/SUItemCellContext.h>
@interface SUMediaItemCellContext : SUItemCellContext {
float _artworkWidth; // 56 = 0x38
int _hiddenMediaIconTypes; // 60 = 0x3c
BOOL _itemsHaveArtwork; // 64 = 0x40
}
@property(assign, nonatomic) BOOL itemsHaveArtwork; // G=0xb00e1; S=0xb00f1; @synthesize=_itemsHaveArtwork
@property(assign, nonatomic) int hiddenMediaIconTypes; // G=0xb00c1; S=0xb00d1; @synthesize=_hiddenMediaIconTypes
@property(assign, nonatomic) float artworkWidth; // G=0xb00a1; S=0xb00b1; @synthesize=_artworkWidth
// declared property setter: - (void)setItemsHaveArtwork:(BOOL)artwork; // 0xb00f1
// declared property getter: - (BOOL)itemsHaveArtwork; // 0xb00e1
// declared property setter: - (void)setHiddenMediaIconTypes:(int)types; // 0xb00d1
// declared property getter: - (int)hiddenMediaIconTypes; // 0xb00c1
// declared property setter: - (void)setArtworkWidth:(float)width; // 0xb00b1
// declared property getter: - (float)artworkWidth; // 0xb00a1
- (id)init; // 0xb004d
@end
| 456 |
4,196 | <gh_stars>1000+
{
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentParameters.json#",
"contentVersion": "1.0.0.0",
"parameters": {
"sku": {
"value": "Free"
},
"workspaceName": {
"value": "rg-abc-logs"
},
"solutionType": {
"value": "Containers"
}
}
}
| 153 |
1,704 | <reponame>yusufraji/siren
["002.Laysan_Albatross","006.Least_Auklet","010.Red_winged_Blackbird","014.Indigo_Bunting","018.Spotted_Catbird","022.Chuck_will_Widow","026.Bronzed_Cowbird","030.Fish_Crow","034.Gray_crowned_Rosy_Finch","038.Great_Crested_Flycatcher","042.Vermilion_Flycatcher","046.Gadwall","050.Eared_Grebe","054.Blue_Grosbeak","058.Pigeon_Guillemot","062.Herring_Gull","066.Western_Gull","070.Green_Violetear","074.Florida_Jay","078.Gray_Kingbird","082.Ringed_Kingfisher","086.Pacific_Loon","090.Red_breasted_Merganser","094.White_breasted_Nuthatch","098.Scott_Oriole","102.Western_Wood_Pewee","106.Horned_Puffin","110.Geococcyx","114.Black_throated_Sparrow","118.House_Sparrow","122.Harris_Sparrow","126.Nelson_Sharp_tailed_Sparrow","130.Tree_Sparrow","134.Cape_Glossy_Starling","138.Tree_Swallow","142.Black_Tern","146.Forsters_Tern","150.Sage_Thrasher","154.Red_eyed_Vireo","158.Bay_breasted_Warbler","162.Canada_Warbler","166.Golden_winged_Warbler","170.Mourning_Warbler","174.Palm_Warbler","178.Swainson_Warbler","182.Yellow_Warbler","186.Cedar_Waxwing","190.Red_cockaded_Woodpecker","194.Cactus_Wren","198.Rock_Wren"] | 471 |
327 | from sublime_plugin import WindowCommand
from ..libraries.tools import get_setting, save_setting
class DeviotAutoCleanCommand(WindowCommand):
"""
Stores the automatic monitor cleaning user selection
and save it in the preferences file.
Extends: sublime_plugin.WindowCommand
"""
def run(self):
auto_clean = get_setting('auto_clean', True)
save_setting('auto_clean', not auto_clean)
def is_checked(self):
return get_setting('auto_clean', True)
| 163 |
428 | #
# This is an example of a (sub)application, which can be made a part of
# bigger site using "app mount" feature, see example_app_router.py.
#
import picoweb
app = picoweb.WebApp(__name__)
@app.route("/")
def index(req, resp):
yield from picoweb.start_response(resp)
yield from resp.awrite("This is webapp #1")
if __name__ == "__main__":
app.run(debug=True)
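# --- Mounting sketch (not part of the original file) ---
# A hypothetical illustration of the "app mount" feature mentioned above; the module
# name, the "/subapp" prefix and the mount() call are assumptions here, so the sketch
# is left commented out; see example_app_router.py for the authoritative version.
#
# import picoweb
# import example_app1  # this module
#
# main_app = picoweb.WebApp(__name__)
# main_app.mount("/subapp", example_app1.app)
# main_app.run(debug=True)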
| 140 |
7,113 | /*
* Copyright (C) 2010-2101 Alibaba Group Holding Limited.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.alibaba.otter.shared.arbitrate.impl.manage;
import java.util.ArrayList;
import java.util.List;
import org.I0Itec.zkclient.exception.ZkException;
import org.I0Itec.zkclient.exception.ZkNoNodeException;
import org.apache.zookeeper.CreateMode;
import com.alibaba.otter.shared.arbitrate.exception.ArbitrateException;
import com.alibaba.otter.shared.arbitrate.impl.ArbitrateConstants;
import com.alibaba.otter.shared.arbitrate.impl.ArbitrateEvent;
import com.alibaba.otter.shared.arbitrate.impl.manage.helper.ManagePathUtils;
import com.alibaba.otter.shared.arbitrate.impl.zookeeper.ZooKeeperClient;
import com.alibaba.otter.shared.common.utils.zookeeper.ZkClientx;
/**
 * Signals related to the machine node znode
 *
 * @author jianghang 2011-8-31 07:26:02 PM
*/
public class NodeArbitrateEvent implements ArbitrateEvent {
private ZkClientx zookeeper = ZooKeeperClient.getInstance();
/**
     * Creates the corresponding node znode. Note: the znode's lifecycle is EPHEMERAL.
     *
     * <pre>
     * 1. This is a synchronous call
* </pre>
*/
public void init(Long nid) {
String path = ManagePathUtils.getNode(nid);
try {
            zookeeper.create(path, new byte[0], CreateMode.EPHEMERAL); // create as an ephemeral node
} catch (ZkException e) {
throw new ArbitrateException("Node_init", nid.toString(), e);
}
}
/**
     * Destroys this node's znode.
     *
     * <pre>
     * 1. This is a synchronous call
* </pre>
*/
public void destory(Long nid) {
String path = ManagePathUtils.getNode(nid);
try {
            zookeeper.delete(path); // delete the node, ignoring the version
        } catch (ZkNoNodeException e) {
            // if the node no longer exists, do not throw an exception
// ignore
} catch (ZkException e) {
throw new ArbitrateException("Node_destory", nid.toString(), e);
}
}
/**
     * Gets the list of currently live nodes.
*/
public List<Long> liveNodes() {
String path = ArbitrateConstants.NODE_NID_ROOT;
try {
List<String> nids = zookeeper.getChildren(path);
List<Long> result = new ArrayList<Long>();
for (String nid : nids) {
result.add(Long.valueOf(nid));
}
return result;
} catch (ZkException e) {
throw new ArbitrateException("liveNodes", e);
}
}
}
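// --- Usage sketch (not part of the original file) ---
// A minimal, hypothetical caller; the nid value is arbitrary and a reachable ZooKeeper
// ensemble (via ZooKeeperClient) is assumed. The method name `destory` is kept exactly
// as declared above.
class NodeArbitrateEventUsageSketch {
    public static void main(String[] args) {
        NodeArbitrateEvent nodeEvent = new NodeArbitrateEvent();
        Long nid = 1L;                 // hypothetical node id
        nodeEvent.init(nid);           // registers this node as an EPHEMERAL znode
        List<Long> live = nodeEvent.liveNodes();
        System.out.println("live nodes: " + live);
        nodeEvent.destory(nid);        // removes the znode; silently ignores a missing node
    }
}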
| 1,364 |
6,304 | <gh_stars>1000+
// Copyright 2020 Google LLC.
// Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
#include "tools/fiddle/examples.h"
REG_FIDDLE(skcanvas_paint, 256, 256, false, 5) {
void draw(SkCanvas* canvas) {
canvas->drawColor(SK_ColorWHITE);
SkPaint paint;
paint.setStyle(SkPaint::kStroke_Style);
paint.setStrokeWidth(4);
paint.setColor(SK_ColorRED);
SkRect rect = SkRect::MakeXYWH(50, 50, 40, 60);
canvas->drawRect(rect, paint);
SkRRect oval;
oval.setOval(rect);
oval.offset(40, 60);
paint.setColor(SK_ColorBLUE);
canvas->drawRRect(oval, paint);
paint.setColor(SK_ColorCYAN);
canvas->drawCircle(180, 50, 25, paint);
rect.offset(80, 0);
paint.setColor(SK_ColorYELLOW);
canvas->drawRoundRect(rect, 10, 10, paint);
SkPath path;
path.cubicTo(768, 0, -512, 256, 256, 256);
paint.setColor(SK_ColorGREEN);
canvas->drawPath(path, paint);
canvas->drawImage(image, 128, 128, SkSamplingOptions(), &paint);
SkRect rect2 = SkRect::MakeXYWH(0, 0, 40, 60);
canvas->drawImageRect(image, rect2, SkSamplingOptions(), &paint);
SkPaint paint2;
auto text = SkTextBlob::MakeFromString("Hello, Skia!", SkFont(nullptr, 18));
canvas->drawTextBlob(text.get(), 50, 25, paint2);
}
} // END FIDDLE
| 543 |
448 | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.parquet;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.io.CloseableGroup;
import com.netflix.iceberg.io.CloseableIterable;
import org.apache.parquet.hadoop.ParquetReader;
import java.io.Closeable;
import java.io.IOException;
import java.util.Iterator;
import java.util.NoSuchElementException;
public class ParquetIterable<T> extends CloseableGroup implements CloseableIterable<T> {
private final ParquetReader.Builder<T> builder;
ParquetIterable(ParquetReader.Builder<T> builder) {
this.builder = builder;
}
@Override
public Iterator<T> iterator() {
try {
ParquetReader<T> reader = builder.build();
addCloseable(reader);
return new ParquetIterator<>(reader);
} catch (IOException e) {
throw new RuntimeIOException(e, "Failed to create Parquet reader");
}
}
private static class ParquetIterator<T> implements Iterator<T>, Closeable {
private final ParquetReader<T> parquet;
private boolean needsAdvance = false;
private boolean hasNext = false;
private T next = null;
ParquetIterator(ParquetReader<T> parquet) {
this.parquet = parquet;
this.next = advance();
}
@Override
public boolean hasNext() {
if (needsAdvance) {
this.next = advance();
}
return hasNext;
}
@Override
public T next() {
if (!hasNext()) {
throw new NoSuchElementException();
}
this.needsAdvance = true;
return next;
}
private T advance() {
// this must be called in hasNext because it reuses an UnsafeRow
try {
T next = parquet.read();
this.needsAdvance = false;
this.hasNext = (next != null);
return next;
} catch (IOException e) {
throw new RuntimeIOException(e);
}
}
@Override
public void remove() {
throw new UnsupportedOperationException("Remove is not supported");
}
@Override
public void close() throws IOException {
parquet.close();
}
}
}
| 923 |
5,169 | <filename>Specs/AppConsole/0.2.7/AppConsole.podspec.json
{
"name": "AppConsole",
"version": "0.2.7",
"summary": "AppConsole for Swifter.jl",
"description": "iOS REPL with Swifter.jl + Console",
"homepage": "https://github.com/wookay/AppConsole",
"license": {
"type": "MIT",
"file": "LICENSE.md"
},
"authors": {
"<NAME>": "<EMAIL>"
},
"platforms": {
"ios": "8.0"
},
"source": {
"git": "https://github.com/wookay/AppConsole.git",
"tag": "v0.2.7"
},
"source_files": "AppConsole/**/*.swift",
"dependencies": {
"NetUtils": [
],
"Swifter": [
]
}
}
| 276 |
945 | <filename>Modules/Filtering/FastMarching/test/itkFastMarchingQuadEdgeMeshFilterBaseTest.cxx
/*=========================================================================
*
* Copyright NumFOCUS
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0.txt
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*=========================================================================*/
#include "itkFastMarchingQuadEdgeMeshFilterBase.h"
#include "itkQuadEdgeMeshExtendedTraits.h"
#include "itkRegularSphereMeshSource.h"
#include "itkFastMarchingThresholdStoppingCriterion.h"
#include "itkMeshFileWriter.h"
int
itkFastMarchingQuadEdgeMeshFilterBaseTest(int, char *[])
{
using PixelType = float;
using CoordType = double;
constexpr unsigned int Dimension = 3;
using Traits = itk::QuadEdgeMeshExtendedTraits<PixelType, // type of data for vertices
Dimension, // geometrical dimension of space
2, // Mac topological dimension of a cell
CoordType, // type for point coordinate
CoordType, // type for interpolation weight
PixelType, // type of data for cell
bool, // type of data for primal edges
bool // type of data for dual edges
>;
using MeshType = itk::QuadEdgeMesh<PixelType, Dimension, Traits>;
using FastMarchingType = itk::FastMarchingQuadEdgeMeshFilterBase<MeshType, MeshType>;
MeshType::PointType center;
center.Fill(0.);
using SphereSourceType = itk::RegularSphereMeshSource<MeshType>;
SphereSourceType::Pointer sphere_filter = SphereSourceType::New();
sphere_filter->SetCenter(center);
sphere_filter->SetResolution(5);
sphere_filter->Update();
MeshType::Pointer sphere_output = sphere_filter->GetOutput();
MeshType::PointsContainerConstPointer points = sphere_output->GetPoints();
MeshType::PointsContainerConstIterator p_it = points->Begin();
MeshType::PointsContainerConstIterator p_end = points->End();
while (p_it != p_end)
{
sphere_output->SetPointData(p_it->Index(), 1.);
++p_it;
}
using NodePairType = FastMarchingType::NodePairType;
// using NodeContainerType = FastMarchingType::NodeContainerType;
using NodePairContainerType = FastMarchingType::NodePairContainerType;
NodePairContainerType::Pointer trial = NodePairContainerType::New();
NodePairType node_pair(0, 0.);
trial->push_back(node_pair);
using CriterionType = itk::FastMarchingThresholdStoppingCriterion<MeshType, MeshType>;
CriterionType::Pointer criterion = CriterionType::New();
criterion->SetThreshold(100.);
FastMarchingType::Pointer fmm_filter = FastMarchingType::New();
fmm_filter->SetInput(sphere_output);
fmm_filter->SetTrialPoints(trial);
fmm_filter->SetStoppingCriterion(criterion);
try
{
fmm_filter->Update();
}
catch (const itk::ExceptionObject & excep)
{
std::cerr << "Exception caught !" << std::endl;
std::cerr << excep << std::endl;
return EXIT_FAILURE;
}
using WriterType = itk::MeshFileWriter<MeshType>;
WriterType::Pointer writer = WriterType::New();
writer->SetInput(fmm_filter->GetOutput());
writer->SetFileName("itkFastMarchingQuadEdgeMeshFilterBase.vtk");
writer->Update();
return EXIT_SUCCESS;
}
| 1,499 |
903 | <gh_stars>100-1000
#include "../../../src/gui/text/qfontengine_mac_p.h"
| 33 |
892 | <reponame>github/advisory-database<filename>advisories/unreviewed/2022/05/GHSA-c29x-q9mw-w4j4/GHSA-c29x-q9mw-w4j4.json<gh_stars>100-1000
{
"schema_version": "1.2.0",
"id": "GHSA-c29x-q9mw-w4j4",
"modified": "2022-05-13T01:36:23Z",
"published": "2022-05-13T01:36:23Z",
"aliases": [
"CVE-2017-6873"
],
"details": "A vulnerability was discovered in Siemens OZW672 (all versions) and OZW772 (all versions) that could allow an attacker to read and manipulate data in TLS sessions while performing a man-in-the-middle (MITM) attack on the integrated web server on port 443/tcp.",
"severity": [
{
"type": "CVSS_V3",
"score": "CVSS:3.0/AV:N/AC:H/PR:N/UI:N/S:U/C:H/I:H/A:N"
}
],
"affected": [
],
"references": [
{
"type": "ADVISORY",
"url": "https://nvd.nist.gov/vuln/detail/CVE-2017-6873"
},
{
"type": "WEB",
"url": "https://www.siemens.com/cert/pool/cert/siemens_security_advisory_ssa-563539.pdf"
},
{
"type": "WEB",
"url": "http://www.securityfocus.com/bid/99473"
}
],
"database_specific": {
"cwe_ids": [
],
"severity": "HIGH",
"github_reviewed": false
}
} | 567 |
1,056 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.openide.filesystems;
import java.awt.Component;
import java.awt.FileDialog;
import java.awt.Frame;
import java.awt.HeadlessException;
import java.awt.KeyboardFocusManager;
import java.io.File;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import javax.swing.Icon;
import javax.swing.JFileChooser;
import javax.swing.SwingUtilities;
import javax.swing.filechooser.FileFilter;
import javax.swing.filechooser.FileSystemView;
import javax.swing.filechooser.FileView;
import org.netbeans.modules.openide.filesystems.FileFilterSupport;
import org.openide.filesystem.spi.FileChooserBuilderProvider;
import org.openide.util.*;
/**
* Utility class for working with JFileChoosers. In particular, remembering
* the last-used directory for a given file is made transparent. You pass an
* ad-hoc string key to the constructor (the fully qualified name of the
* calling class is good for uniqueness, and there is a constructor that takes
* a <code>Class</code> object as an argument for this purpose). That key is
* used to look up the most recently-used directory from any previous invocations
* with the same key. This makes it easy to have your user interface
* “remember” where the user keeps particular types of files, and
* saves the user from having to navigate through the same set of directories
* every time they need to locate a file from a particular place.
* <p/>
* <code>FileChooserBuilder</code>'s methods each return <code>this</code>, so
* it is possible to chain invocations to simplify setting up a file chooser.
* Example usage:
* <pre>
* <font color="gray">//The default dir to use if no value is stored</font>
* File home = new File (System.getProperty("user.home") + File.separator + "lib");
* <font color="gray">//Now build a file chooser and invoke the dialog in one line of code</font>
* <font color="gray">//"libraries-dir" is our unique key</font>
* File toAdd = new FileChooserBuilder ("libraries-dir").setTitle("Add Library").
* setDefaultWorkingDirectory(home).setApproveText("Add").showOpenDialog();
* <font color="gray">//Result will be null if the user clicked cancel or closed the dialog w/o OK</font>
* if (toAdd != null) {
* //do something
* }
*</pre>
* <p/>
* Instances of this class are intended to be thrown away after use. Typically
* you create a builder, set it to create file choosers as you wish, then
* use it to show a dialog or create a file chooser you then do something
* with.
* <p/>
* Supports the most common subset of JFileChooser functionality; if you
* need to do something exotic with a file chooser, you are probably better
* off creating your own.
* <p/>
* <b>Note:</b> If you use the constructor that takes a <code>Class</code> object,
* please use <code>new FileChooserBuilder(MyClass.class)</code>, not
* <code>new FileChooserBuilder(getClass())</code>. This avoids unexpected
* behavior in the case of subclassing.
*
* @author <NAME>
*/
public class FileChooserBuilder {
private boolean dirsOnly;
private BadgeProvider badger;
private String title;
private String approveText;
//Just in case...
private static boolean PREVENT_SYMLINK_TRAVERSAL =
!Boolean.getBoolean("allow.filechooser.symlink.traversal"); //NOI18N
private final String dirKey;
private File failoverDir;
private FileFilter filter;
private boolean fileHiding;
private boolean controlButtonsShown = true;
private String aDescription;
private boolean filesOnly;
private static final boolean DONT_STORE_DIRECTORIES =
Boolean.getBoolean("forget.recent.dirs");
private SelectionApprover approver;
private final List<FileFilter> filters = new ArrayList<FileFilter>(3);
private boolean useAcceptAllFileFilter = true;
/**
* Creates a new FileChooserBuilder which can interact with the given file system.
* @param fileSystem A virtual file system
* @return FileChooserBuilder related to the given file system
* @since 9.11
*/
public static FileChooserBuilder create(FileSystem fileSystem) {
Collection<? extends FileChooserBuilderProvider> providers = Lookup.getDefault().lookupAll(FileChooserBuilderProvider.class);
for (FileChooserBuilderProvider provider : providers) {
FileChooserBuilder builder = provider.createFileChooserBuilder(fileSystem);
if (builder != null) {
return builder;
}
}
return new FileChooserBuilder(fileSystem.getDisplayName());
}
/**
* Create a new FileChooserBuilder using the name of the passed class
* as the metadata for looking up a starting directory from previous
* application sessions or invocations.
* @param type A non-null class object, typically the calling class
*/
public FileChooserBuilder(Class type) {
this(type.getName());
}
/**
* Create a new FileChooserBuilder. The passed key is used as a key
* into NbPreferences to look up the directory the file chooser should
* initially be rooted on.
*
* @param dirKey A non-null ad-hoc string. If a FileChooser was previously
     * used with the same string as is passed, then the initial directory
     * will be the last directory used with that key.
     */
public FileChooserBuilder(String dirKey) {
Parameters.notNull("dirKey", dirKey);
this.dirKey = dirKey;
}
/**
* Set whether or not any file choosers created by this builder will show
* only directories.
* @param val true if files should not be shown
* @return this
*/
public FileChooserBuilder setDirectoriesOnly(boolean val) {
dirsOnly = val;
assert !filesOnly : "FilesOnly and DirsOnly are mutually exclusive";
return this;
}
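    /**
     * Set whether or not any file choosers created by this builder will show
     * only files (directories will not be selectable).
     * @param val true if directories should not be shown
     * @return this
     */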
public FileChooserBuilder setFilesOnly(boolean val) {
filesOnly = val;
assert !dirsOnly : "FilesOnly and DirsOnly are mutually exclusive";
return this;
}
/**
* Provide an implementation of BadgeProvider which will "badge" the
* icons of some files.
*
* @param provider A badge provider which will alter the icon of files
* or folders that may be of particular interest to the user
* @return this
*/
public FileChooserBuilder setBadgeProvider(BadgeProvider provider) {
this.badger = provider;
return this;
}
/**
* Set the dialog title for any JFileChoosers created by this builder.
* @param val A localized, human-readable title
* @return this
*/
public FileChooserBuilder setTitle(String val) {
title = val;
return this;
}
/**
* Set the text on the OK button for any file chooser dialogs produced
* by this builder.
* @param val A short, localized, human-readable string
* @return this
*/
public FileChooserBuilder setApproveText(String val) {
approveText = val;
return this;
}
/**
* Set a file filter which filters the list of selectable files.
* @param filter
* @return this
*/
public FileChooserBuilder setFileFilter (FileFilter filter) {
this.filter = filter;
return this;
}
/**
* Determines whether the <code>AcceptAll FileFilter</code> is used
* as an available choice in the choosable filter list.
* If false, the <code>AcceptAll</code> file filter is removed from
* the list of available file filters.
* If true, the <code>AcceptAll</code> file filter will become the
* the actively used file filter.
* @param accept whether the <code>AcceptAll FileFilter</code> is used
* @return this
* @since 8.3
*/
public FileChooserBuilder setAcceptAllFileFilterUsed(boolean accept) {
useAcceptAllFileFilter = accept;
return this;
}
/**
* Set the current directory which should be used <b>only if</b>
* a last-used directory cannot be found for the key string passed
* into this builder's constructor.
* @param dir A directory to root any created file choosers on if
* there is no stored path for this builder's key
* @return this
*/
public FileChooserBuilder setDefaultWorkingDirectory (File dir) {
failoverDir = dir;
return this;
}
/**
* Enable file hiding in any created file choosers
* @param fileHiding Whether or not to hide files. Default is no.
* @return this
*/
public FileChooserBuilder setFileHiding(boolean fileHiding) {
this.fileHiding = fileHiding;
return this;
}
/**
* Show/hide control buttons
     * @param val Whether or not to show the approve/cancel control buttons. Default is yes.
* @return this
*/
public FileChooserBuilder setControlButtonsAreShown(boolean val) {
this.controlButtonsShown = val;
return this;
}
/**
* Set the accessible description for any file choosers created by this
* builder
* @param aDescription The description
* @return this
*/
public FileChooserBuilder setAccessibleDescription(String aDescription) {
this.aDescription = aDescription;
return this;
}
/**
* Create a JFileChooser that conforms to the parameters set in this
* builder.
* @return A file chooser
*/
public JFileChooser createFileChooser() {
JFileChooser result = new SavedDirFileChooser(dirKey, failoverDir,
force, approver);
prepareFileChooser(result);
return result;
}
private boolean force = false;
/**
* Force use of the failover directory - i.e. ignore the directory key
* passed in.
* @param val
* @return this
*/
public FileChooserBuilder forceUseOfDefaultWorkingDirectory(boolean val) {
this.force = val;
return this;
}
/**
* Tries to find an appropriate component to parent the file chooser to
* when showing a dialog.
     * @return a component to parent the dialog to, or null if none is available
*/
private Component findDialogParent() {
Component parent = KeyboardFocusManager.getCurrentKeyboardFocusManager().getFocusOwner();
if (parent == null) {
parent = KeyboardFocusManager.getCurrentKeyboardFocusManager().getActiveWindow();
}
if (parent == null) {
Frame[] f = Frame.getFrames();
parent = f.length == 0 ? null : f[f.length - 1];
}
return parent;
}
/**
* Show an open dialog that allows multiple selection.
* @return An array of files, or null if the user cancelled the dialog
*/
public File[] showMultiOpenDialog() {
JFileChooser chooser = createFileChooser();
chooser.setMultiSelectionEnabled(true);
int result = chooser.showOpenDialog(findDialogParent());
if (JFileChooser.APPROVE_OPTION == result) {
File[] files = chooser.getSelectedFiles();
return files == null ? new File[0] : files;
} else {
return null;
}
}
/**
* Show an open dialog with a file chooser set up according to the
* parameters of this builder.
     * @return A file if the user clicks the accept button with a file or
     * folder selected; null otherwise.
*/
public File showOpenDialog() {
JFileChooser chooser = createFileChooser();
if( Boolean.getBoolean("nb.native.filechooser") ) { //NOI18N
FileDialog fileDialog = createFileDialog( chooser.getCurrentDirectory() );
if( null != fileDialog ) {
return showFileDialog(fileDialog, FileDialog.LOAD );
}
}
chooser.setMultiSelectionEnabled(false);
int dlgResult = chooser.showOpenDialog(findDialogParent());
if (JFileChooser.APPROVE_OPTION == dlgResult) {
File result = chooser.getSelectedFile();
if (result != null && !result.exists()) {
result = null;
}
return result;
} else {
return null;
}
}
/**
* Show a save dialog with the file chooser set up according to the
* parameters of this builder.
     * @return A file if the user clicks the accept button with a file or
     * folder selected; null otherwise.
*/
public File showSaveDialog() {
JFileChooser chooser = createFileChooser();
if( Boolean.getBoolean("nb.native.filechooser") ) { //NOI18N
FileDialog fileDialog = createFileDialog( chooser.getCurrentDirectory() );
if( null != fileDialog ) {
return showFileDialog( fileDialog, FileDialog.SAVE );
}
}
int result = chooser.showSaveDialog(findDialogParent());
if (JFileChooser.APPROVE_OPTION == result) {
return chooser.getSelectedFile();
} else {
return null;
}
}
private File showFileDialog( FileDialog fileDialog, int mode ) {
String oldFileDialogProp = System.getProperty("apple.awt.fileDialogForDirectories"); //NOI18N
if( dirsOnly ) {
System.setProperty("apple.awt.fileDialogForDirectories", "true"); //NOI18N
}
fileDialog.setMode( mode );
fileDialog.setVisible(true);
if( dirsOnly ) {
if( null != oldFileDialogProp ) {
System.setProperty("apple.awt.fileDialogForDirectories", oldFileDialogProp); //NOI18N
} else {
System.clearProperty("apple.awt.fileDialogForDirectories"); //NOI18N
}
}
if( fileDialog.getDirectory() != null && fileDialog.getFile() != null ) {
String selFile = fileDialog.getFile();
File dir = new File( fileDialog.getDirectory() );
return new File( dir, selFile );
}
return null;
}
private void prepareFileChooser(JFileChooser chooser) {
chooser.setFileSelectionMode(dirsOnly ? JFileChooser.DIRECTORIES_ONLY
: filesOnly ? JFileChooser.FILES_ONLY :
JFileChooser.FILES_AND_DIRECTORIES);
chooser.setFileHidingEnabled(fileHiding);
chooser.setControlButtonsAreShown(controlButtonsShown);
chooser.setAcceptAllFileFilterUsed(useAcceptAllFileFilter);
if (title != null) {
chooser.setDialogTitle(title);
}
if (approveText != null) {
chooser.setApproveButtonText(approveText);
}
if (badger != null) {
chooser.setFileView(new CustomFileView(new BadgeIconProvider(badger),
chooser.getFileSystemView()));
}
if (PREVENT_SYMLINK_TRAVERSAL) {
FileUtil.preventFileChooserSymlinkTraversal(chooser,
chooser.getCurrentDirectory());
}
if (filter != null) {
chooser.setFileFilter(filter);
}
if (aDescription != null) {
chooser.getAccessibleContext().setAccessibleDescription(aDescription);
}
if (!filters.isEmpty()) {
for (FileFilter f : filters) {
chooser.addChoosableFileFilter(f);
}
}
}
private FileDialog createFileDialog( File currentDirectory ) {
if( badger != null )
return null;
if( !Boolean.getBoolean("nb.native.filechooser") )
return null;
if( dirsOnly && !BaseUtilities.isMac() )
return null;
Component parentComponent = findDialogParent();
Frame parentFrame = (Frame) SwingUtilities.getAncestorOfClass(Frame.class, parentComponent);
FileDialog fileDialog = new FileDialog(parentFrame);
if (title != null) {
fileDialog.setTitle(title);
}
if( null != currentDirectory )
fileDialog.setDirectory(currentDirectory.getAbsolutePath());
return fileDialog;
}
/**
* Equivalent to calling <code>JFileChooser.addChoosableFileFilter(filter)</code>.
* Adds another file filter that can be displayed in the file filters combo
* box in the file chooser.
*
* @param filter The file filter to add
* @return this
* @since 7.26.0
*/
public FileChooserBuilder addFileFilter (FileFilter filter) {
filters.add (filter);
return this;
}
/**
* Add all default file filters to the file chooser.
*
* @see MIMEResolver.Registration#showInFileChooser()
* @see MIMEResolver.ExtensionRegistration#showInFileChooser()
* @return this
* @since 8.1
*/
public FileChooserBuilder addDefaultFileFilters() {
filters.addAll(FileFilterSupport.findRegisteredFileFilters());
return this;
}
/**
* Set a selection approver which can display an "Overwrite file?"
* or similar dialog if necessary, when the user presses the accept button
* in the file chooser dialog.
*
* @param approver A SelectionApprover which will determine if the selection
* is valid
* @return this
* @since 7.26.0
*/
public FileChooserBuilder setSelectionApprover (SelectionApprover approver) {
this.approver = approver;
return this;
}
/**
* Object which can approve the selection (enabling the OK button or
* equivalent) in a JFileChooser. Equivalent to overriding
* <code>JFileChooser.approveSelection()</code>
* @since 7.26.0
*/
public interface SelectionApprover {
/**
* Approve the selection, enabling the dialog to be closed. Called by
* the JFileChooser's <code>approveSelection()</code> method. Use this
* interface if you want to, for example, show a dialog asking
* "Overwrite File X?" or similar.
*
* @param selection The selected file(s) at the time the user presses
* the Open, Save or OK button
* @return true if the selection is accepted, false if it is not and
* the dialog should not be closed
*/
public boolean approve (File[] selection);
}
private static final class SavedDirFileChooser extends JFileChooser {
private final String dirKey;
private final SelectionApprover approver;
SavedDirFileChooser(String dirKey, File failoverDir, boolean force, SelectionApprover approver) {
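            // Directory resolution order: a valid failover directory when forced,
            // otherwise the last-used directory stored in NbPreferences under dirKey,
            // falling back to the failover directory if no stored path is usable.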
this.dirKey = dirKey;
this.approver = approver;
if (force && failoverDir != null && failoverDir.exists() && failoverDir.isDirectory()) {
setCurrentDirectory(failoverDir);
} else {
String path = DONT_STORE_DIRECTORIES ? null :
NbPreferences.forModule(FileChooserBuilder.class).get(dirKey, null);
if (path != null) {
File f = new File(path);
if (f.exists() && f.isDirectory()) {
setCurrentDirectory(f);
} else if (failoverDir != null) {
setCurrentDirectory(failoverDir);
}
} else if (failoverDir != null) {
setCurrentDirectory(failoverDir);
}
}
}
@Override
public void approveSelection() {
if (approver != null) {
File[] selected = getSelectedFiles();
final File sf = getSelectedFile();
if ((selected == null || selected.length == 0) && sf != null) {
selected = new File[] { sf };
}
boolean approved = approver.approve(selected);
if (approved) {
super.approveSelection();
}
} else {
super.approveSelection();
}
}
@Override
public int showDialog(Component parent, String approveButtonText) throws HeadlessException {
int result = super.showDialog(parent, approveButtonText);
if (result == APPROVE_OPTION) {
saveCurrentDir();
}
return result;
}
private void saveCurrentDir() {
File dir = super.getCurrentDirectory();
if (!DONT_STORE_DIRECTORIES && dir != null && dir.exists() && dir.isDirectory()) {
NbPreferences.forModule(FileChooserBuilder.class).put(dirKey, dir.getPath());
}
}
}
//Can open this API later if there is a use-case
interface IconProvider {
public Icon getIcon(File file, Icon orig);
}
/**
* Provides "badges" for icons that indicate files or folders of particular
* interest to the user.
* @see FileChooserBuilder#setBadgeProvider
*/
public interface BadgeProvider {
/**
* Get the badge the passed file should use. <b>Note:</b> this method
* is called for every visible file. The negative test (deciding
* <i>not</i> to badge a file) should be very, very fast and immediately
* return null.
* @param file The file in question
* @return an icon or null if no change to the appearance of the file
* is needed
*/
public Icon getBadge(File file);
/**
* Get the x offset for badges produced by this provider. This is
* the location of the badge icon relative to the real icon for the
* file.
* @return a rightward pixel offset
*/
public int getXOffset();
/**
* Get the y offset for badges produced by this provider. This is
* the location of the badge icon relative to the real icon for the
* file.
* @return a downward pixel offset
*/
public int getYOffset();
}
private static final class BadgeIconProvider implements IconProvider {
private final BadgeProvider badger;
public BadgeIconProvider(BadgeProvider badger) {
this.badger = badger;
}
public Icon getIcon(File file, Icon orig) {
Icon badge = badger.getBadge(file);
if (badge != null && orig != null) {
return new MergedIcon(orig, badge, badger.getXOffset(),
badger.getYOffset());
}
return orig;
}
}
private static final class CustomFileView extends FileView {
private final IconProvider provider;
private final FileSystemView view;
CustomFileView(IconProvider provider, FileSystemView view) {
this.provider = provider;
this.view = view;
}
@Override
public Icon getIcon(File f) {
Icon result = view.getSystemIcon(f);
result = provider.getIcon(f, result);
return result;
}
}
private static class MergedIcon implements Icon {
private Icon icon1;
private Icon icon2;
private int xMerge;
private int yMerge;
MergedIcon(Icon icon1, Icon icon2, int xMerge, int yMerge) {
assert icon1 != null;
assert icon2 != null;
this.icon1 = icon1;
this.icon2 = icon2;
if (xMerge == -1) {
xMerge = icon1.getIconWidth() - icon2.getIconWidth();
}
if (yMerge == -1) {
yMerge = icon1.getIconHeight() - icon2.getIconHeight();
}
this.xMerge = xMerge;
this.yMerge = yMerge;
}
public int getIconHeight() {
return Math.max(icon1.getIconHeight(), yMerge + icon2.getIconHeight());
}
public int getIconWidth() {
            return Math.max(icon1.getIconWidth(), xMerge + icon2.getIconWidth());
}
public void paintIcon(java.awt.Component c, java.awt.Graphics g, int x, int y) {
icon1.paintIcon(c, g, x, y);
icon2.paintIcon(c, g, x + xMerge, y + yMerge);
}
}
}
| 9,892 |
1,338 | /*
* Copyright 2006, <NAME> <<EMAIL>>.
* All rights reserved. Distributed under the terms of the MIT License.
*/
#include <OS.h>
#include <arch_cpu.h>
#include <arch/system_info.h>
#include <boot/kernel_args.h>
enum cpu_vendor sCPUVendor;
uint32 sPVR;
static uint64 sCPUClockFrequency;
static uint64 sBusClockFrequency;
struct cpu_model {
uint16 version;
enum cpu_vendor vendor;
};
// mapping of CPU versions to vendors
struct cpu_model kCPUModels[] = {
{ MPC601, B_CPU_VENDOR_MOTOROLA },
{ MPC603, B_CPU_VENDOR_MOTOROLA },
{ MPC604, B_CPU_VENDOR_MOTOROLA },
{ MPC602, B_CPU_VENDOR_MOTOROLA },
{ MPC603e, B_CPU_VENDOR_MOTOROLA },
{ MPC603ev, B_CPU_VENDOR_MOTOROLA },
{ MPC750, B_CPU_VENDOR_MOTOROLA },
{ MPC604ev, B_CPU_VENDOR_MOTOROLA },
{ MPC7400, B_CPU_VENDOR_MOTOROLA },
{ MPC620, B_CPU_VENDOR_MOTOROLA },
{ IBM403, B_CPU_VENDOR_IBM },
{ IBM401A1, B_CPU_VENDOR_IBM },
{ IBM401B2, B_CPU_VENDOR_IBM },
{ IBM401C2, B_CPU_VENDOR_IBM },
{ IBM401D2, B_CPU_VENDOR_IBM },
{ IBM401E2, B_CPU_VENDOR_IBM },
{ IBM401F2, B_CPU_VENDOR_IBM },
{ IBM401G2, B_CPU_VENDOR_IBM },
{ IBMPOWER3, B_CPU_VENDOR_IBM },
{ MPC860, B_CPU_VENDOR_MOTOROLA },
{ MPC8240, B_CPU_VENDOR_MOTOROLA },
{ IBM405GP, B_CPU_VENDOR_IBM },
{ IBM405L, B_CPU_VENDOR_IBM },
{ IBM750FX, B_CPU_VENDOR_IBM },
{ MPC7450, B_CPU_VENDOR_MOTOROLA },
{ MPC7455, B_CPU_VENDOR_MOTOROLA },
{ MPC7457, B_CPU_VENDOR_MOTOROLA },
{ MPC7447A, B_CPU_VENDOR_MOTOROLA },
{ MPC7448, B_CPU_VENDOR_MOTOROLA },
{ MPC7410, B_CPU_VENDOR_MOTOROLA },
{ MPC8245, B_CPU_VENDOR_MOTOROLA },
{ 0, B_CPU_VENDOR_UNKNOWN }
};
void
arch_fill_topology_node(cpu_topology_node_info* node, int32 cpu)
{
switch (node->type) {
case B_TOPOLOGY_ROOT:
#if __powerpc64__
node->data.root.platform = B_CPU_PPC_64;
#else
node->data.root.platform = B_CPU_PPC;
#endif
break;
case B_TOPOLOGY_PACKAGE:
node->data.package.vendor = sCPUVendor;
node->data.package.cache_line_size = CACHE_LINE_SIZE;
break;
case B_TOPOLOGY_CORE:
node->data.core.model = sPVR;
node->data.core.default_frequency = sCPUClockFrequency;
break;
default:
break;
}
}
status_t
arch_system_info_init(struct kernel_args *args)
{
int i;
sCPUClockFrequency = args->arch_args.cpu_frequency;
sBusClockFrequency = args->arch_args.bus_frequency;
// The PVR (processor version register) contains processor version and
// revision.
sPVR = get_pvr();
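	// the upper 16 bits of the PVR hold the processor version (model) number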
uint16 model = (uint16)(sPVR >> 16);
//sCPURevision = (uint16)(pvr & 0xffff);
// Populate vendor
for (i = 0; kCPUModels[i].vendor != B_CPU_VENDOR_UNKNOWN; i++) {
if (model == kCPUModels[i].version) {
sCPUVendor = kCPUModels[i].vendor;
break;
}
}
return B_OK;
}
status_t
arch_get_frequency(uint64 *frequency, int32 cpu)
{
*frequency = sCPUClockFrequency;
return B_OK;
}
| 1,397 |
653 | /***********************************************************************
Interfacing to a classic piece of geodetic software
************************************************************************
gen_pol is a highly efficient, classic implementation of a generic
2D Horner's Scheme polynomial evaluation routine by <NAME> and
<NAME>, originating in the vivid geodetic environment at
what was then (1960-ish) the Danish Geodetic Institute.
The original Poder/Engsager gen_pol implementation (where
the polynomial degree and two sets of polynomial coefficients
are packed together in one compound array, handled via a plain
double pointer) is compelling and "true to the code history":
It has a beautiful classical 1960s ring to it, not unlike the
original fft implementations, which revolutionized spectral
analysis in twenty lines of code.
The Poder coding sound, as classic 1960s as Phil Spector's Wall
of Sound, is beautiful and inimitable.
On the other hand: For the uninitiated, the gen_pol code is hard
to follow, despite being compact.
Also, since adding metadata and improving maintainability
of the code are among the implied goals of a current SDFE/DTU Space
project, the material in this file introduces a version with a
more modern (or at least 1990s) look, introducing a "double 2D
polynomial" data type, HORNER.
Despite introducing a new data type for handling the polynomial
coefficients, great care has been taken to keep the coefficient
array organization identical to that of gen_pol.
Hence, on one hand, the HORNER data type helps improving the
long term maintainability of the code by making the data
organization more mentally accessible.
On the other hand, it allows us to preserve the business end of
the original gen_pol implementation - although not including the
famous "Poder dual autocheck" in all its enigmatic elegance.
**********************************************************************
The material included here was written by <NAME>, starting
around 1960, and <NAME>, starting around 1970. It was
originally written in Algol 60, later (1980s) reimplemented in C.
The HORNER data type interface, and the organization as a header
library was implemented by <NAME>, starting around 2015.
***********************************************************************
*
* Copyright (c) 2016, SDFE http://www.sdfe.dk / <NAME> / <NAME>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*****************************************************************************/
#define PJ_LIB__
#include <errno.h>
#include <math.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>
#include <complex>
#include "proj.h"
#include "proj_internal.h"
PROJ_HEAD(horner, "Horner polynomial evaluation");
/* make horner.h interface with proj's memory management */
#define horner_dealloc(x) free(x)
#define horner_calloc(n,x) calloc(n,x)
namespace { // anonymous namespace
struct horner {
int uneg; /* u axis negated? */
int vneg; /* v axis negated? */
int order; /* maximum degree of polynomium */
int coefs; /* number of coefficients for each polynomium */
double range; /* radius of the region of validity */
bool has_inv; /* inv parameters are specified */
double inverse_tolerance; /* in the units of the destination coords,
specifies when to stop iterating if !has_inv and direction is reverse */
double *fwd_u; /* coefficients for the forward transformations */
double *fwd_v; /* i.e. latitude/longitude to northing/easting */
double *inv_u; /* coefficients for the inverse transformations */
double *inv_v; /* i.e. northing/easting to latitude/longitude */
double *fwd_c; /* coefficients for the complex forward transformations */
double *inv_c; /* coefficients for the complex inverse transformations */
PJ_UV *fwd_origin; /* False longitude/latitude */
PJ_UV *inv_origin; /* False easting/northing */
};
} // anonymous namespace
typedef struct horner HORNER;
static PJ_UV horner_func (PJ* P, const HORNER *transformation, PJ_DIRECTION direction, PJ_UV position);
static HORNER *horner_alloc (size_t order, int complex_polynomia);
static void horner_free (HORNER *h);
/* e.g. degree = 2: a + bx + cy + dxx + eyy + fxy, i.e. 6 coefficients */
#define horner_number_of_coefficients(order) \
(((order + 1)*(order + 2)/2))
static void horner_free (HORNER *h) {
horner_dealloc (h->inv_v);
horner_dealloc (h->inv_u);
horner_dealloc (h->fwd_v);
horner_dealloc (h->fwd_u);
horner_dealloc (h->fwd_c);
horner_dealloc (h->inv_c);
horner_dealloc (h->fwd_origin);
horner_dealloc (h->inv_origin);
horner_dealloc (h);
}
static HORNER *horner_alloc (size_t order, int complex_polynomia) {
    /* size_t is unsigned, so we need not check for order < 0 */
int n = (int)horner_number_of_coefficients(order);
int polynomia_ok = 0;
HORNER *h = static_cast<HORNER*>(horner_calloc (1, sizeof (HORNER)));
if (nullptr==h)
return nullptr;
if (complex_polynomia)
n = 2*(int)order + 2;
h->order = (int)order;
h->coefs = n;
if (complex_polynomia) {
h->fwd_c = static_cast<double*>(horner_calloc (n, sizeof(double)));
h->inv_c = static_cast<double*>(horner_calloc (n, sizeof(double)));
if (h->fwd_c && h->inv_c)
polynomia_ok = 1;
}
else {
h->fwd_u = static_cast<double*>(horner_calloc (n, sizeof(double)));
h->fwd_v = static_cast<double*>(horner_calloc (n, sizeof(double)));
h->inv_u = static_cast<double*>(horner_calloc (n, sizeof(double)));
h->inv_v = static_cast<double*>(horner_calloc (n, sizeof(double)));
if (h->fwd_u && h->fwd_v && h->inv_u && h->inv_v)
polynomia_ok = 1;
}
h->fwd_origin = static_cast<PJ_UV*>(horner_calloc (1, sizeof(PJ_UV)));
h->inv_origin = static_cast<PJ_UV*>(horner_calloc (1, sizeof(PJ_UV)));
if (polynomia_ok && h->fwd_origin && h->inv_origin)
return h;
/* safe, since all pointers are null-initialized (by calloc) */
horner_free (h);
return nullptr;
}
inline static PJ_UV double_real_horner_eval(int order, const double *cx, const double *cy, PJ_UV en, int order_offset = 0)
{
/*
The melody of this block is straight out of the great Engsager/Poder songbook.
For numerical stability, the summation is carried out backwards,
summing the tiny high order elements first.
Double Horner's scheme: N = n*Cy*e -> yout, E = e*Cx*n -> xout
*/
const double n = en.v;
const double e = en.u;
const int sz = horner_number_of_coefficients(order); /* Number of coefficients per polynomial */
cx += sz;
cy += sz;
double N = *--cy;
double E = *--cx;
for (int r = order; r > order_offset; r--) {
double u = *--cy;
double v = *--cx;
for (int c = order; c >= r; c--) {
u = n*u + *--cy;
v = e*v + *--cx;
}
N = e*N + u;
E = n*E + v;
}
return { E, N };
}
inline static double single_real_horner_eval(int order, const double *cx, double x, int order_offset = 0)
{
const int sz = order + 1; /* Number of coefficients per polynomial */
cx += sz;
double u = *--cx;
for (int r = order; r > order_offset; r--) {
u = x*u + *--cx;
}
return u;
}
inline static PJ_UV complex_horner_eval(int order, const double *c, PJ_UV en, int order_offset = 0)
{
// the coefficients are ordered like this:
// (Cn0+i*Ce0, Cn1+i*Ce1, ...)
const int sz = 2*order + 2; // number of coefficients
const double e = en.u;
const double n = en.v;
const double *cbeg = c + order_offset*2;
c += sz;
double E = *--c;
double N = *--c;
double w;
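    /* Each step multiplies the accumulated value (N + iE) by the complex
       coordinate (n + ie) and adds the next coefficient pair; w holds the
       new imaginary part so that N is still updated from the old E. */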
while (c > cbeg) {
w = n*E + e*N + *--c;
N = n*N - e*E + *--c;
E = w;
}
return { E, N };
}
/**********************************************************************/
static PJ_UV horner_func (PJ* P, const HORNER *transformation, PJ_DIRECTION direction, PJ_UV position) {
/***********************************************************************
A reimplementation of the classic Engsager/Poder 2D Horner polynomial
evaluation engine "gen_pol".
This version omits the inimitable Poder "dual autocheck"-machinery,
which here is intended to be implemented at a higher level of the
library: We separate the polynomial evaluation from the quality
control (which, given the limited MTBF for "computing machinery",
typical when Knud Poder invented the dual autocheck method,
was not defensible at that time).
Another difference from the original version is that we return the
result on the stack, rather than accepting pointers to result variables
as input. This results in code that is easy to read:
projected = horner (s34j, 1, geographic);
geographic = horner (s34j, -1, projected );
and experiments have shown that on contemporary architectures, the time
taken for returning even comparatively large objects on the stack (and
the UV is not that large - typically only 16 bytes) is negligibly
different from passing two pointers (i.e. typically also 16 bytes) the
other way.
The polynomium has the form:
P = sum (i = [0 : order])
sum (j = [0 : order - i])
pow(par_1, i) * pow(par_2, j) * coef(index(order, i, j))
***********************************************************************/
/* These variable names follow the Engsager/Poder implementation */
double range; /* Equivalent to the gen_pol's FLOATLIMIT constant */
double n, e;
PJ_UV uv_error;
uv_error.u = uv_error.v = HUGE_VAL;
if (nullptr==transformation)
return uv_error;
/* Check for valid value of direction (-1, 0, 1) */
switch (direction) {
case PJ_IDENT: /* no-op */
return position;
case PJ_FWD: /* forward */
case PJ_INV: /* inverse */
break;
default: /* invalid */
return uv_error;
}
/* Prepare for double Horner */
range = transformation->range;
const bool iterative_inverse = direction == PJ_INV && !transformation->has_inv;
if (direction==PJ_FWD) { /* forward */
e = position.u - transformation->fwd_origin->u;
n = position.v - transformation->fwd_origin->v;
} else { /* inverse */
if (!iterative_inverse) {
e = position.u - transformation->inv_origin->u;
n = position.v - transformation->inv_origin->v;
} else {
// in this case fwd_origin needs to be added in the end
e = position.u;
n = position.v;
}
}
if ((fabs(n) > range) || (fabs(e) > range)) {
proj_errno_set(P, PROJ_ERR_COORD_TRANSFM_OUTSIDE_PROJECTION_DOMAIN);
return uv_error;
} else if (iterative_inverse) {
/*
* solve iteratively
*
* | E | | u00 | | u01 + u02*x + ... ' u10 + u11*x + u20*y + ... | | x |
* | | = | | + |-------------------------- ' --------------------------| | |
* | N | | v00 | | v10 + v11*y + v20*x + ... ' v01 + v02*y + ... | | y |
*
* | x | | Ma ' Mb |-1 | E-u00 |
* | | = |-------- | | |
* | y | | Mc ' Md | | N-v00 |
*/
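        /* Fixed-point iteration: the polynomial is written as
           [E;N] = [u00;v00] + M(x,y)*[x;y]. Evaluate M at the current
           estimate (x0, y0), solve the 2x2 system for a new (x, y), and stop
           once both components change by less than inverse_tolerance. */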
const int order = transformation->order;
const double tol = transformation->inverse_tolerance;
const double de = e - transformation->fwd_u[0];
const double dn = n - transformation->fwd_v[0];
double x0 = 0.0;
double y0 = 0.0;
int loops = 32; // usually converges really fast (1-2 loops)
bool converged = false;
while (loops-- > 0 && !converged) {
double Ma = 0.0;
double Mb = 0.0;
double Mc = 0.0;
double Md = 0.0;
{
const double *tcx = transformation->fwd_u;
const double *tcy = transformation->fwd_v;
PJ_UV x0y0 = { x0, y0 };
// sum the i > 0 coefficients
PJ_UV Mbc = double_real_horner_eval(order, tcx, tcy, x0y0, 1);
Mb = Mbc.u;
Mc = Mbc.v;
// sum the i = 0, j > 0 coefficients
Ma = single_real_horner_eval(order, tcx, x0, 1);
Md = single_real_horner_eval(order, tcy, y0, 1);
}
double idet = 1.0 / (Ma*Md - Mb*Mc);
double x = idet * (Md*de - Mb*dn);
double y = idet * (Ma*dn - Mc*de);
converged = (fabs(x-x0) < tol) && (fabs(y-y0) < tol);
x0 = x;
y0 = y;
}
// if loops have been exhausted and we have not converged yet,
// we are never going to converge
if (!converged) {
proj_errno_set(P, PROJ_ERR_COORD_TRANSFM);
return uv_error;
} else {
position.u = x0 + transformation->fwd_origin->u;
position.v = y0 + transformation->fwd_origin->v;
}
}
else {
const double *tcx = direction == PJ_FWD ? transformation->fwd_u : transformation->inv_u;
const double *tcy = direction == PJ_FWD ? transformation->fwd_v : transformation->inv_v;
PJ_UV en = { e, n };
position = double_real_horner_eval(transformation->order, tcx, tcy, en);
}
return position;
}
static PJ_COORD horner_forward_4d (PJ_COORD point, PJ *P) {
point.uv = horner_func (P, (HORNER *) P->opaque, PJ_FWD, point.uv);
return point;
}
static PJ_COORD horner_reverse_4d (PJ_COORD point, PJ *P) {
point.uv = horner_func (P, (HORNER *) P->opaque, PJ_INV, point.uv);
return point;
}
/**********************************************************************/
static PJ_UV complex_horner (PJ *P, const HORNER *transformation, PJ_DIRECTION direction, PJ_UV position) {
/***********************************************************************
A reimplementation of a classic Engsager/Poder Horner complex
polynomial evaluation engine.
***********************************************************************/
/* These variable names follow the Engsager/Poder implementation */
double range; /* Equivalent to the gen_pol's FLOATLIMIT constant */
double n, e;
PJ_UV uv_error;
uv_error.u = uv_error.v = HUGE_VAL;
if (nullptr==transformation)
return uv_error;
/* Check for valid value of direction (-1, 0, 1) */
switch (direction) {
case PJ_IDENT: /* no-op */
return position;
case PJ_FWD: /* forward */
case PJ_INV: /* inverse */
break;
default: /* invalid */
return uv_error;
}
/* Prepare for double Horner */
range = transformation->range;
const bool iterative_inverse = direction == PJ_INV && !transformation->has_inv;
if (direction==PJ_FWD) { /* forward */
e = position.u - transformation->fwd_origin->u;
n = position.v - transformation->fwd_origin->v;
if (transformation->uneg)
e = -e;
if (transformation->vneg)
n = -n;
} else { /* inverse */
if (!iterative_inverse) {
e = position.u - transformation->inv_origin->u;
n = position.v - transformation->inv_origin->v;
if (transformation->uneg)
e = -e;
if (transformation->vneg)
n = -n;
} else {
// in this case fwd_origin and any existing flipping needs to be added in the end
e = position.u;
n = position.v;
}
}
if ((fabs(n) > range) || (fabs(e) > range)) {
proj_errno_set(P, PROJ_ERR_COORD_TRANSFM_OUTSIDE_PROJECTION_DOMAIN);
return uv_error;
}
if (iterative_inverse) {
// complex real part corresponds to Northing, imag part to Easting
const double tol = transformation->inverse_tolerance;
const std::complex<double> dZ(n-transformation->fwd_c[0], e-transformation->fwd_c[1]);
std::complex<double> w0(0.0, 0.0);
int loops = 32; // usually converges really fast (1-2 loops)
bool converged = false;
while (loops-- > 0 && !converged) {
// sum coefficient pointers from back to front until the first complex pair (fwd_c0+i*fwd_c1)
const double *c = transformation->fwd_c;
PJ_UV en = { w0.imag(), w0.real() };
en = complex_horner_eval(transformation->order, c, en, 1);
std::complex<double> det(en.v, en.u);
std::complex<double> w1 = dZ / det;
converged = (fabs(w1.real()-w0.real()) < tol) && (fabs(w1.imag()-w0.imag()) < tol);
w0 = w1;
}
// if loops have been exhausted and we have not converged yet,
// we are never going to converge
if (!converged) {
proj_errno_set(P, PROJ_ERR_COORD_TRANSFM);
position = uv_error;
} else {
double E = w0.imag();
double N = w0.real();
if (transformation->uneg)
E = -E;
if (transformation->vneg)
N = -N;
position.u = E + transformation->fwd_origin->u;
position.v = N + transformation->fwd_origin->v;
}
return position;
}
// coefficient pointers
double *cb = direction == PJ_FWD ? transformation->fwd_c : transformation->inv_c;
PJ_UV en = { e, n };
position = complex_horner_eval(transformation->order, cb, en);
return position;
}
static PJ_COORD complex_horner_forward_4d (PJ_COORD point, PJ *P) {
point.uv = complex_horner (P, (HORNER *) P->opaque, PJ_FWD, point.uv);
return point;
}
static PJ_COORD complex_horner_reverse_4d (PJ_COORD point, PJ *P) {
point.uv = complex_horner (P, (HORNER *) P->opaque, PJ_INV, point.uv);
return point;
}
static PJ *horner_freeup (PJ *P, int errlev) { /* Destructor */
if (nullptr==P)
return nullptr;
if (nullptr==P->opaque)
return pj_default_destructor (P, errlev);
horner_free ((HORNER *) P->opaque);
P->opaque = nullptr;
return pj_default_destructor (P, errlev);
}
static int parse_coefs (PJ *P, double *coefs, const char *param, int ncoefs) {
char *buf, *init, *next = nullptr;
int i;
buf = static_cast<char*>(calloc (strlen (param) + 2, sizeof(char)));
if (nullptr==buf) {
proj_log_error (P, "No memory left");
return 0;
}
sprintf (buf, "t%s", param);
if (0==pj_param (P->ctx, P->params, buf).i) {
free (buf);
return 0;
}
sprintf (buf, "s%s", param);
init = pj_param(P->ctx, P->params, buf).s;
free (buf);
for (i = 0; i < ncoefs; i++) {
if (i > 0) {
if ( next == nullptr || ','!=*next) {
proj_log_error (P, "Malformed polynomium set %s. need %d coefs", param, ncoefs);
return 0;
}
init = ++next;
}
coefs[i] = pj_strtod (init, &next);
}
return 1;
}
/*********************************************************************/
PJ *PROJECTION(horner) {
/*********************************************************************/
int degree = 0, n, complex_polynomia = 0;
bool has_inv = false;
HORNER *Q;
P->fwd4d = horner_forward_4d;
P->inv4d = horner_reverse_4d;
P->fwd3d = nullptr;
P->inv3d = nullptr;
P->fwd = nullptr;
P->inv = nullptr;
P->left = P->right = PJ_IO_UNITS_PROJECTED;
P->destructor = horner_freeup;
/* Polynomial degree specified? */
if (pj_param (P->ctx, P->params, "tdeg").i) { /* degree specified? */
degree = pj_param(P->ctx, P->params, "ideg").i;
if (degree < 0 || degree > 10000) {
/* What are reasonable minimum and maximums for degree? */
proj_log_error (P, _("Degree is unreasonable: %d"), degree);
return horner_freeup (P, PROJ_ERR_INVALID_OP_ILLEGAL_ARG_VALUE);
}
} else {
proj_log_error (P, _("Must specify polynomial degree, (+deg=n)"));
return horner_freeup (P, PROJ_ERR_INVALID_OP_MISSING_ARG);
}
if (pj_param (P->ctx, P->params, "tfwd_c").i || pj_param (P->ctx, P->params, "tinv_c").i) /* complex polynomium? */
complex_polynomia = 1;
Q = horner_alloc (degree, complex_polynomia);
if (Q == nullptr)
return horner_freeup (P, PROJ_ERR_OTHER /*ENOMEM*/);
P->opaque = Q;
if (!complex_polynomia) {
has_inv =
pj_param_exists(P->params, "inv_u") ||
pj_param_exists(P->params, "inv_v") ||
pj_param_exists(P->params, "inv_origin");
} else {
has_inv =
pj_param_exists(P->params, "inv_c") ||
pj_param_exists(P->params, "inv_origin");
}
Q->has_inv = has_inv;
if (complex_polynomia) {
/* Westings and/or southings? */
Q->uneg = pj_param_exists (P->params, "uneg") ? 1 : 0;
Q->vneg = pj_param_exists (P->params, "vneg") ? 1 : 0;
n = 2*degree + 2;
if (0==parse_coefs (P, Q->fwd_c, "fwd_c", n))
{
proj_log_error (P, _("missing fwd_c"));
return horner_freeup (P, PROJ_ERR_INVALID_OP_MISSING_ARG);
}
if (has_inv && 0==parse_coefs (P, Q->inv_c, "inv_c", n))
{
proj_log_error (P, _("missing inv_c"));
return horner_freeup (P, PROJ_ERR_INVALID_OP_MISSING_ARG);
}
P->fwd4d = complex_horner_forward_4d;
P->inv4d = complex_horner_reverse_4d;
}
else {
n = horner_number_of_coefficients (degree);
if (0==parse_coefs (P, Q->fwd_u, "fwd_u", n))
{
proj_log_error (P, _("missing fwd_u"));
return horner_freeup (P, PROJ_ERR_INVALID_OP_MISSING_ARG);
}
if (0==parse_coefs (P, Q->fwd_v, "fwd_v", n))
{
proj_log_error (P, _("missing fwd_v"));
return horner_freeup (P, PROJ_ERR_INVALID_OP_MISSING_ARG);
}
if (has_inv && 0==parse_coefs (P, Q->inv_u, "inv_u", n))
{
proj_log_error (P, _("missing inv_u"));
return horner_freeup (P, PROJ_ERR_INVALID_OP_MISSING_ARG);
}
if (has_inv && 0==parse_coefs (P, Q->inv_v, "inv_v", n))
{
proj_log_error (P, _("missing inv_v"));
return horner_freeup (P, PROJ_ERR_INVALID_OP_MISSING_ARG);
}
}
if (0==parse_coefs (P, (double *)(Q->fwd_origin), "fwd_origin", 2))
{
proj_log_error (P, _("missing fwd_origin"));
return horner_freeup (P, PROJ_ERR_INVALID_OP_MISSING_ARG);
}
if (has_inv && 0==parse_coefs (P, (double *)(Q->inv_origin), "inv_origin", 2))
{
proj_log_error (P, _("missing inv_origin"));
return horner_freeup (P, PROJ_ERR_INVALID_OP_MISSING_ARG);
}
if (0==parse_coefs (P, &Q->range, "range", 1))
Q->range = 500000;
if (0==parse_coefs (P, &Q->inverse_tolerance, "inv_tolerance", 1))
Q->inverse_tolerance = 0.001;
return P;
}
| 10,589 |
2,406 | #include <ie_core.hpp>
int main() {
using namespace InferenceEngine;
//! [part6]
InferenceEngine::Core core;
auto network = core.ReadNetwork("sample.xml");
auto exeNetwork = core.LoadNetwork(network, "GPU");
std::map<std::string, uint64_t> statistics_map = exeNetwork.GetMetric(GPU_METRIC_KEY(MEMORY_STATISTICS));
//! [part6]
return 0;
}
| 122 |
5,169 | <gh_stars>1000+
{
"name": "WilddogIM",
"version": "0.1.0",
"summary": "Wilddog IM module for iOS",
"description": "The IM module helps you communication with your friends and the world.",
"homepage": "http://www.wilddog.com/",
"license": "Copyright",
"authors": "<NAME>",
"platforms": {
"ios": "8.0"
},
"source": {
"http": "https://cdn.wilddog.com/sdk/ios/0.1.0/WilddogIM.framework-0.1.0.zip"
},
"vendored_frameworks": "WilddogIM.framework",
"xcconfig": {
"OTHER_LDFLAGS": "-ObjC"
},
"dependencies": {
"Wilddog/Sync": [
"~> 2.0.2"
],
"Wilddog/Auth": [
"~> 2.0.2"
]
}
}
| 289 |
1,056 | <reponame>timfel/netbeans
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.netbeans.modules.java.editor.codegen.ui;
import java.awt.GridBagConstraints;
import java.awt.Insets;
import java.util.List;
import javax.lang.model.element.Element;
import javax.swing.JCheckBox;
import javax.swing.JPanel;
import org.netbeans.api.java.source.ElementHandle;
import org.netbeans.modules.java.editor.codegen.ToStringGenerator;
import org.openide.util.NbBundle;
/**
*
* @author <NAME>
*/
public class ToStringPanel extends JPanel {
private final ElementSelectorPanel elementSelector;
private final JCheckBox useStringBuilderCheckBox = new JCheckBox();
    /** Creates new form ToStringPanel */
public ToStringPanel(ElementNode.Description description, boolean useStringBuilder, boolean supportsStringBuilder) {
initComponents();
elementSelector = new ElementSelectorPanel(description, false);
java.awt.GridBagConstraints gridBagConstraints = new java.awt.GridBagConstraints();
gridBagConstraints.fill = java.awt.GridBagConstraints.BOTH;
gridBagConstraints.weightx = 1.0;
gridBagConstraints.weighty = 1.0;
gridBagConstraints.insets = new java.awt.Insets(0, 12, 0, 12);
add(elementSelector, gridBagConstraints);
selectorLabel.setText(NbBundle.getMessage(ToStringGenerator.class, "LBL_tostring_select")); //NOI18N
selectorLabel.setLabelFor(elementSelector);
elementSelector.doInitialExpansion(1);
useStringBuilderCheckBox.setSelected(false);
if (supportsStringBuilder) {
useStringBuilderCheckBox.setSelected(useStringBuilder);
org.openide.awt.Mnemonics.setLocalizedText(useStringBuilderCheckBox, NbBundle.getMessage(ToStringGenerator.class, "LBL_tostring_use_stringbuilder")); // NOI18N
GridBagConstraints gbc = new GridBagConstraints();
gbc.fill = GridBagConstraints.BOTH;
gbc.gridx = 0;
gbc.gridy = 2;
gbc.insets = new Insets(0, 12, 0, 12);
gbc.anchor = GridBagConstraints.WEST;
add(useStringBuilderCheckBox, gbc);
}
this.getAccessibleContext().setAccessibleDescription(NbBundle.getMessage(ToStringGenerator.class, "A11Y_Generate_ToString"));
}
public List<ElementHandle<? extends Element>> getVariables() {
return ((ElementSelectorPanel)elementSelector).getSelectedElements();
}
public boolean useStringBuilder() {
return useStringBuilderCheckBox.isSelected();
}
/** This method is called from within the constructor to
* initialize the form.
* WARNING: Do NOT modify this code. The content of this method is
* always regenerated by the Form Editor.
*/
// <editor-fold defaultstate="collapsed" desc="Generated Code">//GEN-BEGIN:initComponents
private void initComponents() {
java.awt.GridBagConstraints gridBagConstraints;
selectorLabel = new javax.swing.JLabel();
setLayout(new java.awt.GridBagLayout());
gridBagConstraints = new java.awt.GridBagConstraints();
gridBagConstraints.gridwidth = java.awt.GridBagConstraints.REMAINDER;
gridBagConstraints.fill = java.awt.GridBagConstraints.BOTH;
gridBagConstraints.anchor = java.awt.GridBagConstraints.WEST;
gridBagConstraints.insets = new java.awt.Insets(12, 12, 6, 12);
add(selectorLabel, gridBagConstraints);
}// </editor-fold>//GEN-END:initComponents
// Variables declaration - do not modify//GEN-BEGIN:variables
private javax.swing.JLabel selectorLabel;
// End of variables declaration//GEN-END:variables
}
| 1,617 |
441 | <reponame>vhn0912/Finance
import pandas as pd
import numpy as np
import yfinance as yf
import datetime as dt
from pandas_datareader import data as pdr
import statistics
import time
yf.pdr_override()
now = dt.datetime.now()
start =dt.datetime(2019,1,1)
smaUsed=[50,200]
emaUsed=[21]
stock = input('Enter a ticker: ')
position = input('Buy or Short? ')
AvgGain= int(input('Enter Your Average Gain: '))
AvgLoss= int(input('Enter Your Average Loss: '))
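# Compute a maximum stop price from the average loss and 1R/2R/3R price targets
# from the average gain, then check whether the 5-day low, 21 EMA and 50 SMA sit
# within the maximum stop and how far price is from the 200 SMA.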
if position.lower() == 'buy':
df = pdr.get_data_yahoo(stock, start, now)
close=df["Adj Close"][-1]
maxStop=close*((100-AvgLoss)/100)
Target1R=round(close*((100+AvgGain)/100),2)
Target2R=round(close*(((100+(2*AvgGain))/100)),2)
Target3R=round(close*(((100+(3*AvgGain))/100)),2)
for x in smaUsed:
sma=x
df["SMA_"+str(sma)]=round(df.iloc[:,4].rolling(window=sma).mean(),2)
for x in emaUsed:
ema=x
df['EMA_'+str(ema)] = round(df.iloc[:,4].ewm(span=ema,adjust=False).mean(),2)
sma50=round(df["SMA_50"][-1],2)
sma200=round(df["SMA_200"][-1],2)
ema21=round(df["EMA_21"][-1],2)
low5=round(min(df["Low"].tail(5)),2)
pf50=round(((close/sma50)-1)*100,2)
check50=df["SMA_50"][-1]>maxStop
pf200=round(((close/sma200)-1)*100,2)
check200=((close/df["SMA_200"][-1])-1)*100>100
pf21=round(((close/ema21)-1)*100,2)
check21=df["EMA_21"][-1]>maxStop
pfl=round(((close/low5)-1)*100,2)
    checkl=low5>maxStop
print()
print("Current Stock: "+stock+" Price: "+str(round(close,2)))
print("21 EMA: "+str(ema21)+ " | 50 SMA: "+str(sma50)+ " | 200 SMA: "+str(sma200)+ " | 5 day Low: "+str(low5))
print("-------------------------------------------------")
print("Max Stop: "+str(round(maxStop,2)))
print("Price Targets:")
print("1R: "+str(Target1R))
print("2R: "+str(Target2R))
print("3R: "+str(Target3R))
print("From 5 Day Low "+ str(pfl)+ "% -Within Max Stop: "+str(checkl))
print("From 21 day EMA "+ str(pf21)+ "% -Within Max Stop: "+str(check21))
print("From 50 day SMA "+ str(pf50)+ "% -Within Max Stop: "+str(check50))
print("From 200 Day SMA "+ str(pf200)+ "% -In Danger Zone (Over 100% from 200 SMA): "+str(check200))
print()
elif position.lower() == 'short':
df = pdr.get_data_yahoo(stock, start, now)
close=df["Adj Close"][-1]
maxStop=close*((100+AvgLoss)/100)
Target3R=round(close*(((100-(3*AvgGain))/100)),2)
Target2R=round(close*(((100-(2*AvgGain))/100)),2)
Target1R=round(close*((100-AvgGain)/100),2)
for x in smaUsed:
sma=x
df["SMA_"+str(sma)]=round(df.iloc[:,4].rolling(window=sma).mean(),2)
for x in emaUsed:
ema=x
df['EMA_'+str(ema)] = round(df.iloc[:,4].ewm(span=ema,adjust=False).mean(),2)
sma50=round(df["SMA_50"][-1],2)
sma200=round(df["SMA_200"][-1],2)
ema21=round(df["EMA_21"][-1],2)
low5=round(min(df["Low"].tail(5)),2)
pf50=round(((close/sma50)-1)*100,2)
check50=df["SMA_50"][-1]>maxStop
pf200=round(((close/sma200)-1)*100,2)
check200=((close/df["SMA_200"][-1])-1)*100>100
pf21=round(((close/ema21)-1)*100,2)
check21=df["EMA_21"][-1]>maxStop
pfl=round(((close/low5)-1)*100,2)
    checkl=low5>maxStop
print()
print("Current Stock: "+stock+" Price: "+str(round(close,2)))
print("21 EMA: "+str(ema21)+ " | 50 SMA: "+str(sma50)+ " | 200 SMA: "+str(sma200)+ " | 5 day Low: "+str(low5))
print("-------------------------------------------------")
print("Max Stop: "+str(round(maxStop,2)))
print("Price Targets:")
print("1R: "+str(Target1R))
print("2R: "+str(Target2R))
print("3R: "+str(Target3R))
print("From 5 Day Low "+ str(pfl)+ "% -Within Max Stop: "+str(checkl))
print("From 21 day EMA "+ str(pf21)+ "% -Within Max Stop: "+str(check21))
print("From 50 day SMA "+ str(pf50)+ "% -Within Max Stop: "+str(check50))
print("From 200 Day SMA "+ str(pf200)+ "% -In Danger Zone (Over 100% from 200 SMA): "+str(check200))
    print()
| 1,896 |
2,039 | package org.nd4j.aeron.ipc;
import org.agrona.DirectBuffer;
import org.junit.Test;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;
import static org.junit.Assert.assertEquals;
/**
* Created by agibsonccc on 11/6/16.
*/
public class NDArrayMessageTest {
@Test
public void testNDArrayMessageToAndFrom() {
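        // Round-trip: serialize a whole-array update into an Aeron DirectBuffer,
        // deserialize it again and verify the result equals the original message,
        // both for an uncompressed scalar and for a GZIP-compressed one.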
NDArrayMessage message = NDArrayMessage.wholeArrayUpdate(Nd4j.scalar(1.0));
DirectBuffer bufferConvert = NDArrayMessage.toBuffer(message);
bufferConvert.byteBuffer().rewind();
NDArrayMessage newMessage = NDArrayMessage.fromBuffer(bufferConvert, 0);
assertEquals(message, newMessage);
INDArray compressed = Nd4j.getCompressor().compress(Nd4j.scalar(1.0), "GZIP");
NDArrayMessage messageCompressed = NDArrayMessage.wholeArrayUpdate(compressed);
DirectBuffer bufferConvertCompressed = NDArrayMessage.toBuffer(messageCompressed);
NDArrayMessage newMessageTest = NDArrayMessage.fromBuffer(bufferConvertCompressed, 0);
assertEquals(messageCompressed, newMessageTest);
}
}
| 416 |