# Copyright (C) 2022 Intel Corporation
#
# SPDX-License-Identifier: MIT

import pytest

from http import HTTPStatus

from .utils.config import server_get


class TestGetAnalytics:
    endpoint = 'analytics/app/kibana'

    def _test_can_see(self, user):
        response = server_get(user, self.endpoint)
        assert response.status_code == HTTPStatus.OK

    def _test_cannot_see(self, user):
        response = server_get(user, self.endpoint)
        assert response.status_code == HTTPStatus.FORBIDDEN

    @pytest.mark.parametrize('privilege, is_allow', [
        ('admin', True), ('business', True),
        ('worker', False), ('user', False)
    ])
    def test_can_see(self, privilege, is_allow, find_users):
        user = find_users(privilege=privilege)[0]['username']

        if is_allow:
            self._test_can_see(user)
        else:
            self._test_cannot_see(user)
/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER
 *
 * Copyright (c) 2016, Juniper Networks, Inc. All rights reserved.
 *
 *
 * The contents of this file are subject to the terms of the BSD 3 clause
 * License (the "License"). You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at
 * https://github.com/Juniper/warp17/blob/master/LICENSE.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 *    contributors may be used to endorse or promote products derived from this
 *    software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * File name:
 *     tpg_tests.c
 *
 * Description:
 *     Test engine implementation
 *
 * Author:
 *     <NAME>, <NAME>
 *
 * Initial Created:
 *     03/19/2015
 *
 * Notes:
 *
 */

/*****************************************************************************
 * Include files
 ****************************************************************************/
#include "tcp_generator.h"

/*****************************************************************************
 * Globals
 ****************************************************************************/
/*
 * Array[port][tcid] holding the test case operational state for
 * testcases on a port.
 */
RTE_DEFINE_PER_LCORE(test_case_info_t *, test_case_info);

/*
 * Array[port][tcid] holding the test case config for
 * testcases on a port.
 */
RTE_DEFINE_PER_LCORE(test_case_init_msg_t *, test_case_cfg);

typedef struct test_stats_s {

    tpg_gen_stats_t  ts_gen_stats;
    tpg_rate_stats_t ts_rate_stats;
    tpg_app_stats_t  ts_app_stats;

} test_stats_t;

/*
 * Array[port][tcid] holding the test case stats (gen, rate, app) for
 * testcases on a port.
 */
RTE_DEFINE_PER_LCORE(test_stats_t *, test_case_stats);

#define TEST_GET_STATS(port, tcid) \
    (&RTE_PER_LCORE(test_case_stats)[(port) * TPG_TEST_MAX_ENTRIES + (tcid)])

#define TEST_GET_GEN_STATS(port, tcid) \
    (&TEST_GET_STATS((port), (tcid))->ts_gen_stats)

#define TEST_GET_RATE_STATS(port, tcid) \
    (&TEST_GET_STATS((port), (tcid))->ts_rate_stats)

#define TEST_GET_APP_STATS(port, tcid) \
    (&TEST_GET_STATS((port), (tcid))->ts_app_stats)

/*
 * Array[port][tcid] holding the test case latency operational state for
 * testcases on a port.
 */
RTE_DEFINE_PER_LCORE(test_oper_latency_state_t *, test_case_latency_state);

#define TEST_GET_LATENCY_STATE(port, tcid)     \
    (RTE_PER_LCORE(test_case_latency_state) + \
     (port) * TPG_TEST_MAX_ENTRIES + (tcid))

/*
 * Pool of messages/tmr args to be used for running client tests (TCP/UDP).
 * We never need more than one message per port + test case + op.
 * Array[testcase][port].
 */
RTE_DEFINE_PER_LCORE(test_run_msgpool_t *, test_open_msgpool);
RTE_DEFINE_PER_LCORE(test_run_msgpool_t *, test_close_msgpool);
RTE_DEFINE_PER_LCORE(test_run_msgpool_t *, test_send_msgpool);

#define TEST_GET_MSG_PTR(msgpool, port, tcid) \
    ((msgpool) + (port) * TPG_TEST_MAX_ENTRIES + (tcid))

static RTE_DEFINE_PER_LCORE(test_tmr_arg_t *, test_tmr_open_args);
static RTE_DEFINE_PER_LCORE(test_tmr_arg_t *, test_tmr_close_args);
static RTE_DEFINE_PER_LCORE(test_tmr_arg_t *, test_tmr_send_args);

#define TEST_GET_TMR_ARG(type, port, tcid)   \
    (RTE_PER_LCORE(test_tmr_##type##_args) + \
     (port) * TPG_TEST_MAX_ENTRIES + (tcid))

/*
 * Per test-type and protocol callbacks.
 * TODO: ideally this should be dynamic and allow registration of other
 * protocols.
 */
static int test_case_tcp_client_open(l4_control_block_t *l4_cb);
static uint32_t test_case_tcp_mtu(l4_control_block_t *l4_cb);
static int test_case_tcp_send(l4_control_block_t *l4_cb,
                              struct rte_mbuf *data_mbuf,
                              uint32_t *data_sent);
static void test_case_tcp_close(l4_control_block_t *l4_cb);
static void test_case_tcp_purge(l4_control_block_t *l4_cb);

static int test_case_udp_client_open(l4_control_block_t *l4_cb);
static uint32_t test_case_udp_mtu(l4_control_block_t *l4_cb);
static int test_case_udp_send(l4_control_block_t *l4_cb,
                              struct rte_mbuf *data_mbuf,
                              uint32_t *data_sent);
static void test_case_udp_close(l4_control_block_t *l4_cb);
static void test_case_udp_purge(l4_control_block_t *l4_cb);

static struct {
    test_case_client_open_cb_t   open;
    test_case_client_close_cb_t  close;
    test_case_session_mtu_cb_t   mtu;
    test_case_session_send_cb_t  send;
    test_case_session_close_cb_t sess_close;
    test_case_session_purge_cb_t sess_purge;
    test_case_htable_walk_cb_t   sess_htable_walk;
} test_callbacks[TEST_CASE_TYPE__MAX][L4_PROTO__L4_PROTO_MAX] = {

    [TEST_CASE_TYPE__SERVER][L4_PROTO__TCP] = {
        .open = NULL,
        .close = NULL,
        .mtu = test_case_tcp_mtu,
        .send = test_case_tcp_send,
        .sess_close = test_case_tcp_close,
        .sess_purge = test_case_tcp_purge,
        .sess_htable_walk = tlkp_walk_tcb,
    },
    [TEST_CASE_TYPE__SERVER][L4_PROTO__UDP] = {
        .open = NULL,
        .close = NULL,
        .mtu = test_case_udp_mtu,
        .send = test_case_udp_send,
        .sess_close = test_case_udp_close,
        .sess_purge = test_case_udp_purge,
        .sess_htable_walk = tlkp_walk_ucb,
    },
    [TEST_CASE_TYPE__CLIENT][L4_PROTO__TCP] = {
        .open = test_case_tcp_client_open,
        .close = test_case_tcp_close,
        .mtu = test_case_tcp_mtu,
        .send = test_case_tcp_send,
        .sess_close = test_case_tcp_close,
        .sess_purge = test_case_tcp_purge,
        .sess_htable_walk = tlkp_walk_tcb,
    },
    [TEST_CASE_TYPE__CLIENT][L4_PROTO__UDP] = {
        .open = test_case_udp_client_open,
        .close = test_case_udp_close,
        .mtu = test_case_udp_mtu,
        .send = test_case_udp_send,
        .sess_close = test_case_udp_close,
        .sess_purge = test_case_udp_purge,
        .sess_htable_walk = tlkp_walk_ucb,
    },
};

/*****************************************************************************
 * Forward declarations
 ****************************************************************************/
static void test_update_recent_latency_stats(tpg_latency_stats_t *stats,
                                             test_oper_latency_state_t *buffer,
                                             tpg_test_case_latency_t *tc_latency);

static void
test_case_latency_init(test_case_info_t *tc_info);

/*****************************************************************************
 * test_update_cksum_tstamp()
 * This function uses the incremental checksum update from RFC 1624 in order
 * to update the checksum (IPv4/TCP/UDP) after the timestamp is added.
 * https://tools.ietf.org/html/rfc1624
 ****************************************************************************/
static void test_update_cksum_tstamp(struct rte_mbuf *mbuf __rte_unused,
                                     struct rte_mbuf *mbuf_seg,
                                     uint32_t offset,
                                     uint32_t size)
{
    uint16_t *tstamp;
    uint16_t *cksum_ptr;
    uint16_t  cksum;
    uint32_t  cksum_32;
    uint32_t  offset_ck;

    offset_ck = DATA_GET_CKSUM_OFFSET(mbuf);

    /* Offset checksum works only with TPG_SW_CHECKSUMMING enabled! */
    if (!offset_ck)
        return;

    cksum_ptr = (uint16_t *) data_mbuf_mtod_offset(mbuf, offset_ck);
    tstamp = (uint16_t *) data_mbuf_mtod_offset(mbuf_seg, offset);

    /* This is needed otherwise the checksum calc won't work. */
    cksum = ~(*cksum_ptr) & 0xFFFF;

    /* WARNING: these are private functions of the DPDK library; they may
     * change in the future!
     */
    cksum_32 = __rte_raw_cksum(tstamp, size, cksum);
    cksum = __rte_raw_cksum_reduce(cksum_32);

    cksum = (cksum == 0xFFFF) ? cksum : ~cksum;
    *cksum_ptr = cksum;
}
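/*
 * RFC 1624 background for the update above: the incremental form is
 * HC' = ~(~HC + ~m + m'), where HC is the old checksum, m the old value of
 * the changed words and m' the new value. If the timestamp field is all-zero
 * before being written (an assumption about the caller; with m == 0, adding
 * ~m == 0xFFFF, i.e. "negative zero", leaves a one's-complement sum
 * unchanged), the update reduces to folding only the new words m' into ~HC,
 * which is what passing ~(*cksum_ptr) as the seed to __rte_raw_cksum() does.
 */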
/*****************************************************************************
 * Client and server config control block walk functions
 ****************************************************************************/
/*
 * Callback definition to be used when walking control block configs (both
 * clients and servers).
 */
typedef void (*test_walk_cfg_cb_t)(uint32_t lcore, uint32_t eth_port,
                                   uint32_t test_case_id,
                                   uint32_t src_ip, uint32_t dst_ip,
                                   uint16_t src_port, uint16_t dst_port,
                                   uint32_t conn_hash,
                                   void *arg);

/*****************************************************************************
 * test_case_for_each_client()
 * Notes: walks the list of client control blocks from a given config.
 *        Only core-local clients are processed.
 ****************************************************************************/
static void test_case_for_each_client(uint32_t lcore,
                                      const test_case_init_msg_t *cfg,
                                      test_walk_cfg_cb_t callback,
                                      void *callback_arg)
{
    uint32_t            eth_port;
    uint32_t            tc_id;
    uint32_t            src_ip, dst_ip;
    uint16_t            src_port, dst_port;
    uint32_t            conn_hash;
    uint32_t            rx_queue_id;
    const tpg_client_t *client_cfg;

    eth_port = cfg->tcim_test_case.tc_eth_port;
    tc_id = cfg->tcim_test_case.tc_id;
    rx_queue_id = port_get_rx_queue_id(lcore, eth_port);
    client_cfg = &cfg->tcim_test_case.tc_client;

    TPG_FOREACH_CB_IN_RANGE(&client_cfg->cl_src_ips,
                            &client_cfg->cl_dst_ips,
                            &client_cfg->cl_l4.l4c_tcp_udp.tuc_sports,
                            &client_cfg->cl_l4.l4c_tcp_udp.tuc_dports,
                            src_ip, dst_ip, src_port, dst_port) {
        conn_hash = tlkp_calc_connection_hash(dst_ip, src_ip, dst_port,
                                              src_port);

        if (tlkp_get_qindex_from_hash(conn_hash, eth_port) != rx_queue_id)
            continue;

        callback(lcore, eth_port, tc_id, src_ip, dst_ip, src_port, dst_port,
                 conn_hash, callback_arg);
    }
}

/*****************************************************************************
 * test_case_client_cfg_count()
 * Notes: returns the total number of client connections that would be
 *        generated from a given config.
 ****************************************************************************/
static uint32_t test_case_client_cfg_count(const tpg_client_t *client_cfg)
{
    return TPG_IPV4_RANGE_SIZE(&client_cfg->cl_src_ips) *
           TPG_IPV4_RANGE_SIZE(&client_cfg->cl_dst_ips) *
           TPG_PORT_RANGE_SIZE(&client_cfg->cl_l4.l4c_tcp_udp.tuc_sports) *
           TPG_PORT_RANGE_SIZE(&client_cfg->cl_l4.l4c_tcp_udp.tuc_dports);
}

/*****************************************************************************
 * test_case_for_each_server()
 * Notes: walks the list of server control blocks from a given config.
 ****************************************************************************/
static void test_case_for_each_server(uint32_t lcore,
                                      const test_case_init_msg_t *cfg,
                                      test_walk_cfg_cb_t callback,
                                      void *callback_arg)
{
    uint32_t            eth_port;
    uint32_t            tc_id;
    uint32_t            src_ip;
    uint16_t            src_port;
    const tpg_server_t *server_cfg;

    eth_port = cfg->tcim_test_case.tc_eth_port;
    tc_id = cfg->tcim_test_case.tc_id;
    server_cfg = &cfg->tcim_test_case.tc_server;

    TPG_IPV4_FOREACH(&server_cfg->srv_ips, src_ip) {
        TPG_PORT_FOREACH(&server_cfg->srv_l4.l4s_tcp_udp.tus_ports,
                         src_port) {
            callback(lcore, eth_port, tc_id, src_ip, 0, src_port, 0, 0,
                     callback_arg);
        }
    }
}

/*****************************************************************************
 * test_case_server_cfg_count()
 * Notes: returns the total number of server listeners that would be
 *        generated from a given config.
 ****************************************************************************/
static uint32_t test_case_server_cfg_count(const tpg_server_t *server_cfg)
{
    return TPG_IPV4_RANGE_SIZE(&server_cfg->srv_ips) *
           TPG_PORT_RANGE_SIZE(&server_cfg->srv_l4.l4s_tcp_udp.tus_ports);
}

/*****************************************************************************
 * test_latency_state_init
 ****************************************************************************/
static void test_latency_state_init(test_oper_latency_state_t *buffer,
                                    uint32_t len)
{
    bzero(buffer, sizeof(*buffer));
    buffer->tols_length = len;
}

/*****************************************************************************
 * test_latency_state_add()
 ****************************************************************************/
static void test_latency_state_add(test_oper_latency_state_t *buffer,
                                   uint64_t tstamp,
                                   tpg_latency_stats_t *sample_stats)
{
    uint32_t position;

    position = (buffer->tols_actual_length + buffer->tols_start_index) %
               buffer->tols_length;

    if (buffer->tols_actual_length + 1 <= buffer->tols_length) {
        buffer->tols_timestamps[position] = tstamp;
        buffer->tols_actual_length++;
    } else {
        uint64_t retain_tstamp;

        retain_tstamp = buffer->tols_timestamps[position];
        buffer->tols_timestamps[position] = tstamp;
        sample_stats->ls_sum_latency -= retain_tstamp;
        sample_stats->ls_samples_count--;
    }
}

/*****************************************************************************
 * test_update_latency_stats()
 ****************************************************************************/
static void test_update_latency_stats(tpg_latency_stats_t *stats,
                                      uint64_t latency,
                                      tpg_test_case_latency_t *tci_latency)
{
    int64_t avg = 0;

    if (stats->ls_samples_count > 0) {
        avg = stats->ls_sum_latency / stats->ls_samples_count;
        stats->ls_instant_jitter = (avg >= (int) latency) ?
(avg - latency) : (latency - avg); stats->ls_sum_jitter += stats->ls_instant_jitter; } if (tci_latency->has_tcs_max) { if (latency > tci_latency->tcs_max) INC_STATS(stats, ls_max_exceeded); } if (tci_latency->has_tcs_max_avg) { if (avg > tci_latency->tcs_max_avg) INC_STATS(stats, ls_max_average_exceeded); } stats->ls_samples_count++; stats->ls_sum_latency += latency; if (latency < stats->ls_min_latency) stats->ls_min_latency = latency; if (latency > stats->ls_max_latency) stats->ls_max_latency = latency; } /***************************************************************************** * test_update_latency() ****************************************************************************/ void test_update_latency(l4_control_block_t *l4_cb, uint64_t sent_tstamp, uint64_t rcv_tstamp) { test_case_info_t *tc_info; tpg_gen_latency_stats_t *stats; int64_t latency; tc_info = TEST_GET_INFO(l4_cb->l4cb_interface, l4_cb->l4cb_test_case_id); stats = &tc_info->tci_gen_stats->gs_latency_stats; if (rcv_tstamp < sent_tstamp || sent_tstamp == 0 || rcv_tstamp == 0) { INC_STATS(stats, gls_invalid_lat); return; } latency = rcv_tstamp - sent_tstamp; /* Global stats */ test_update_latency_stats(&stats->gls_stats, latency, &tc_info->tci_cfg->tcim_test_case.tc_latency); /* Recent stats */ if (tc_info->tci_latency_state->tols_length != 0) { test_latency_state_add(tc_info->tci_latency_state, latency, &stats->gls_sample_stats); } } /***************************************************************************** * test_case_run_msg() ****************************************************************************/ int test_case_run_msg(uint32_t lcore_id, uint32_t eth_port, uint32_t test_case_id, test_run_msgpool_t *msgpool, test_run_msg_type_t msg_type) { int error; msg_t *msgp; msgp = &TEST_GET_MSG_PTR(msgpool, eth_port, test_case_id)->msg; msg_init(msgp, msg_type, lcore_id, MSG_FLAG_LOCAL); MSG_INNER(test_case_run_msg_t, msgp)->tcrm_eth_port = eth_port; MSG_INNER(test_case_run_msg_t, msgp)->tcrm_test_case_id = test_case_id; /* Send the message and forget about it. */ error = msg_send_local(msgp, MSG_SND_FLAG_NOWAIT); if (unlikely(error != 0)) { RTE_LOG(ERR, USER1, "[%d:%s()] Failed to send RUN message: tcid=%"PRIu32" %s(%d)\n", rte_lcore_index(lcore_id), __func__, test_case_id, rte_strerror(-error), -error); } return 0; } /***************************************************************************** * test_case_tmr_cb() ****************************************************************************/ static void test_case_tmr_cb(struct rte_timer *tmr __rte_unused, void *arg) { test_tmr_arg_t *tmr_arg = arg; test_rate_state_t *rate_state = tmr_arg->tta_rate_state; rate_limit_t *rate_limit = tmr_arg->tta_rate_limit; uint32_t in_progress_flag = tmr_arg->tta_rate_in_progress_flag; uint32_t reached_flag = tmr_arg->tta_rate_reached_flag; test_run_msgpool_t *msg_pool = tmr_arg->tta_run_msg_pool; /* We step into a new time interval... "Advance" the rates. */ rate_limit_advance_interval(rate_limit); /* Start from scratch (reset the "reached" flag).. */ rate_state->trs_flags &= ~reached_flag; test_resched_runner(rate_state, tmr_arg->tta_eth_port, tmr_arg->tta_test_case_id, in_progress_flag, reached_flag, msg_pool, tmr_arg->tta_run_msg_type); } /***************************************************************************** * Test notifications. 
****************************************************************************/ /***************************************************************************** * test_sess_init() ****************************************************************************/ static void test_sess_init(l4_control_block_t *l4_cb, test_case_info_t *tc_info) { tpg_app_proto_t app_id = l4_cb->l4cb_app_data.ad_type; /* Struct copy the shared storage. */ l4_cb->l4cb_app_data.ad_storage = tc_info->tci_app_storage; /* Initialize the application state. */ APP_CALL(init, app_id)(&l4_cb->l4cb_app_data, &tc_info->tci_cfg->tcim_test_case.tc_app); /* Initialize the test state machine for the new TCB. */ test_sm_client_initialize(l4_cb, tc_info); } /***************************************************************************** * test_sess_record_start_time() ****************************************************************************/ static void test_sess_record_start_time(test_case_info_t *tc_info) { if (unlikely(tc_info->tci_gen_stats->gs_start_time == 0)) tc_info->tci_gen_stats->gs_start_time = rte_get_timer_cycles(); } /***************************************************************************** * test_sess_record_end_time() ****************************************************************************/ static void test_sess_record_end_time(test_case_info_t *tc_info) { /* * This is not really the end time.. but we can keep it * here for now for tracking how long it took to establish sessions. */ tc_info->tci_gen_stats->gs_end_time = rte_get_timer_cycles(); } /***************************************************************************** * test_sess_connecting() ****************************************************************************/ static void test_sess_connecting(l4_control_block_t *l4_cb, test_case_info_t *tc_info) { test_sess_record_start_time(tc_info); test_sm_sess_connecting(l4_cb, tc_info); } /***************************************************************************** * test_sess_connected() ****************************************************************************/ static void test_sess_connected(l4_control_block_t *l4_cb, test_case_info_t *tc_info) { tc_info->tci_gen_stats->gs_estab++; /* Update rate per second. */ tc_info->tci_rate_stats->rs_estab_per_s++; test_sess_record_end_time(tc_info); test_sm_sess_connected(l4_cb, tc_info); } /***************************************************************************** * test_sess_connected_imm() ****************************************************************************/ static void test_sess_connected_imm(l4_control_block_t *l4_cb, test_case_info_t *tc_info) { /* We skipped "connecting" so let's record the start time. */ test_sess_record_start_time(tc_info); test_sess_connected(l4_cb, tc_info); } /***************************************************************************** * test_sess_listen() ****************************************************************************/ static void test_sess_listen(l4_control_block_t *l4_cb, test_case_info_t *tc_info) { tpg_app_proto_t app_id = l4_cb->l4cb_app_data.ad_type; /* Struct copy the shared storage. */ l4_cb->l4cb_app_data.ad_storage = tc_info->tci_app_storage; /* Initialize the application state. */ APP_CALL(init, app_id)(&l4_cb->l4cb_app_data, &tc_info->tci_cfg->tcim_test_case.tc_app); /* Initialize the test state machine for the listening TCB. 
*/ test_sm_listen_initialize(l4_cb, tc_info); } /***************************************************************************** * test_sess_server_connected() ****************************************************************************/ static void test_sess_server_connected(l4_control_block_t *l4_cb, test_case_info_t *tc_info) { tc_info->tci_gen_stats->gs_estab++; /* Update rate per second. */ tc_info->tci_rate_stats->rs_estab_per_s++; /* Initialize the test state machine for the new CB. */ test_sm_server_initialize(l4_cb, tc_info); } /***************************************************************************** * test_sess_closing() ****************************************************************************/ static void test_sess_closing(l4_control_block_t *l4_cb, test_case_info_t *tc_info) { test_sm_sess_closing(l4_cb, tc_info); } /***************************************************************************** * test_sess_closed() ****************************************************************************/ static void test_sess_closed(l4_control_block_t *l4_cb, test_case_info_t *tc_info) { /* Update rate per second. */ tc_info->tci_rate_stats->rs_closed_per_s++; test_sm_sess_closed(l4_cb, tc_info); } /***************************************************************************** * test_sess_win_available() ****************************************************************************/ static void test_sess_win_available(l4_control_block_t *l4_cb, test_case_info_t *tc_info) { test_sm_app_send_win_avail(l4_cb, tc_info); } /***************************************************************************** * test_sess_win_unavailable() ****************************************************************************/ static void test_sess_win_unavailable(l4_control_block_t *l4_cb, test_case_info_t *tc_info) { test_sm_app_send_win_unavail(l4_cb, tc_info); } /***************************************************************************** * Static functions for Rate Limiting engine initialization/start/stop. ****************************************************************************/ /***************************************************************************** * test_case_rate_init() * NOTES: scales down the rate limit based on the percentage of sessions * actually running on this core. However, if rate limiting is unlimited * (TPG_RATE_LIM_INFINITE_VAL) there's no need to scale down. ****************************************************************************/ static void test_case_rate_init(const char *rl_name, rate_limit_t *rl, rate_limit_cfg_t *rate_cfg, uint32_t lcore, uint32_t eth_port, uint32_t max_burst, uint32_t total_sessions, uint32_t local_sessions) { uint32_t target_rate = rate_cfg->rlc_target; uint32_t core_count = PORT_QCNT(eth_port); int err; if (target_rate != TPG_RATE_LIM_INFINITE_VAL && total_sessions != 0) target_rate = (uint64_t)local_sessions * target_rate / total_sessions; err = rate_limit_init(rl, rate_cfg, lcore, core_count, target_rate, max_burst); if (unlikely(err != 0)) { /* Unfortunately we can't do much here.. Just log the error for the * user. Rate limiting will stay 0 so no operations of the * corresponding type (i.e., open/close/send) will be performed. */ RTE_LOG(ERR, USER1, "[%d:%s()] Failed to initialize %s rate limit: %d(%s)\n", rte_lcore_index(lcore), __func__, rl_name, -err, rte_strerror(-err)); } } /***************************************************************************** * test_case_rate_zero() * NOTES: Change the current rate to zero (i.e., free & init). 
****************************************************************************/ static void test_case_rate_zero(const char *rl_name, rate_limit_t *rl, uint32_t lcore, uint32_t eth_port) { rate_limit_cfg_t zero_rate_cfg = RATE_CFG_ZERO(); rate_limit_free(rl); /* TODO: We assume that a zero rate will not allocate any memory inside the * rate object.. */ test_case_rate_init(rl_name, rl, &zero_rate_cfg, lcore, eth_port, 0, 0, 0); } /***************************************************************************** * test_case_rate_start_timer() ****************************************************************************/ static void test_case_rate_start_timer(struct rte_timer *tmr, test_tmr_arg_t *tmr_arg, rate_limit_t *rl, uint32_t lcore_id) { /* No need to start a periodic timer if rate-limiting is set to 0. */ if (!rate_limit_interval_us(rl)) return; rte_timer_reset(tmr, rate_limit_interval_us(rl) * cycles_per_us, PERIODICAL, lcore_id, test_case_tmr_cb, tmr_arg); } /***************************************************************************** * test_case_rate_state_init() ****************************************************************************/ static void test_case_rate_state_init(uint32_t lcore, uint32_t eth_port, uint32_t test_case_id __rte_unused, test_oper_state_t *test_state, test_rate_timers_t *rate_timers, test_case_init_msg_t *im, uint32_t total_sessions, uint32_t local_sessions) { test_rate_state_t *rate_state = &test_state->tos_rates; uint32_t max_burst = 0; switch (im->tcim_l4_type) { case L4_PROTO__TCP: max_burst = GCFG_TCP_CLIENT_BURST_MAX; break; case L4_PROTO__UDP: max_burst = GCFG_UDP_CLIENT_BURST_MAX; break; default: assert(false); break; } /* Initialize open/close/send timers. */ rte_timer_init(&rate_timers->trt_open_timer); rte_timer_init(&rate_timers->trt_close_timer); rte_timer_init(&rate_timers->trt_send_timer); /* Initialize open/close/send rate limiter states. */ rate_state->trs_flags = 0; /* WARNING: It's safe to use the values from im->tcim_transient here as * long as this function is called only from the MSG_TEST_CASE_INIT * callback! 
*/ test_case_rate_init("open", &rate_state->trs_open, im->tcim_transient.open_rate, lcore, eth_port, max_burst, total_sessions, local_sessions); test_case_rate_init("close", &rate_state->trs_close, im->tcim_transient.close_rate, lcore, eth_port, max_burst, total_sessions, local_sessions); test_case_rate_init("send", &rate_state->trs_send, im->tcim_transient.send_rate, lcore, eth_port, max_burst, total_sessions, local_sessions); } /***************************************************************************** * test_case_rate_state_start() ****************************************************************************/ static void test_case_rate_state_start(uint32_t lcore, uint32_t eth_port, uint32_t test_case_id, test_oper_state_t *test_state, test_rate_timers_t *rate_timers) { test_rate_state_t *rate_state = &test_state->tos_rates; test_tmr_arg_t *tmr_open_arg; test_tmr_arg_t *tmr_close_arg; test_tmr_arg_t *tmr_send_arg; tmr_open_arg = TEST_GET_TMR_ARG(open, eth_port, test_case_id); tmr_close_arg = TEST_GET_TMR_ARG(close, eth_port, test_case_id); tmr_send_arg = TEST_GET_TMR_ARG(send, eth_port, test_case_id); *tmr_open_arg = (test_tmr_arg_t) { .tta_lcore_id = lcore, .tta_eth_port = eth_port, .tta_test_case_id = test_case_id, .tta_rate_state = rate_state, .tta_rate_limit = &rate_state->trs_open, .tta_rate_in_progress_flag = TRS_FLAGS_OPEN_IN_PROGRESS, .tta_rate_reached_flag = TRS_FLAGS_OPEN_RATE_REACHED, .tta_run_msg_pool = RTE_PER_LCORE(test_open_msgpool), .tta_run_msg_type = TRMT_OPEN, }; *tmr_close_arg = (test_tmr_arg_t) { .tta_lcore_id = lcore, .tta_eth_port = eth_port, .tta_test_case_id = test_case_id, .tta_rate_state = rate_state, .tta_rate_limit = &rate_state->trs_close, .tta_rate_in_progress_flag = TRS_FLAGS_CLOSE_IN_PROGRESS, .tta_rate_reached_flag = TRS_FLAGS_CLOSE_RATE_REACHED, .tta_run_msg_pool = RTE_PER_LCORE(test_close_msgpool), .tta_run_msg_type = TRMT_CLOSE, }; *tmr_send_arg = (test_tmr_arg_t) { .tta_lcore_id = lcore, .tta_eth_port = eth_port, .tta_test_case_id = test_case_id, .tta_rate_state = rate_state, .tta_rate_limit = &rate_state->trs_send, .tta_rate_in_progress_flag = TRS_FLAGS_SEND_IN_PROGRESS, .tta_rate_reached_flag = TRS_FLAGS_SEND_RATE_REACHED, .tta_run_msg_pool = RTE_PER_LCORE(test_send_msgpool), .tta_run_msg_type = TRMT_SEND, }; test_case_rate_start_timer(&rate_timers->trt_open_timer, tmr_open_arg, &rate_state->trs_open, lcore); test_case_rate_start_timer(&rate_timers->trt_close_timer, tmr_close_arg, &rate_state->trs_close, lcore); test_case_rate_start_timer(&rate_timers->trt_send_timer, tmr_send_arg, &rate_state->trs_send, lcore); } /***************************************************************************** * test_case_rate_state_stop() ****************************************************************************/ static void test_case_rate_state_stop(uint32_t lcore, uint32_t eth_port, uint32_t test_case_id __rte_unused, test_oper_state_t *test_state, test_rate_timers_t *rate_timers) { test_rate_state_t *rate_state = &test_state->tos_rates; /* Change the target rates to 0 so we don't open/close/anymore. * Cancel the open/close/send timers. 
*/ rte_timer_stop(&rate_timers->trt_open_timer); rte_timer_stop(&rate_timers->trt_close_timer); rte_timer_stop(&rate_timers->trt_send_timer); test_case_rate_zero("open-zero", &rate_state->trs_open, lcore, eth_port); test_case_rate_zero("close-zero", &rate_state->trs_close, lcore, eth_port); test_case_rate_zero("send-zero", &rate_state->trs_send, lcore, eth_port); } /***************************************************************************** * test_case_rate_state_running() * Notes: return true if there's any rate limiting message in progress. ****************************************************************************/ static bool test_case_rate_state_running(test_oper_state_t *test_state) { test_rate_state_t *rate_state = &test_state->tos_rates; return rate_state->trs_flags & (TRS_FLAGS_OPEN_IN_PROGRESS | TRS_FLAGS_CLOSE_IN_PROGRESS | TRS_FLAGS_SEND_IN_PROGRESS); } /***************************************************************************** * Static functions for Initializing test cases. ****************************************************************************/ /***************************************************************************** * test_case_init_state_client_counters_cb() ****************************************************************************/ static void test_case_init_state_client_counters_cb(uint32_t lcore __rte_unused, uint32_t eth_port __rte_unused, uint32_t test_case_id __rte_unused, uint32_t src_ip __rte_unused, uint32_t dst_ip __rte_unused, uint16_t src_port __rte_unused, uint16_t dst_port __rte_unused, uint32_t conn_hash __rte_unused, void *arg) { uint32_t *local_sessions = arg; (*local_sessions)++; } /***************************************************************************** * test_case_init_state() ****************************************************************************/ static void test_case_init_state(uint32_t lcore, test_case_init_msg_t *im, test_oper_state_t *ts, test_case_client_open_cb_t client_open_cb, test_case_client_close_cb_t client_close_cb, test_case_session_mtu_cb_t mtu_cb, test_case_session_send_cb_t send_cb, test_case_session_close_cb_t close_cb, test_rate_timers_t *rate_timers) { uint32_t total_sessions = 0; uint32_t local_sessions = 0; TEST_CBQ_INIT(&ts->tos_to_init_cbs); TEST_CBQ_INIT(&ts->tos_to_open_cbs); TEST_CBQ_INIT(&ts->tos_to_close_cbs); TEST_CBQ_INIT(&ts->tos_to_send_cbs); TEST_CBQ_INIT(&ts->tos_closed_cbs); /* Initialize the rates based on the percentage of clients running on * this core. */ switch (im->tcim_test_case.tc_type) { case TEST_CASE_TYPE__CLIENT: /* Get local and total session count. Unfortunately there's no other * way to compute the number of local sessions than to walk the list.. */ test_case_for_each_client(lcore, im, test_case_init_state_client_counters_cb, &local_sessions); total_sessions = test_case_client_cfg_count(&im->tcim_test_case.tc_client); break; case TEST_CASE_TYPE__SERVER: /* We know that servers are created on all lcores * (i.e., local == total). */ local_sessions = test_case_server_cfg_count(&im->tcim_test_case.tc_server); total_sessions = test_case_server_cfg_count(&im->tcim_test_case.tc_server); break; default: assert(false); break; } /* Initialize the rates. */ test_case_rate_state_init(lcore, im->tcim_test_case.tc_eth_port, im->tcim_test_case.tc_id, ts, rate_timers, im, total_sessions, local_sessions); /* Initialize the session callbacks. 
*/ ts->tos_client_open_cb = client_open_cb; ts->tos_client_close_cb = client_close_cb; ts->tos_session_mtu_cb = mtu_cb; ts->tos_session_send_cb = send_cb; ts->tos_session_close_cb = close_cb; } /***************************************************************************** * test_case_start_tcp_server() ****************************************************************************/ static void test_case_start_tcp_server(uint32_t lcore __rte_unused, uint32_t eth_port, uint32_t test_case_id, uint32_t src_ip, uint32_t dst_ip __rte_unused, uint16_t src_port, uint16_t dst_port __rte_unused, uint32_t conn_hash __rte_unused, void *arg) { tcp_control_block_t *server_tcb; test_case_info_t *tc_info; tpg_app_proto_t app_id; sockopt_t *sockopt; int error; tc_info = arg; app_id = tc_info->tci_cfg->tcim_test_case.tc_app.app_proto; sockopt = &tc_info->tci_cfg->tcim_sockopt; /* We need to pass NULL in order for tcp_listen_v4 to allocate one * for us. */ server_tcb = NULL; /* Listen on the specified address + port. */ error = tcp_listen_v4(&server_tcb, eth_port, src_ip, src_port, test_case_id, app_id, sockopt, TCG_CB_CONSUME_ALL_DATA); if (unlikely(error)) { test_notification(TEST_NOTIF_SESS_FAILED, NULL, eth_port, test_case_id); } else { test_notification(TEST_NOTIF_SESS_UP, NULL, eth_port, test_case_id); } } /***************************************************************************** * test_case_start_udp_server() ****************************************************************************/ static void test_case_start_udp_server(uint32_t lcore __rte_unused, uint32_t eth_port, uint32_t test_case_id, uint32_t src_ip, uint32_t dst_ip __rte_unused, uint16_t src_port, uint16_t dst_port __rte_unused, uint32_t conn_hash __rte_unused, void *arg) { udp_control_block_t *server_ucb; test_case_info_t *tc_info; tpg_app_proto_t app_id; sockopt_t *sockopt; int error; tc_info = arg; app_id = tc_info->tci_cfg->tcim_test_case.tc_app.app_proto; sockopt = &tc_info->tci_cfg->tcim_sockopt; /* We need to pass NULL in order for udp_listen_v4 to allocate one * for us. */ server_ucb = NULL; /* Listen on the first specified address + port. 
*/ error = udp_listen_v4(&server_ucb, eth_port, src_ip, src_port, test_case_id, app_id, sockopt, 0); if (unlikely(error)) { test_notification(TEST_NOTIF_SESS_FAILED, NULL, eth_port, test_case_id); } else { test_notification(TEST_NOTIF_SESS_UP, NULL, eth_port, test_case_id); } } /***************************************************************************** * test_case_start_tcp_client() ****************************************************************************/ static void test_case_start_tcp_client(uint32_t lcore, uint32_t eth_port, uint32_t test_case_id, uint32_t src_ip, uint32_t dst_ip, uint16_t src_port, uint16_t dst_port, uint32_t conn_hash, void *arg) { tcp_control_block_t *tcb; test_case_info_t *tc_info; tpg_app_proto_t app_id; sockopt_t *sockopt; tc_info = arg; app_id = tc_info->tci_cfg->tcim_test_case.tc_app.app_proto; sockopt = &tc_info->tci_cfg->tcim_sockopt; tcb = tlkp_alloc_tcb(); if (unlikely(tcb == NULL)) { RTE_LOG(ERR, USER1, "[%d:%s()] Failed to allocate TCB.\n", rte_lcore_index(lcore), __func__); return; } tlkp_init_tcb_client(tcb, src_ip, dst_ip, src_port, dst_port, conn_hash, eth_port, test_case_id, app_id, sockopt, (TPG_CB_USE_L4_HASH_FLAG | TCG_CB_CONSUME_ALL_DATA)); test_sess_init(&tcb->tcb_l4, tc_info); } /***************************************************************************** * test_case_start_udp_client() ****************************************************************************/ static void test_case_start_udp_client(uint32_t lcore, uint32_t eth_port, uint32_t test_case_id, uint32_t src_ip, uint32_t dst_ip, uint16_t src_port, uint16_t dst_port, uint32_t conn_hash, void *arg) { udp_control_block_t *ucb; test_case_info_t *tc_info; tpg_app_proto_t app_id; sockopt_t *sockopt; tc_info = arg; app_id = tc_info->tci_cfg->tcim_test_case.tc_app.app_proto; sockopt = &tc_info->tci_cfg->tcim_sockopt; ucb = tlkp_alloc_ucb(); if (unlikely(ucb == NULL)) { RTE_LOG(ERR, USER1, "[%d:%s()] Failed to allocate UCB.\n", rte_lcore_index(lcore), __func__); return; } tlkp_init_ucb_client(ucb, src_ip, dst_ip, src_port, dst_port, conn_hash, eth_port, test_case_id, app_id, sockopt, (TPG_CB_USE_L4_HASH_FLAG | 0)); test_sess_init(&ucb->ucb_l4, tc_info); } /***************************************************************************** * Static functions for Running test cases. 
****************************************************************************/ /***************************************************************************** * test_case_tcp_client_open() ****************************************************************************/ static int test_case_tcp_client_open(l4_control_block_t *l4_cb) { tcp_control_block_t *tcb = container_of(l4_cb, tcp_control_block_t, tcb_l4); return tcp_open_v4_connection(&tcb, tcb->tcb_l4.l4cb_interface, tcb->tcb_l4.l4cb_src_addr.ip_v4, tcb->tcb_l4.l4cb_src_port, tcb->tcb_l4.l4cb_dst_addr.ip_v4, tcb->tcb_l4.l4cb_dst_port, tcb->tcb_l4.l4cb_test_case_id, tcb->tcb_l4.l4cb_app_data.ad_type, NULL, TCG_CB_REUSE_CB); } /***************************************************************************** * test_case_tcp_mtu() ****************************************************************************/ static uint32_t test_case_tcp_mtu(l4_control_block_t *l4_cb) { return TCB_AVAIL_SEND(container_of(l4_cb, tcp_control_block_t, tcb_l4)); } /***************************************************************************** * test_case_tcp_send() ****************************************************************************/ static int test_case_tcp_send(l4_control_block_t *l4_cb, struct rte_mbuf *data_mbuf, uint32_t *data_sent) { tcp_control_block_t *tcb = container_of(l4_cb, tcp_control_block_t, tcb_l4); return tcp_send_v4(tcb, data_mbuf, TCG_SEND_PSH, 0 /* Timeout */, data_sent); } /***************************************************************************** * test_case_tcp_close() ****************************************************************************/ static void test_case_tcp_close(l4_control_block_t *l4_cb) { tcp_close_connection(container_of(l4_cb, tcp_control_block_t, tcb_l4), 0); } /***************************************************************************** * test_case_udp_client_open() ****************************************************************************/ static int test_case_udp_client_open(l4_control_block_t *l4_cb) { udp_control_block_t *ucb = container_of(l4_cb, udp_control_block_t, ucb_l4); return udp_open_v4_connection(&ucb, ucb->ucb_l4.l4cb_interface, ucb->ucb_l4.l4cb_src_addr.ip_v4, ucb->ucb_l4.l4cb_src_port, ucb->ucb_l4.l4cb_dst_addr.ip_v4, ucb->ucb_l4.l4cb_dst_port, ucb->ucb_l4.l4cb_test_case_id, ucb->ucb_l4.l4cb_app_data.ad_type, NULL, TCG_CB_REUSE_CB); } /***************************************************************************** * test_case_udp_mtu() ****************************************************************************/ static uint32_t test_case_udp_mtu(l4_control_block_t *l4_cb) { return UCB_MTU(container_of(l4_cb, udp_control_block_t, ucb_l4)); } /***************************************************************************** * test_case_udp_send() ****************************************************************************/ static int test_case_udp_send(l4_control_block_t *l4_cb, struct rte_mbuf *data_mbuf, uint32_t *data_sent) { udp_control_block_t *ucb = container_of(l4_cb, udp_control_block_t, ucb_l4); return udp_send_v4(ucb, data_mbuf, data_sent); } /***************************************************************************** * test_case_udp_close() ****************************************************************************/ static void test_case_udp_close(l4_control_block_t *l4_cb) { udp_close_v4(container_of(l4_cb, udp_control_block_t, ucb_l4)); } /***************************************************************************** * test_case_tcp_purge() 
****************************************************************************/ static void test_case_tcp_purge(l4_control_block_t *l4_cb) { tcp_control_block_t *tcb = container_of(l4_cb, tcp_control_block_t, tcb_l4); if (tcb->tcb_state != TS_INIT && tcb->tcb_state != TS_CLOSED) tcp_close_connection(tcb, TCG_SILENT_CLOSE); if (!tcb->tcb_malloced) tcp_connection_cleanup(tcb); } /***************************************************************************** * test_case_udp_purge() ****************************************************************************/ static void test_case_udp_purge(l4_control_block_t *l4_cb) { udp_control_block_t *ucb = container_of(l4_cb, udp_control_block_t, ucb_l4); if (ucb->ucb_state != US_INIT && ucb->ucb_state != US_CLOSED) udp_close_v4(ucb); if (!ucb->ucb_malloced) udp_connection_cleanup(ucb); } /***************************************************************************** * test_purge_list() ****************************************************************************/ static uint32_t test_purge_list(test_case_info_t *tc_info, tlkp_test_cb_list_t *cb_list) { tpg_test_case_type_t tc_type = tc_info->tci_cfg->tcim_test_case.tc_type; tpg_l4_proto_t l4_proto = tc_info->tci_cfg->tcim_l4_type; uint32_t purge_cnt = 0; while (!TEST_CBQ_EMPTY(cb_list)) { l4_control_block_t *l4_cb = TAILQ_FIRST(cb_list); purge_cnt++; /* No need to remove from the list. It should be done by the cleanup * function. Inform the state machine that we're purging the session. */ test_sm_purge(l4_cb, tc_info); /* Call the corresponding callback to purge the session. */ test_callbacks[tc_type][l4_proto].sess_purge(l4_cb); } return purge_cnt; } /***************************************************************************** * test_tcb_count_tail() ****************************************************************************/ static uint32_t test_tcb_count_tail(tlkp_test_cb_list_t *cb_list, uint32_t test_states[TSTS_MAX_STATE], uint32_t tcp_states[TS_MAX_STATE]) { uint32_t cnt = 0; l4_control_block_t *l4_cb; tcp_control_block_t *tcb; TAILQ_FOREACH(l4_cb, cb_list, l4cb_test_list_entry) { tcb = container_of(l4_cb, tcp_control_block_t, tcb_l4); test_states[l4_cb->l4cb_test_state]++; tcp_states[tcb->tcb_state]++; cnt++; } return cnt; } /***************************************************************************** * test_ucb_count_tail() ****************************************************************************/ static uint32_t test_ucb_count_tail(tlkp_test_cb_list_t *cb_list, uint32_t test_states[TSTS_MAX_STATE], uint32_t udp_states[US_MAX_STATE]) { uint32_t cnt = 0; l4_control_block_t *l4_cb; udp_control_block_t *ucb; TAILQ_FOREACH(l4_cb, cb_list, l4cb_test_list_entry) { ucb = container_of(l4_cb, udp_control_block_t, ucb_l4); test_states[l4_cb->l4cb_test_state]++; udp_states[ucb->ucb_state]++; cnt++; } return cnt; } /***************************************************************************** * test_case_purge_cbs() ****************************************************************************/ static void test_case_purge_cbs(test_case_info_t *tc_info) { tpg_test_case_type_t tc_type = tc_info->tci_cfg->tcim_test_case.tc_type; tpg_l4_proto_t l4_proto = tc_info->tci_cfg->tcim_l4_type; uint32_t eth_port = tc_info->tci_cfg->tcim_test_case.tc_eth_port; uint32_t tc_id = tc_info->tci_cfg->tcim_test_case.tc_id; uint32_t purge_cnt = 0; uint32_t cnt = 0; int lcore_id = rte_lcore_id(); test_case_htable_walk_cb_t htable_walk_fn; bool purge_htable_cb(l4_control_block_t *l4_cb, void *arg __rte_unused) { if 
(l4_cb->l4cb_test_case_id != tc_id) return true; cnt++; test_sm_purge(l4_cb, tc_info); test_callbacks[tc_type][l4_proto].sess_purge(l4_cb); return true; } cnt = test_purge_list(tc_info, &tc_info->tci_state.tos_to_init_cbs); if (cnt) { RTE_LOG(INFO, USER1, "lcore=%d Purged %d sessions from tos_to_init_cbs\n", lcore_id, cnt); purge_cnt += cnt; } cnt = test_purge_list(tc_info, &tc_info->tci_state.tos_to_open_cbs); if (cnt) { RTE_LOG(INFO, USER1, "lcore=%d Purged %d sessions from tos_to_open_cbs\n", lcore_id, cnt); purge_cnt += cnt; } cnt = test_purge_list(tc_info, &tc_info->tci_state.tos_to_close_cbs); if (cnt) { RTE_LOG(INFO, USER1, "lcore=%d Purged %d sessions from tos_to_close_cbs\n", lcore_id, cnt); purge_cnt += cnt; } cnt = test_purge_list(tc_info, &tc_info->tci_state.tos_to_send_cbs); if (cnt) { RTE_LOG(INFO, USER1, "lcore=%d Purged %d sessions from tos_to_send_cbs\n", lcore_id, cnt); purge_cnt += cnt; } cnt = test_purge_list(tc_info, &tc_info->tci_state.tos_closed_cbs); if (cnt) { RTE_LOG(INFO, USER1, "lcore=%d Purged %d sessions from tos_to_closed_cbs\n", lcore_id, cnt); purge_cnt += cnt; } cnt = 0; htable_walk_fn = test_callbacks[tc_type][l4_proto].sess_htable_walk; htable_walk_fn(eth_port, purge_htable_cb, NULL); if (cnt) { RTE_LOG(INFO, USER1, "lcore=%d Purged %d sessions from the session table\n", lcore_id, cnt); purge_cnt += cnt; } RTE_LOG(INFO, USER1, "lcore=%d Purged %u total sessions on eth_port %"PRIu32" tcid %"PRIu32"\n", lcore_id, purge_cnt, eth_port, tc_id); } /***************************************************************************** * test_case_count_tcb_cbs() ****************************************************************************/ static void test_case_count_tcb_cbs(test_case_info_t *tc_info, uint32_t test_states[TSTS_MAX_STATE], uint32_t tcp_states[TS_MAX_STATE]) { tpg_test_case_type_t tc_type = tc_info->tci_cfg->tcim_test_case.tc_type; tpg_l4_proto_t l4_proto = tc_info->tci_cfg->tcim_l4_type; tcp_control_block_t *tcb; uint32_t sessions = 0; uint32_t eth_port = tc_info->tci_cfg->tcim_test_case.tc_eth_port; uint32_t tc_id = tc_info->tci_cfg->tcim_test_case.tc_id; test_case_htable_walk_cb_t htable_walk_fn; bool count_htable_cb(l4_control_block_t *l4_cb, void *arg __rte_unused) { if (l4_cb->l4cb_test_case_id != tc_id) return true; tcb = container_of(l4_cb, tcp_control_block_t, tcb_l4); test_states[l4_cb->l4cb_test_state]++; tcp_states[tcb->tcb_state]++; sessions++; return true; } htable_walk_fn = test_callbacks[tc_type][l4_proto].sess_htable_walk; htable_walk_fn(eth_port, count_htable_cb, NULL); } /***************************************************************************** * test_case_count_ucb_cbs() ****************************************************************************/ static void test_case_count_ucb_cbs(test_case_info_t *tc_info, uint32_t test_states[TSTS_MAX_STATE], uint32_t udp_states[US_MAX_STATE]) { tpg_test_case_type_t tc_type = tc_info->tci_cfg->tcim_test_case.tc_type; tpg_l4_proto_t l4_proto = tc_info->tci_cfg->tcim_l4_type; udp_control_block_t *ucb; uint32_t sessions = 0; uint32_t eth_port = tc_info->tci_cfg->tcim_test_case.tc_eth_port; uint32_t tc_id = tc_info->tci_cfg->tcim_test_case.tc_id; test_case_htable_walk_cb_t htable_walk_fn; bool count_htable_cb(l4_control_block_t *l4_cb, void *arg __rte_unused) { if (l4_cb->l4cb_test_case_id != tc_id) return true; ucb = container_of(l4_cb, udp_control_block_t, ucb_l4); test_states[l4_cb->l4cb_test_state]++; udp_states[ucb->ucb_state]++; sessions++; return true; } htable_walk_fn = 
test_callbacks[tc_type][l4_proto].sess_htable_walk; htable_walk_fn(eth_port, count_htable_cb, NULL); } /***************************************************************************** * Test Message Handlers which will run on the packet threads. ****************************************************************************/ /***************************************************************************** * test_case_init_cb() ****************************************************************************/ static int test_case_init_cb(uint16_t msgid, uint16_t lcore, void *msg) { test_case_init_msg_t *im; uint32_t eth_port; uint32_t tcid; test_case_info_t *tc_info; tpg_test_case_type_t tc_type; tpg_l4_proto_t l4_proto; tpg_app_proto_t app_id; tpg_test_case_latency_t *test_latency; if (MSG_INVALID(msgid, msg, MSG_TEST_CASE_INIT)) return -EINVAL; im = msg; eth_port = im->tcim_test_case.tc_eth_port; tcid = im->tcim_test_case.tc_id; tc_type = im->tcim_test_case.tc_type; app_id = im->tcim_test_case.tc_app.app_proto; l4_proto = im->tcim_l4_type; /* If the requested port is not handled by this core just ignore. */ if (port_get_rx_queue_id(lcore, eth_port) == CORE_PORT_QINVALID) return 0; tc_info = TEST_GET_INFO(eth_port, tcid); /* If already configured then just ignore for now. */ if (tc_info->tci_configured) return 0; /* Struct copy of the configuration. */ *tc_info->tci_cfg = *im; test_latency = &tc_info->tci_cfg->tcim_test_case.tc_latency; if (tc_info->tci_cfg->tcim_tx_tstamp) { tstamp_tx_post_cb_t cb; sockopt_t *sockopt; sockopt = &tc_info->tci_cfg->tcim_sockopt; if (!sockopt->so_eth.ethso_tx_offload_ipv4_cksum || !sockopt->so_eth.ethso_tx_offload_tcp_cksum || !sockopt->so_eth.ethso_tx_offload_udp_cksum) cb = test_update_cksum_tstamp; else cb = NULL; tstamp_start_tx(eth_port, port_get_tx_queue_id(lcore, eth_port), cb); } if (tc_info->tci_cfg->tcim_rx_tstamp) { tstamp_start_rx(eth_port, port_get_rx_queue_id(lcore, eth_port)); test_case_latency_init(tc_info); /* Initialize latency buffer. */ test_latency_state_init(tc_info->tci_latency_state, test_latency->has_tcs_samples ? test_latency->tcs_samples : 0); } /* Initialize operational part and callbacks. */ test_case_init_state(lcore, tc_info->tci_cfg, &tc_info->tci_state, test_callbacks[tc_type][l4_proto].open, test_callbacks[tc_type][l4_proto].close, test_callbacks[tc_type][l4_proto].mtu, test_callbacks[tc_type][l4_proto].send, test_callbacks[tc_type][l4_proto].sess_close, &tc_info->tci_rate_timers); /* Let the application layer know that the test is starting. The application * will initialize its "global" per test case state. */ APP_CALL(tc_start, app_id)(&tc_info->tci_cfg->tcim_test_case, &tc_info->tci_cfg->tcim_test_case.tc_app, &tc_info->tci_app_storage); /* Initialize stats. */ bzero(tc_info->tci_gen_stats, sizeof(*tc_info->tci_gen_stats)); bzero(tc_info->tci_rate_stats, sizeof(*tc_info->tci_rate_stats)); /* Initialize app stats. */ APP_CALL(stats_init, app_id)(&tc_info->tci_cfg->tcim_test_case.tc_app, tc_info->tci_app_stats); /* Initialize clients and servers. */ /* WARNING: we could include test_case_start_tcp/udp_server/client in * the test_callbacks array but then the compiler would most likely * refuse to inline them inside the test_case_for_each_client/server * functions. To avoid that we do the (ugly) switch on l4 proto.. 
*/ switch (tc_type) { case TEST_CASE_TYPE__CLIENT: switch (l4_proto) { case L4_PROTO__TCP: test_case_for_each_client(lcore, tc_info->tci_cfg, test_case_start_tcp_client, tc_info); break; case L4_PROTO__UDP: test_case_for_each_client(lcore, tc_info->tci_cfg, test_case_start_udp_client, tc_info); break; default: assert(false); return -EINVAL; } break; case TEST_CASE_TYPE__SERVER: switch (l4_proto) { case L4_PROTO__TCP: test_case_for_each_server(lcore, tc_info->tci_cfg, test_case_start_tcp_server, tc_info); break; case L4_PROTO__UDP: test_case_for_each_server(lcore, tc_info->tci_cfg, test_case_start_udp_server, tc_info); break; default: assert(false); return -EINVAL; } break; default: assert(false); return -EINVAL; } tc_info->tci_configured = true; return 0; } /***************************************************************************** * test_case_latency_init() ****************************************************************************/ void test_case_latency_init(test_case_info_t *tc_info) { /* We don't bzero "test_oper_latency_state_t" we choose to keep recent * stats updated here instead than mgmt core. */ bzero(&tc_info->tci_gen_stats->gs_latency_stats, sizeof(tc_info->tci_gen_stats->gs_latency_stats)); tc_info->tci_gen_stats->gs_latency_stats.gls_stats.ls_min_latency = UINT32_MAX; tc_info->tci_gen_stats->gs_latency_stats.gls_sample_stats .ls_min_latency = UINT32_MAX; } /***************************************************************************** * test_case_start_cb() ****************************************************************************/ static int test_case_start_cb(uint16_t msgid, uint16_t lcore, void *msg) { test_case_start_msg_t *sm; test_case_info_t *tc_info; if (MSG_INVALID(msgid, msg, MSG_TEST_CASE_START)) return -EINVAL; sm = msg; /* If the requested port is not handled by this core just ignore. */ if (port_get_rx_queue_id(lcore, sm->tcsm_eth_port) == CORE_PORT_QINVALID) { RTE_LOG(ERR, USER1, "[%d:%s()] Received TestCase START on a core that's not handling this port!\n", rte_lcore_index(lcore), __func__); return 0; } tc_info = TEST_GET_INFO(sm->tcsm_eth_port, sm->tcsm_test_case_id); /* If already running then just ignore for now. */ if (tc_info->tci_running) { RTE_LOG(ERR, USER1, "[%d:%s()] Received TestCase START for a test case already running on this port!\n", rte_lcore_index(lcore), __func__); return 0; } /* Store the initial start timestamp. */ tc_info->tci_rate_stats->rs_start_time = rte_get_timer_cycles(); /* Start the rate limiting engine. */ test_case_rate_state_start(lcore, sm->tcsm_eth_port, sm->tcsm_test_case_id, &tc_info->tci_state, &tc_info->tci_rate_timers); /* Safe to mark the test as running. We shouldn't fail from this * point on.. */ tc_info->tci_running = true; return 0; } /***************************************************************************** * test_case_rate_limit_update() * Notes: Update a specific test case rate limit. If the desired rate * was reached we stop resending the message. Otherwise, if there * are still sessions on the control_block list waiting to * execute an operation, resend the message (EAGAIN). 
****************************************************************************/ static int test_case_rate_limit_update(test_rate_state_t *rate_state, rate_limit_t *rate_limit, tlkp_test_cb_list_t *cb_list, uint32_t rate_in_progress_flag, uint32_t rate_reached_flag, uint32_t consumed) { rate_limit_consume(rate_limit, consumed); /* If we still didn't reach the expected rate for this * interval then we should repost if we still have cbs in the list. */ if (likely(!rate_limit_reached(rate_limit))) { /* Rate not reached but no more sessions in queue: * Stop and mark the message as not in progress anymore. */ if (TEST_CBQ_EMPTY(cb_list)) { rate_state->trs_flags &= ~rate_in_progress_flag; return 0; } return -EAGAIN; } /* Set the "reached" flag. */ rate_state->trs_flags |= rate_reached_flag; /* Rate reached: Stop and mark the message as not in progress anymore. */ rate_state->trs_flags &= ~rate_in_progress_flag; return 0; } /***************************************************************************** * test_case_run_open_cb() ****************************************************************************/ static int test_case_run_open_cb(uint16_t msgid, uint16_t lcore __rte_unused, void *msg) { test_case_run_msg_t *rm; test_case_info_t *tc_info; test_oper_state_t *ts; uint32_t max_open; uint32_t open_cnt; int error; if (MSG_INVALID(msgid, msg, MSG_TEST_CASE_RUN_OPEN)) return -EINVAL; rm = msg; tc_info = TEST_GET_INFO(rm->tcrm_eth_port, rm->tcrm_test_case_id); ts = &tc_info->tci_state; /* Check how many sessions we are allowed to open. */ max_open = rate_limit_available(&ts->tos_rates.trs_open); /* Start a batch of clients from the to_open list. */ for (open_cnt = 0; !TEST_CBQ_EMPTY(&ts->tos_to_open_cbs) && open_cnt < max_open; open_cnt++) { l4_control_block_t *l4_cb; l4_cb = TAILQ_FIRST(&ts->tos_to_open_cbs); error = ts->tos_client_open_cb(l4_cb); if (unlikely(error)) { TEST_NOTIF(TEST_NOTIF_SESS_FAILED, l4_cb); /* Readd to the open list and try again later. */ TEST_CBQ_ADD_TO_OPEN(ts, l4_cb); } else { TEST_NOTIF(TEST_NOTIF_SESS_UP, l4_cb); } } TRACE_FMT(TST, DEBUG, "OPEN start cnt %"PRIu32, open_cnt); /* Update the rate limit and check if we have to open more (later). */ return test_case_rate_limit_update(&ts->tos_rates, &ts->tos_rates.trs_open, &ts->tos_to_open_cbs, TRS_FLAGS_OPEN_IN_PROGRESS, TRS_FLAGS_OPEN_RATE_REACHED, open_cnt); } /***************************************************************************** * test_case_run_close_cb() ****************************************************************************/ static int test_case_run_close_cb(uint16_t msgid, uint16_t lcore __rte_unused, void *msg) { test_case_run_msg_t *rm; test_case_info_t *tc_info; test_oper_state_t *ts; uint32_t max_close; uint32_t close_cnt; if (MSG_INVALID(msgid, msg, MSG_TEST_CASE_RUN_CLOSE)) return -EINVAL; rm = msg; tc_info = TEST_GET_INFO(rm->tcrm_eth_port, rm->tcrm_test_case_id); ts = &tc_info->tci_state; /* Check how many sessions we are allowed to close. */ max_close = rate_limit_available(&ts->tos_rates.trs_close); /* Stop a batch of clients from the to_close list. */ for (close_cnt = 0; !TEST_CBQ_EMPTY(&ts->tos_to_close_cbs) && close_cnt < max_close; close_cnt++) { l4_control_block_t *l4_cb; l4_cb = TAILQ_FIRST(&ts->tos_to_close_cbs); ts->tos_client_close_cb(l4_cb); } TRACE_FMT(TST, DEBUG, "CLOSE start cnt %"PRIu32, close_cnt); /* Update the rate limit and check if we have to send more (later). 
*/ return test_case_rate_limit_update(&ts->tos_rates, &ts->tos_rates.trs_close, &ts->tos_to_close_cbs, TRS_FLAGS_CLOSE_IN_PROGRESS, TRS_FLAGS_CLOSE_RATE_REACHED, close_cnt); } /***************************************************************************** * test_case_run_send_cb() ****************************************************************************/ static int test_case_run_send_cb(uint16_t msgid, uint16_t lcore __rte_unused, void *msg) { test_case_run_msg_t *rm; test_case_info_t *tc_info; test_oper_state_t *ts; test_rate_state_t *rate_state; uint32_t max_send; uint32_t send_cnt; uint32_t send_pkt_cnt; if (MSG_INVALID(msgid, msg, MSG_TEST_CASE_RUN_SEND)) return -EINVAL; rm = msg; tc_info = TEST_GET_INFO(rm->tcrm_eth_port, rm->tcrm_test_case_id); ts = &tc_info->tci_state; rate_state = &ts->tos_rates; /* Check how many sessions are allowed to send traffic. */ max_send = rate_limit_available(&rate_state->trs_send); for (send_cnt = 0, send_pkt_cnt = 0; !TEST_CBQ_EMPTY(&ts->tos_to_send_cbs) && send_pkt_cnt < max_send; send_pkt_cnt++) { int error; uint32_t mtu; struct rte_mbuf *data_mbuf; uint32_t data_sent = 0; l4_control_block_t *l4_cb; tpg_app_proto_t app_id; l4_cb = TAILQ_FIRST(&ts->tos_to_send_cbs); app_id = l4_cb->l4cb_app_data.ad_type; mtu = ts->tos_session_mtu_cb(l4_cb); data_mbuf = APP_CALL(send, app_id)(l4_cb, &l4_cb->l4cb_app_data, tc_info->tci_app_stats, mtu); if (unlikely(data_mbuf == NULL)) { TEST_NOTIF(TEST_NOTIF_DATA_NULL, l4_cb); if (test_sm_has_data_pending(l4_cb)) { /* Move at the end to try again later. */ TEST_CBQ_REM_TO_SEND(ts, l4_cb); TEST_CBQ_ADD_TO_SEND(ts, l4_cb); } continue; } /* Try to send. * if we sent something then let the APP know * else if app still needs to send move at end of list * else do-nothing as the TEST state machine moved us already to * TSTS_NO_SND_WIN. */ error = ts->tos_session_send_cb(l4_cb, data_mbuf, &data_sent); if (unlikely(error)) { TEST_NOTIF(TEST_NOTIF_DATA_FAILED, l4_cb); if (test_sm_has_data_pending(l4_cb)) { /* Move at the end to try again later. */ TEST_CBQ_REM_TO_SEND(ts, l4_cb); TEST_CBQ_ADD_TO_SEND(ts, l4_cb); continue; } } if (likely(data_sent != 0)) { if (APP_CALL(data_sent, app_id)(l4_cb, &l4_cb->l4cb_app_data, tc_info->tci_app_stats, data_sent)) { /* We increment the sent count only if the application managed * to transmit a complete message. */ send_cnt++; } } } TRACE_FMT(TST, DEBUG, "SEND data cnt %"PRIu32, send_cnt); /* Update the transaction send rate. */ tc_info->tci_rate_stats->rs_data_per_s += send_cnt; /* Update the rate limiter with the number of individual sent packets * (not transactions!) and check if we have to send more (later). 
*/ return test_case_rate_limit_update(rate_state, &rate_state->trs_send, &ts->tos_to_send_cbs, TRS_FLAGS_SEND_IN_PROGRESS, TRS_FLAGS_SEND_RATE_REACHED, send_pkt_cnt); } /***************************************************************************** * test_case_stop_cb() ****************************************************************************/ static int test_case_stop_cb(uint16_t msgid, uint16_t lcore, void *msg) { test_case_stop_msg_t *sm; test_case_info_t *tc_info; test_oper_state_t *tc_state; test_rate_timers_t *tc_rate_timers; bool all_purged; tpg_app_proto_t app_id; if (MSG_INVALID(msgid, msg, MSG_TEST_CASE_STOP)) return -EINVAL; sm = msg; tc_info = TEST_GET_INFO(sm->tcsm_eth_port, sm->tcsm_test_case_id); tc_state = &tc_info->tci_state; tc_rate_timers = &tc_info->tci_rate_timers; if (!tc_info->tci_configured || !tc_info->tci_running) { RTE_LOG(ERR, USER1, "[%d:%s()] Received TestCase STOP for a test case that's not running on this port!\n", rte_lcore_index(rte_lcore_id()), __func__); goto done; } /* If we didn't mark the TC as stopping do it now. * Notify all the tcbs that they need to go to CLOSED and get freed once * they get there. */ if (!tc_info->tci_stopping) { /* Stop the rate limiting engine. */ test_case_rate_state_stop(lcore, sm->tcsm_eth_port, sm->tcsm_test_case_id, tc_state, tc_rate_timers); tc_info->tci_stopping = true; all_purged = false; } else { /* Walk the tcbs for this test and purge them. */ test_case_purge_cbs(tc_info); all_purged = true; } /* If we still have open/close/send messages being processed we should * repost this message to ourselves and wait until the open/close/send * messages are processed. */ if (test_case_rate_state_running(tc_state) || !all_purged) return -EAGAIN; tc_info->tci_stopping = false; tc_info->tci_running = false; tc_info->tci_configured = false; if (tc_info->tci_cfg->tcim_tx_tstamp) { tstamp_stop_tx(sm->tcsm_eth_port, port_get_rx_queue_id(lcore, sm->tcsm_eth_port)); } if (tc_info->tci_cfg->tcim_rx_tstamp) { tstamp_stop_rx(sm->tcsm_eth_port, port_get_rx_queue_id(lcore, sm->tcsm_eth_port)); } app_id = tc_info->tci_cfg->tcim_test_case.tc_app.app_proto; APP_CALL(tc_stop, app_id)(&tc_info->tci_cfg->tcim_test_case, &tc_info->tci_cfg->tcim_test_case.tc_app, &tc_info->tci_app_storage); done: /* Notify the sender that we're done. */ *sm->tcsm_done = true; return 0; } /***************************************************************************** * test_case_stats_req_cb() ****************************************************************************/ static int test_case_stats_req_cb(uint16_t msgid, uint16_t lcore __rte_unused, void *msg) { test_case_stats_req_msg_t *sm; test_case_info_t *tc_info; tpg_app_proto_t app_id; if (MSG_INVALID(msgid, msg, MSG_TEST_CASE_STATS_REQ)) return -EINVAL; sm = msg; tc_info = TEST_GET_INFO(sm->tcsrm_eth_port, sm->tcsrm_test_case_id); app_id = tc_info->tci_cfg->tcim_test_case.tc_app.app_proto; /* Here we walk through the buffer in order to fill the recent stats */ if (tc_info->tci_cfg->tcim_rx_tstamp) { tpg_test_case_latency_t *tc_latency; tpg_gen_latency_stats_t *tc_latency_stats; tc_latency = &tc_info->tci_cfg->tcim_test_case.tc_latency; tc_latency_stats = &tc_info->tci_gen_stats->gs_latency_stats; test_update_recent_latency_stats(&tc_latency_stats->gls_sample_stats, tc_info->tci_latency_state, tc_latency); } /* Struct copy the stats! */ *sm->tcsrm_test_case_stats = *tc_info->tci_gen_stats; /* Ask the APP implementation to copy the stats for us. 
*/ APP_CALL(stats_copy, app_id)(sm->tcsrm_test_case_app_stats, tc_info->tci_app_stats); /* Clear the runtime stats. They're aggregated by the test manager. * Don't clear the start and end time for the gen stats! */ if (tc_info->tci_cfg->tcim_rx_tstamp) test_case_latency_init(tc_info); tc_info->tci_gen_stats->gs_up = 0; tc_info->tci_gen_stats->gs_estab = 0; tc_info->tci_gen_stats->gs_down = 0; tc_info->tci_gen_stats->gs_failed = 0; tc_info->tci_gen_stats->gs_data_failed = 0; tc_info->tci_gen_stats->gs_data_null = 0; /* Clear the app stats. */ APP_CALL(stats_init, app_id)(&tc_info->tci_cfg->tcim_test_case.tc_app, tc_info->tci_app_stats); return 0; } /***************************************************************************** * test_case_get_state_counter() ****************************************************************************/ static void test_case_get_state_counter(test_case_info_t *tc_info, test_state_counter_t *state_counter) { bzero(state_counter, sizeof(test_state_counter_t)); if (tc_info->tci_cfg->tcim_l4_type == L4_PROTO__TCP) { state_counter->tos_to_init_cbs = test_tcb_count_tail(&tc_info->tci_state.tos_to_init_cbs, state_counter->test_states_from_test, state_counter->tcp_states_from_test); state_counter->tos_to_open_cbs = test_tcb_count_tail(&tc_info->tci_state.tos_to_open_cbs, state_counter->test_states_from_test, state_counter->tcp_states_from_test); state_counter->tos_to_close_cbs = test_tcb_count_tail(&tc_info->tci_state.tos_to_close_cbs, state_counter->test_states_from_test, state_counter->tcp_states_from_test); state_counter->tos_to_send_cbs = test_tcb_count_tail(&tc_info->tci_state.tos_to_send_cbs, state_counter->test_states_from_test, state_counter->tcp_states_from_test); state_counter->tos_closed_cbs = test_tcb_count_tail(&tc_info->tci_state.tos_closed_cbs, state_counter->test_states_from_test, state_counter->tcp_states_from_test); test_case_count_tcb_cbs(tc_info, state_counter->test_states_from_tcp, state_counter->tcp_states_from_tcp); } else if (tc_info->tci_cfg->tcim_l4_type == L4_PROTO__UDP) { state_counter->tos_to_init_cbs = test_ucb_count_tail(&tc_info->tci_state.tos_to_init_cbs, state_counter->test_states_from_test, state_counter->udp_states_from_test); state_counter->tos_to_open_cbs = test_ucb_count_tail(&tc_info->tci_state.tos_to_open_cbs, state_counter->test_states_from_test, state_counter->udp_states_from_test); state_counter->tos_to_close_cbs = test_ucb_count_tail(&tc_info->tci_state.tos_to_close_cbs, state_counter->test_states_from_test, state_counter->udp_states_from_test); state_counter->tos_to_send_cbs = test_ucb_count_tail(&tc_info->tci_state.tos_to_send_cbs, state_counter->test_states_from_test, state_counter->udp_states_from_test); state_counter->tos_closed_cbs = test_ucb_count_tail(&tc_info->tci_state.tos_closed_cbs, state_counter->test_states_from_test, state_counter->udp_states_from_test); test_case_count_ucb_cbs(tc_info, state_counter->test_states_from_udp, state_counter->udp_states_from_udp); } } /***************************************************************************** * test_case_rates_stats_req_cb() ****************************************************************************/ static int test_case_rates_stats_req_cb(uint16_t msgid, uint16_t lcore __rte_unused, void *msg) { test_case_rates_req_msg_t *sm; test_case_info_t *tc_info; uint64_t now; if (MSG_INVALID(msgid, msg, MSG_TEST_CASE_RATES_REQ)) return -EINVAL; sm = msg; tc_info = TEST_GET_INFO(sm->tcrrm_eth_port, sm->tcrrm_test_case_id); now = rte_get_timer_cycles(); 
    tc_info->tci_rate_stats->rs_end_time = now;

    /* Struct copy the stats! */
    *sm->tcrrm_test_case_rate_stats = *tc_info->tci_rate_stats;

    /* Clear the rates stats. They're aggregated by the test manager. */
    bzero(tc_info->tci_rate_stats, sizeof(*tc_info->tci_rate_stats));

    /* Store the new initial timestamp. */
    tc_info->tci_rate_stats->rs_start_time = now;

    return 0;
}

/*****************************************************************************
 * test_case_states_stats_req_cb()
 ****************************************************************************/
static int test_case_states_stats_req_cb(uint16_t msgid,
                                         uint16_t lcore __rte_unused,
                                         void *msg)
{
    test_case_states_req_msg_t *sm;
    test_case_info_t           *tc_info;
    uint64_t                    now;

    if (MSG_INVALID(msgid, msg, MSG_TEST_CASE_STATES_REQ))
        return -EINVAL;

    sm = msg;
    tc_info = TEST_GET_INFO(sm->tcsrm_eth_port, sm->tcsrm_test_case_id);

    now = rte_get_timer_cycles();
    tc_info->tci_rate_stats->rs_end_time = now;

    /* Struct copy the stats! */
    test_case_get_state_counter(tc_info, sm->tcsrm_test_state_counter);

    /* Clear the states stats. They're aggregated by the test manager. */
    bzero(tc_info->tci_rate_stats, sizeof(*tc_info->tci_rate_stats));

    /* Store the new initial timestamp. */
    tc_info->tci_rate_stats->rs_start_time = now;

    return 0;
}

/*****************************************************************************
 * test_update_recent_latency_stats()
 * Notes: this function should be called only for the recent (sampled) stats!
 ****************************************************************************/
static void test_update_recent_latency_stats(tpg_latency_stats_t *stats,
                                             test_oper_latency_state_t *buffer,
                                             tpg_test_case_latency_t *tc_latency)
{
    uint32_t i;

    bzero(stats, sizeof(tpg_latency_stats_t));
    stats->ls_min_latency = UINT32_MAX;

    /* Walk through the whole sample buffer to rebuild the recent stats. */
    for (i = 0; i < buffer->tols_actual_length; i++) {
        test_update_latency_stats(stats, buffer->tols_timestamps[i],
                                  tc_latency);
    }
}

/*****************************************************************************
 * test_init()
 ****************************************************************************/
bool test_init(void)
{
    int error;

    while (true) {
        /*
         * Register the handlers for our message types.
         */
        error = msg_register_handler(MSG_TEST_CASE_INIT, test_case_init_cb);
        if (error)
            break;

        error = msg_register_handler(MSG_TEST_CASE_START, test_case_start_cb);
        if (error)
            break;

        error = msg_register_handler(MSG_TEST_CASE_RUN_OPEN,
                                     test_case_run_open_cb);
        if (error)
            break;

        error = msg_register_handler(MSG_TEST_CASE_RUN_CLOSE,
                                     test_case_run_close_cb);
        if (error)
            break;

        error = msg_register_handler(MSG_TEST_CASE_RUN_SEND,
                                     test_case_run_send_cb);
        if (error)
            break;

        error = msg_register_handler(MSG_TEST_CASE_STOP, test_case_stop_cb);
        if (error)
            break;

        error = msg_register_handler(MSG_TEST_CASE_STATS_REQ,
                                     test_case_stats_req_cb);
        if (error)
            break;

        error = msg_register_handler(MSG_TEST_CASE_RATES_REQ,
                                     test_case_rates_stats_req_cb);
        if (error)
            break;

        error = msg_register_handler(MSG_TEST_CASE_STATES_REQ,
                                     test_case_states_stats_req_cb);
        if (error)
            break;

        return true;
    }

    RTE_LOG(ERR, USER1, "Failed to register Tests msg handler: %s(%d)\n",
            rte_strerror(-error), -error);
    return false;
}

/*****************************************************************************
 * test_lcore_init_pool()
 * NOTES: this is a bit ugly, but it has to be a macro due to the TPG message
 *        infra.
****************************************************************************/ #define test_lcore_init_pool(msgpool, name, count, lcore_id) \ do { \ (msgpool) = rte_zmalloc_socket((name), (count) * sizeof(*(msgpool)), \ RTE_CACHE_LINE_SIZE, \ rte_lcore_to_socket_id(lcore_id)); \ if ((msgpool) == NULL) { \ TPG_ERROR_ABORT("[%d:%s() Failed to allocate %s!\n", \ rte_lcore_index((lcore_id)), \ __func__, \ (name)); \ } \ } while (0) /***************************************************************************** * test_lcore_init() * Notes: Initialize all the pointers within test_case_info_t objects. ****************************************************************************/ static void test_lcore_init_test_case_info(void) { uint32_t eth_port; uint32_t tcid; for (eth_port = 0; eth_port < rte_eth_dev_count_avail(); eth_port++) { for (tcid = 0; tcid < TPG_TEST_MAX_ENTRIES; tcid++) { test_case_info_t *tc_info = TEST_GET_INFO(eth_port, tcid); tc_info->tci_cfg = TEST_GET_CFG(eth_port, tcid); tc_info->tci_gen_stats = TEST_GET_GEN_STATS(eth_port, tcid); tc_info->tci_rate_stats = TEST_GET_RATE_STATS(eth_port, tcid); tc_info->tci_app_stats = TEST_GET_APP_STATS(eth_port, tcid); tc_info->tci_latency_state = TEST_GET_LATENCY_STATE(eth_port, tcid); } } } /***************************************************************************** * test_lcore_init() ****************************************************************************/ void test_lcore_init(uint32_t lcore_id) { test_lcore_init_pool(RTE_PER_LCORE(test_case_info), "per_lcore_test_case_info", rte_eth_dev_count_avail() * TPG_TEST_MAX_ENTRIES, lcore_id); test_lcore_init_pool(RTE_PER_LCORE(test_case_cfg), "per_lcore_test_case_cfg", rte_eth_dev_count_avail() * TPG_TEST_MAX_ENTRIES, lcore_id); test_lcore_init_pool(RTE_PER_LCORE(test_case_stats), "per_lcore_test_stats", rte_eth_dev_count_avail() * TPG_TEST_MAX_ENTRIES, lcore_id); test_lcore_init_pool(RTE_PER_LCORE(test_case_latency_state), "per_lcore_test_case_latency_state", rte_eth_dev_count_avail() * TPG_TEST_MAX_ENTRIES, lcore_id); test_lcore_init_pool(RTE_PER_LCORE(test_open_msgpool), "per_lcore_open_msgpool", rte_eth_dev_count_avail() * TPG_TEST_MAX_ENTRIES, lcore_id); test_lcore_init_pool(RTE_PER_LCORE(test_close_msgpool), "per_lcore_close_msgpool", rte_eth_dev_count_avail() * TPG_TEST_MAX_ENTRIES, lcore_id); test_lcore_init_pool(RTE_PER_LCORE(test_send_msgpool), "per_lcore_send_msgpool", rte_eth_dev_count_avail() * TPG_TEST_MAX_ENTRIES, lcore_id); test_lcore_init_pool(RTE_PER_LCORE(test_tmr_open_args), "per_lcore_tmr_open_arg", rte_eth_dev_count_avail() * TPG_TEST_MAX_ENTRIES, lcore_id); test_lcore_init_pool(RTE_PER_LCORE(test_tmr_close_args), "per_lcore_tmr_close_arg", rte_eth_dev_count_avail() * TPG_TEST_MAX_ENTRIES, lcore_id); test_lcore_init_pool(RTE_PER_LCORE(test_tmr_send_args), "per_lcore_tmr_send_arg", rte_eth_dev_count_avail() * TPG_TEST_MAX_ENTRIES, lcore_id); test_lcore_init_test_case_info(); } /**************************************************************************** * test_notification() * * NOTE: * This will be called from the packet processing core context. 
****************************************************************************/ void test_notification(uint32_t notification, l4_control_block_t *l4_cb, uint32_t eth_port, uint32_t test_case_id) { test_case_info_t *tc_info = TEST_GET_INFO(eth_port, test_case_id); switch (notification) { case TEST_NOTIF_SESS_UP: tc_info->tci_gen_stats->gs_up++; break; case TEST_NOTIF_SESS_DOWN: tc_info->tci_gen_stats->gs_down++; break; case TEST_NOTIF_SESS_FAILED: tc_info->tci_gen_stats->gs_failed++; break; case TEST_NOTIF_DATA_FAILED: tc_info->tci_gen_stats->gs_data_failed++; break; case TEST_NOTIF_DATA_NULL: tc_info->tci_gen_stats->gs_data_null++; break; case TEST_NOTIF_TMR_FIRED: test_sm_tmr_to(l4_cb, tc_info); break; case TEST_NOTIF_SESS_CONNECTING: test_sess_connecting(l4_cb, tc_info); break; case TEST_NOTIF_SESS_CONNECTED: test_sess_connected(l4_cb, tc_info); break; case TEST_NOTIF_SESS_CONNECTED_IMM: test_sess_connected_imm(l4_cb, tc_info); break; case TEST_NOTIF_SESS_LISTEN: test_sess_listen(l4_cb, tc_info); break; case TEST_NOTIF_SESS_SRV_CONNECTED: test_sess_server_connected(l4_cb, tc_info); break; case TEST_NOTIF_SESS_CLOSING: test_sess_closing(l4_cb, tc_info); break; case TEST_NOTIF_SESS_CLOSED: test_sess_closed(l4_cb, tc_info); break; case TEST_NOTIF_SESS_WIN_AVAIL: test_sess_win_available(l4_cb, tc_info); break; case TEST_NOTIF_SESS_WIN_UNAVAIL: test_sess_win_unavailable(l4_cb, tc_info); break; case TEST_NOTIF_APP_SEND_START: test_sm_app_send_start(l4_cb, tc_info); break; case TEST_NOTIF_APP_SEND_STOP: test_sm_app_send_stop(l4_cb, tc_info); break; case TEST_NOTIF_APP_CLOSE: tc_info->tci_state.tos_session_close_cb(l4_cb); break; default: assert(false); break; } }
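/*
 * Editor's sketch (not part of the WARP17 sources): the RUN_OPEN, RUN_CLOSE
 * and RUN_SEND callbacks above all follow the same rate-limiting pattern --
 * consume what was done this interval, stop when the per-interval quota is
 * reached or the work queue drains, and otherwise return -EAGAIN so the
 * message gets reposted. The minimal, self-contained version below
 * illustrates only that control flow; the names (quota_t, work_queue_t,
 * rate_limited_step) are hypothetical, while the real code uses
 * rate_limit_consume()/rate_limit_reached() and the TRS_FLAGS_* bookkeeping
 * shown above.
 */
#include <stddef.h>

typedef struct quota_s {
    unsigned int available;  /* operations still allowed this interval */
} quota_t;

typedef struct work_queue_s {
    size_t pending;          /* sessions still waiting to run an operation */
} work_queue_t;

/* Returns 0 when processing should stop for this interval, or -1 (the moral
 * equivalent of -EAGAIN) when the caller should repost its message and
 * continue later.
 */
static int rate_limited_step(quota_t *q, work_queue_t *wq,
                             unsigned int consumed)
{
    if (consumed > q->available)
        consumed = q->available;
    q->available -= consumed;       /* "rate_limit_consume()" */

    if (q->available > 0) {         /* quota not reached yet */
        if (wq->pending == 0)
            return 0;               /* nothing left to do: stop */
        return -1;                  /* work and quota left: repost */
    }

    return 0;                       /* quota reached: wait for next interval */
}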
45,073
435
<gh_stars>100-1000 package datawave.webservice.common.exception; import javax.ejb.ApplicationException; import javax.ws.rs.core.Response; import datawave.webservice.result.BaseResponse; @ApplicationException(rollback = true) public class BadRequestException extends DatawaveWebApplicationException { private static final long serialVersionUID = 1L; public BadRequestException(Throwable t, BaseResponse response) { super(t, response, Response.Status.BAD_REQUEST.getStatusCode()); } }
178
3,710
<reponame>wofogen/tahoma2d /* lzo1x_9x.c -- implementation of the LZO1X-999 compression algorithm This file is part of the LZO real-time data compression library. Copyright (C) 2008 <NAME> Copyright (C) 2007 <NAME> Copyright (C) 2006 <NAME> Copyright (C) 2005 <NAME> Copyright (C) 2004 <NAME> Copyright (C) 2003 <NAME> Copyright (C) 2002 <NAME> Copyright (C) 2001 <NAME> Copyright (C) 2000 <NAME> Copyright (C) 1999 <NAME> Copyright (C) 1998 <NAME> Copyright (C) 1997 <NAME> Copyright (C) 1996 <NAME> All Rights Reserved. The LZO library is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. The LZO library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with the LZO library; see the file COPYING. If not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. <NAME> <<EMAIL>> http://www.oberhumer.com/opensource/lzo/ */ #if !defined(LZO1X) && !defined(LZO1Y) && !defined(LZO1Z) # define LZO1X #endif #if defined(LZO1X) # include "config1x.h" #elif defined(LZO1Y) # include "config1y.h" #elif defined(LZO1Z) # include "config1z.h" #else # error #endif /*********************************************************************** // ************************************************************************/ #define N M4_MAX_OFFSET /* size of ring buffer */ #define THRESHOLD 1 /* lower limit for match length */ #define F 2048 /* upper limit for match length */ #define SWD_BEST_OFF (LZO_MAX3( M2_MAX_LEN, M3_MAX_LEN, M4_MAX_LEN ) + 1) #if defined(LZO1X) # define LZO_COMPRESS_T lzo1x_999_t # define lzo_swd_t lzo1x_999_swd_t #elif defined(LZO1Y) # define LZO_COMPRESS_T lzo1y_999_t # define lzo_swd_t lzo1y_999_swd_t # define lzo1x_999_compress_internal lzo1y_999_compress_internal # define lzo1x_999_compress_dict lzo1y_999_compress_dict # define lzo1x_999_compress_level lzo1y_999_compress_level # define lzo1x_999_compress lzo1y_999_compress #elif defined(LZO1Z) # define LZO_COMPRESS_T lzo1z_999_t # define lzo_swd_t lzo1z_999_swd_t # define lzo1x_999_compress_internal lzo1z_999_compress_internal # define lzo1x_999_compress_dict lzo1z_999_compress_dict # define lzo1x_999_compress_level lzo1z_999_compress_level # define lzo1x_999_compress lzo1z_999_compress #else # error #endif #if 0 # define HEAD3(b,p) \ ((((((lzo_xint)b[p]<<3)^b[p+1])<<3)^b[p+2]) & (SWD_HSIZE-1)) #endif #if 0 && defined(LZO_UNALIGNED_OK_4) && defined(LZO_ABI_LITTLE_ENDIAN) # define HEAD3(b,p) \ (((* (lzo_uint32p) &b[p]) ^ ((* (lzo_uint32p) &b[p])>>10)) & (SWD_HSIZE-1)) #endif #include "lzo_mchw.ch" /* this is a public functions, but there is no prototype in a header file */ LZO_EXTERN(int) lzo1x_999_compress_internal ( const lzo_bytep in , lzo_uint in_len, lzo_bytep out, lzo_uintp out_len, lzo_voidp wrkmem, const lzo_bytep dict, lzo_uint dict_len, lzo_callback_p cb, int try_lazy, lzo_uint good_length, lzo_uint max_lazy, lzo_uint nice_length, lzo_uint max_chain, lzo_uint32 flags ); /*********************************************************************** // ************************************************************************/ static lzo_bytep code_match ( LZO_COMPRESS_T *c, lzo_bytep op, 
lzo_uint m_len, lzo_uint m_off ) { lzo_uint x_len = m_len; lzo_uint x_off = m_off; c->match_bytes += (unsigned long) m_len; #if 0 /* static lzo_uint last_m_len = 0, last_m_off = 0; static lzo_uint prev_m_off[4]; static int prev_m_off_ptr = 0; int i; //if (m_len >= 3 && m_len <= M2_MAX_LEN && m_off <= M2_MAX_OFFSET) if (m_len >= 3 && m_len <= M2_MAX_LEN) { //if (m_len == last_m_len && m_off == last_m_off) //printf("last_m_len + last_m_off\n"); //else if (m_off == last_m_off) printf("last_m_off\n"); else { for (i = 0; i < 4; i++) if (m_off == prev_m_off[i]) printf("prev_m_off %d: %5ld\n",i,(long)m_off); } } last_m_len = m_len; last_m_off = prev_m_off[prev_m_off_ptr] = m_off; prev_m_off_ptr = (prev_m_off_ptr + 1) & 3; */ #endif assert(op > c->out); if (m_len == 2) { assert(m_off <= M1_MAX_OFFSET); assert(c->r1_lit > 0); assert(c->r1_lit < 4); m_off -= 1; #if defined(LZO1Z) *op++ = LZO_BYTE(M1_MARKER | (m_off >> 6)); *op++ = LZO_BYTE(m_off << 2); #else *op++ = LZO_BYTE(M1_MARKER | ((m_off & 3) << 2)); *op++ = LZO_BYTE(m_off >> 2); #endif c->m1a_m++; } #if defined(LZO1Z) else if (m_len <= M2_MAX_LEN && (m_off <= M2_MAX_OFFSET || m_off == c->last_m_off)) #else else if (m_len <= M2_MAX_LEN && m_off <= M2_MAX_OFFSET) #endif { assert(m_len >= 3); #if defined(LZO1X) m_off -= 1; *op++ = LZO_BYTE(((m_len - 1) << 5) | ((m_off & 7) << 2)); *op++ = LZO_BYTE(m_off >> 3); assert(op[-2] >= M2_MARKER); #elif defined(LZO1Y) m_off -= 1; *op++ = LZO_BYTE(((m_len + 1) << 4) | ((m_off & 3) << 2)); *op++ = LZO_BYTE(m_off >> 2); assert(op[-2] >= M2_MARKER); #elif defined(LZO1Z) if (m_off == c->last_m_off) *op++ = LZO_BYTE(((m_len - 1) << 5) | (0x700 >> 6)); else { m_off -= 1; *op++ = LZO_BYTE(((m_len - 1) << 5) | (m_off >> 6)); *op++ = LZO_BYTE(m_off << 2); } #endif c->m2_m++; } else if (m_len == M2_MIN_LEN && m_off <= MX_MAX_OFFSET && c->r1_lit >= 4) { assert(m_len == 3); assert(m_off > M2_MAX_OFFSET); m_off -= 1 + M2_MAX_OFFSET; #if defined(LZO1Z) *op++ = LZO_BYTE(M1_MARKER | (m_off >> 6)); *op++ = LZO_BYTE(m_off << 2); #else *op++ = LZO_BYTE(M1_MARKER | ((m_off & 3) << 2)); *op++ = LZO_BYTE(m_off >> 2); #endif c->m1b_m++; } else if (m_off <= M3_MAX_OFFSET) { assert(m_len >= 3); m_off -= 1; if (m_len <= M3_MAX_LEN) *op++ = LZO_BYTE(M3_MARKER | (m_len - 2)); else { m_len -= M3_MAX_LEN; *op++ = M3_MARKER | 0; while (m_len > 255) { m_len -= 255; *op++ = 0; } assert(m_len > 0); *op++ = LZO_BYTE(m_len); } #if defined(LZO1Z) *op++ = LZO_BYTE(m_off >> 6); *op++ = LZO_BYTE(m_off << 2); #else *op++ = LZO_BYTE(m_off << 2); *op++ = LZO_BYTE(m_off >> 6); #endif c->m3_m++; } else { lzo_uint k; assert(m_len >= 3); assert(m_off > 0x4000); assert(m_off <= 0xbfff); m_off -= 0x4000; k = (m_off & 0x4000) >> 11; if (m_len <= M4_MAX_LEN) *op++ = LZO_BYTE(M4_MARKER | k | (m_len - 2)); else { m_len -= M4_MAX_LEN; *op++ = LZO_BYTE(M4_MARKER | k | 0); while (m_len > 255) { m_len -= 255; *op++ = 0; } assert(m_len > 0); *op++ = LZO_BYTE(m_len); } #if defined(LZO1Z) *op++ = LZO_BYTE(m_off >> 6); *op++ = LZO_BYTE(m_off << 2); #else *op++ = LZO_BYTE(m_off << 2); *op++ = LZO_BYTE(m_off >> 6); #endif c->m4_m++; } c->last_m_len = x_len; c->last_m_off = x_off; return op; } static lzo_bytep STORE_RUN ( LZO_COMPRESS_T *c, lzo_bytep op, const lzo_bytep ii, lzo_uint t ) { c->lit_bytes += (unsigned long) t; if (op == c->out && t <= 238) { *op++ = LZO_BYTE(17 + t); } else if (t <= 3) { #if defined(LZO1Z) op[-1] |= LZO_BYTE(t); #else op[-2] |= LZO_BYTE(t); #endif c->lit1_r++; } else if (t <= 18) { *op++ = LZO_BYTE(t - 3); c->lit2_r++; } else { lzo_uint 
tt = t - 18; *op++ = 0; while (tt > 255) { tt -= 255; *op++ = 0; } assert(tt > 0); *op++ = LZO_BYTE(tt); c->lit3_r++; } do *op++ = *ii++; while (--t > 0); return op; } static lzo_bytep code_run ( LZO_COMPRESS_T *c, lzo_bytep op, const lzo_bytep ii, lzo_uint lit, lzo_uint m_len ) { if (lit > 0) { assert(m_len >= 2); op = STORE_RUN(c,op,ii,lit); c->r1_m_len = m_len; c->r1_lit = lit; } else { assert(m_len >= 3); c->r1_m_len = 0; c->r1_lit = 0; } return op; } /*********************************************************************** // ************************************************************************/ static int len_of_coded_match ( lzo_uint m_len, lzo_uint m_off, lzo_uint lit ) { int n = 4; if (m_len < 2) return -1; if (m_len == 2) return (m_off <= M1_MAX_OFFSET && lit > 0 && lit < 4) ? 2 : -1; if (m_len <= M2_MAX_LEN && m_off <= M2_MAX_OFFSET) return 2; if (m_len == M2_MIN_LEN && m_off <= MX_MAX_OFFSET && lit >= 4) return 2; if (m_off <= M3_MAX_OFFSET) { if (m_len <= M3_MAX_LEN) return 3; m_len -= M3_MAX_LEN; while (m_len > 255) { m_len -= 255; n++; } return n; } if (m_off <= M4_MAX_OFFSET) { if (m_len <= M4_MAX_LEN) return 3; m_len -= M4_MAX_LEN; while (m_len > 255) { m_len -= 255; n++; } return n; } return -1; } static lzo_int min_gain(lzo_uint ahead, lzo_uint lit1, lzo_uint lit2, int l1, int l2, int l3) { lzo_int lazy_match_min_gain = 0; assert (ahead >= 1); lazy_match_min_gain += ahead; #if 0 if (l3 > 0) lit2 -= ahead; #endif if (lit1 <= 3) lazy_match_min_gain += (lit2 <= 3) ? 0 : 2; else if (lit1 <= 18) lazy_match_min_gain += (lit2 <= 18) ? 0 : 1; lazy_match_min_gain += (l2 - l1) * 2; if (l3 > 0) lazy_match_min_gain -= (ahead - l3) * 2; if (lazy_match_min_gain < 0) lazy_match_min_gain = 0; #if 0 if (l1 == 2) if (lazy_match_min_gain == 0) lazy_match_min_gain = 1; #endif return lazy_match_min_gain; } /*********************************************************************** // ************************************************************************/ #if !defined(NDEBUG) static void assert_match( const lzo_swd_p swd, lzo_uint m_len, lzo_uint m_off ) { const LZO_COMPRESS_T *c = swd->c; lzo_uint d_off; assert(m_len >= 2); if (m_off <= (lzo_uint) (c->bp - c->in)) { assert(c->bp - m_off + m_len < c->ip); assert(lzo_memcmp(c->bp, c->bp - m_off, m_len) == 0); } else { assert(swd->dict != NULL); d_off = m_off - (lzo_uint) (c->bp - c->in); assert(d_off <= swd->dict_len); if (m_len > d_off) { assert(lzo_memcmp(c->bp, swd->dict_end - d_off, d_off) == 0); assert(c->in + m_len - d_off < c->ip); assert(lzo_memcmp(c->bp + d_off, c->in, m_len - d_off) == 0); } else { assert(lzo_memcmp(c->bp, swd->dict_end - d_off, m_len) == 0); } } } #else # define assert_match(a,b,c) ((void)0) #endif #if defined(SWD_BEST_OFF) static void better_match ( const lzo_swd_p swd, lzo_uint *m_len, lzo_uint *m_off ) { #if defined(LZO1Z) const LZO_COMPRESS_T *c = swd->c; #endif if (*m_len <= M2_MIN_LEN) return; #if defined(LZO1Z) if (*m_off == c->last_m_off && *m_len <= M2_MAX_LEN) return; #if 1 if (*m_len >= M2_MIN_LEN + 1 && *m_len <= M2_MAX_LEN + 1 && c->last_m_off && swd->best_off[*m_len-1] == c->last_m_off) { *m_len = *m_len - 1; *m_off = swd->best_off[*m_len]; return; } #endif #endif if (*m_off <= M2_MAX_OFFSET) return; #if 1 /* M3/M4 -> M2 */ if (*m_off > M2_MAX_OFFSET && *m_len >= M2_MIN_LEN + 1 && *m_len <= M2_MAX_LEN + 1 && swd->best_off[*m_len-1] && swd->best_off[*m_len-1] <= M2_MAX_OFFSET) { *m_len = *m_len - 1; *m_off = swd->best_off[*m_len]; return; } #endif #if 1 /* M4 -> M2 */ if (*m_off > M3_MAX_OFFSET && 
*m_len >= M4_MAX_LEN + 1 && *m_len <= M2_MAX_LEN + 2 && swd->best_off[*m_len-2] && swd->best_off[*m_len-2] <= M2_MAX_OFFSET) { *m_len = *m_len - 2; *m_off = swd->best_off[*m_len]; return; } #endif #if 1 /* M4 -> M3 */ if (*m_off > M3_MAX_OFFSET && *m_len >= M4_MAX_LEN + 1 && *m_len <= M3_MAX_LEN + 1 && swd->best_off[*m_len-1] && swd->best_off[*m_len-1] <= M3_MAX_OFFSET) { *m_len = *m_len - 1; *m_off = swd->best_off[*m_len]; } #endif } #endif /*********************************************************************** // ************************************************************************/ LZO_PUBLIC(int) lzo1x_999_compress_internal ( const lzo_bytep in , lzo_uint in_len, lzo_bytep out, lzo_uintp out_len, lzo_voidp wrkmem, const lzo_bytep dict, lzo_uint dict_len, lzo_callback_p cb, int try_lazy, lzo_uint good_length, lzo_uint max_lazy, lzo_uint nice_length, lzo_uint max_chain, lzo_uint32 flags ) { lzo_bytep op; const lzo_bytep ii; lzo_uint lit; lzo_uint m_len, m_off; LZO_COMPRESS_T cc; LZO_COMPRESS_T * const c = &cc; lzo_swd_p const swd = (lzo_swd_p) wrkmem; int r; /* sanity check */ #if defined(LZO1X) LZO_COMPILE_TIME_ASSERT(LZO1X_999_MEM_COMPRESS >= SIZEOF_LZO_SWD_T) #elif defined(LZO1Y) LZO_COMPILE_TIME_ASSERT(LZO1Y_999_MEM_COMPRESS >= SIZEOF_LZO_SWD_T) #elif defined(LZO1Z) LZO_COMPILE_TIME_ASSERT(LZO1Z_999_MEM_COMPRESS >= SIZEOF_LZO_SWD_T) #else # error #endif /* setup parameter defaults */ /* number of lazy match tries */ if (try_lazy < 0) try_lazy = 1; /* reduce lazy match search if we already have a match with this length */ if (good_length <= 0) good_length = 32; /* do not try a lazy match if we already have a match with this length */ if (max_lazy <= 0) max_lazy = 32; /* stop searching for longer matches than this one */ if (nice_length <= 0) nice_length = 0; /* don't search more positions than this */ if (max_chain <= 0) max_chain = SWD_MAX_CHAIN; c->init = 0; c->ip = c->in = in; c->in_end = in + in_len; c->out = out; c->cb = cb; c->m1a_m = c->m1b_m = c->m2_m = c->m3_m = c->m4_m = 0; c->lit1_r = c->lit2_r = c->lit3_r = 0; op = out; ii = c->ip; /* point to start of literal run */ lit = 0; c->r1_lit = c->r1_m_len = 0; r = init_match(c,swd,dict,dict_len,flags); if (r != 0) return r; if (max_chain > 0) swd->max_chain = max_chain; if (nice_length > 0) swd->nice_length = nice_length; r = find_match(c,swd,0,0); if (r != 0) return r; while (c->look > 0) { lzo_uint ahead; lzo_uint max_ahead; int l1, l2, l3; c->codesize = pd(op, out); m_len = c->m_len; m_off = c->m_off; assert(c->bp == c->ip - c->look); assert(c->bp >= in); if (lit == 0) ii = c->bp; assert(ii + lit == c->bp); assert(swd->b_char == *(c->bp)); if ( m_len < 2 || (m_len == 2 && (m_off > M1_MAX_OFFSET || lit == 0 || lit >= 4)) || #if 1 /* Do not accept this match for compressed-data compatibility * with LZO v1.01 and before * [ might be a problem for decompress() and optimize() ] */ (m_len == 2 && op == out) || #endif (op == out && lit == 0)) { /* a literal */ m_len = 0; } else if (m_len == M2_MIN_LEN) { /* compression ratio improves if we code a literal in some cases */ if (m_off > MX_MAX_OFFSET && lit >= 4) m_len = 0; } if (m_len == 0) { /* a literal */ lit++; swd->max_chain = max_chain; r = find_match(c,swd,1,0); assert(r == 0); continue; } /* a match */ #if defined(SWD_BEST_OFF) if (swd->use_best_off) better_match(swd,&m_len,&m_off); #endif assert_match(swd,m_len,m_off); /* shall we try a lazy match ? 
*/ ahead = 0; if (try_lazy <= 0 || m_len >= max_lazy) { /* no */ l1 = 0; max_ahead = 0; } else { /* yes, try a lazy match */ l1 = len_of_coded_match(m_len,m_off,lit); assert(l1 > 0); #if 1 max_ahead = LZO_MIN((lzo_uint)try_lazy, (lzo_uint)l1 - 1); #else max_ahead = LZO_MIN3(try_lazy, l1, m_len - 1); #endif } while (ahead < max_ahead && c->look > m_len) { lzo_int lazy_match_min_gain; if (m_len >= good_length) swd->max_chain = max_chain >> 2; else swd->max_chain = max_chain; r = find_match(c,swd,1,0); ahead++; assert(r == 0); assert(c->look > 0); assert(ii + lit + ahead == c->bp); #if defined(LZO1Z) if (m_off == c->last_m_off && c->m_off != c->last_m_off) if (m_len >= M2_MIN_LEN && m_len <= M2_MAX_LEN) c->m_len = 0; #endif if (c->m_len < m_len) continue; #if 1 if (c->m_len == m_len && c->m_off >= m_off) continue; #endif #if defined(SWD_BEST_OFF) if (swd->use_best_off) better_match(swd,&c->m_len,&c->m_off); #endif l2 = len_of_coded_match(c->m_len,c->m_off,lit+ahead); if (l2 < 0) continue; #if 0 if (c->m_len == m_len && l2 >= l1) continue; #endif #if 1 /* compressed-data compatibility [see above] */ l3 = (op == out) ? -1 : len_of_coded_match(ahead,m_off,lit); #else l3 = len_of_coded_match(ahead,m_off,lit); #endif lazy_match_min_gain = min_gain(ahead,lit,lit+ahead,l1,l2,l3); if (c->m_len >= m_len + lazy_match_min_gain) { c->lazy++; assert_match(swd,c->m_len,c->m_off); if (l3 > 0) { /* code previous run */ op = code_run(c,op,ii,lit,ahead); lit = 0; /* code shortened match */ op = code_match(c,op,ahead,m_off); } else { lit += ahead; assert(ii + lit == c->bp); } goto lazy_match_done; } } assert(ii + lit + ahead == c->bp); /* 1 - code run */ op = code_run(c,op,ii,lit,m_len); lit = 0; /* 2 - code match */ op = code_match(c,op,m_len,m_off); swd->max_chain = max_chain; r = find_match(c,swd,m_len,1+ahead); assert(r == 0); lazy_match_done: ; } /* store final run */ if (lit > 0) op = STORE_RUN(c,op,ii,lit); #if defined(LZO_EOF_CODE) *op++ = M4_MARKER | 1; *op++ = 0; *op++ = 0; #endif c->codesize = pd(op, out); assert(c->textsize == in_len); *out_len = pd(op, out); if (c->cb && c->cb->nprogress) (*c->cb->nprogress)(c->cb, c->textsize, c->codesize, 0); #if 0 printf("%ld %ld -> %ld %ld: %ld %ld %ld %ld %ld %ld: %ld %ld %ld %ld\n", (long) c->textsize, (long) in_len, (long) c->codesize, c->match_bytes, c->m1a_m, c->m1b_m, c->m2_m, c->m3_m, c->m4_m, c->lit_bytes, c->lit1_r, c->lit2_r, c->lit3_r, c->lazy); #endif assert(c->lit_bytes + c->match_bytes == in_len); return LZO_E_OK; } /*********************************************************************** // ************************************************************************/ LZO_PUBLIC(int) lzo1x_999_compress_level ( const lzo_bytep in , lzo_uint in_len, lzo_bytep out, lzo_uintp out_len, lzo_voidp wrkmem, const lzo_bytep dict, lzo_uint dict_len, lzo_callback_p cb, int compression_level ) { static const struct { int try_lazy; lzo_uint good_length; lzo_uint max_lazy; lzo_uint nice_length; lzo_uint max_chain; lzo_uint32 flags; } c[9] = { { 0, 0, 0, 8, 4, 0 }, /* faster compression */ { 0, 0, 0, 16, 8, 0 }, { 0, 0, 0, 32, 16, 0 }, { 1, 4, 4, 16, 16, 0 }, { 1, 8, 16, 32, 32, 0 }, { 1, 8, 16, 128, 128, 0 }, { 2, 8, 32, 128, 256, 0 }, { 2, 32, 128, F, 2048, 1 }, { 2, F, F, F, 4096, 1 } /* max. 
compression */ }; if (compression_level < 1 || compression_level > 9) return LZO_E_ERROR; compression_level -= 1; return lzo1x_999_compress_internal(in, in_len, out, out_len, wrkmem, dict, dict_len, cb, c[compression_level].try_lazy, c[compression_level].good_length, c[compression_level].max_lazy, #if 0 c[compression_level].nice_length, #else 0, #endif c[compression_level].max_chain, c[compression_level].flags); } /*********************************************************************** // ************************************************************************/ LZO_PUBLIC(int) lzo1x_999_compress_dict ( const lzo_bytep in , lzo_uint in_len, lzo_bytep out, lzo_uintp out_len, lzo_voidp wrkmem, const lzo_bytep dict, lzo_uint dict_len ) { return lzo1x_999_compress_level(in, in_len, out, out_len, wrkmem, dict, dict_len, 0, 8); } LZO_PUBLIC(int) lzo1x_999_compress ( const lzo_bytep in , lzo_uint in_len, lzo_bytep out, lzo_uintp out_len, lzo_voidp wrkmem ) { return lzo1x_999_compress_level(in, in_len, out, out_len, wrkmem, NULL, 0, (lzo_callback_p) 0, 8); } /* vi:ts=4:et */
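/*
 * Editor's sketch (not part of lzo1x_9x.c): one way the public entry point
 * defined above might be invoked. The <lzo/lzo1x.h> include path, the
 * lzo_init() call and the worst-case output-buffer formula are assumptions
 * taken from the stock LZO distribution -- verify them against your local
 * headers before relying on this.
 */
#include <stdlib.h>
#include <lzo/lzo1x.h>

static int compress_buffer_999(const unsigned char *in, lzo_uint in_len)
{
    /* Commonly quoted worst-case expansion bound for LZO1X (assumption). */
    lzo_uint out_alloc = in_len + in_len / 16 + 64 + 3;
    unsigned char *out = malloc(out_alloc);
    void *wrkmem = malloc(LZO1X_999_MEM_COMPRESS);
    lzo_uint out_len = 0;
    int r = LZO_E_ERROR;

    if (out != NULL && wrkmem != NULL && lzo_init() == LZO_E_OK) {
        /* lzo1x_999_compress() (see above) uses level-8 defaults,
         * no dictionary and no progress callback. */
        r = lzo1x_999_compress(in, in_len, out, &out_len, wrkmem);
    }

    free(wrkmem);
    free(out);
    return (r == LZO_E_OK) ? 0 : -1;
}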
13,748
1,273
package org.broadinstitute.hellbender.utils; import org.apache.commons.math3.special.Gamma; /** * Wrapper class so that the log10Factorial array is only calculated if it's used */ public final class Log10FactorialCache extends IntToDoubleFunctionCache { private static final int CACHE_SIZE = 10_000; @Override protected int maxSize() { return CACHE_SIZE; } @Override protected double compute(final int n) { return MathUtils.log10Gamma(n + 1); } }
174
575
// Copyright 2020 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "base/check_op.h" // check_op.h is a widely included header and its size has significant impact on // build time. Try not to raise this limit unless absolutely necessary. See // https://chromium.googlesource.com/chromium/src/+/HEAD/docs/wmax_tokens.md #ifndef NACL_TC_REV #pragma clang max_tokens_here 244000 #endif #include <string.h> #include <cstdio> #include <sstream> namespace logging { char* CheckOpValueStr(int v) { char buf[50]; snprintf(buf, sizeof(buf), "%d", v); return strdup(buf); } char* CheckOpValueStr(unsigned v) { char buf[50]; snprintf(buf, sizeof(buf), "%u", v); return strdup(buf); } char* CheckOpValueStr(long v) { char buf[50]; snprintf(buf, sizeof(buf), "%ld", v); return strdup(buf); } char* CheckOpValueStr(unsigned long v) { char buf[50]; snprintf(buf, sizeof(buf), "%lu", v); return strdup(buf); } char* CheckOpValueStr(long long v) { char buf[50]; snprintf(buf, sizeof(buf), "%lld", v); return strdup(buf); } char* CheckOpValueStr(unsigned long long v) { char buf[50]; snprintf(buf, sizeof(buf), "%llu", v); return strdup(buf); } char* CheckOpValueStr(const void* v) { char buf[50]; snprintf(buf, sizeof(buf), "%p", v); return strdup(buf); } char* CheckOpValueStr(std::nullptr_t v) { return strdup("nullptr"); } char* CheckOpValueStr(double v) { char buf[50]; snprintf(buf, sizeof(buf), "%.6lf", v); return strdup(buf); } char* StreamValToStr(const void* v, void (*stream_func)(std::ostream&, const void*)) { std::stringstream ss; stream_func(ss, v); return strdup(ss.str().c_str()); } CheckOpResult::CheckOpResult(const char* expr_str, char* v1_str, char* v2_str) { std::ostringstream ss; ss << expr_str << " (" << v1_str << " vs. " << v2_str << ")"; message_ = strdup(ss.str().c_str()); free(v1_str); free(v2_str); } } // namespace logging
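// Editor's sketch (not Chromium code): the allocation contract used by the
// helpers above, restated in plain C -- format the operand into a fixed
// stack buffer, strdup() it, and let the message builder free() both operand
// strings once the final message has been assembled. The names below are
// illustrative only.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char* ValueStrInt(int v) {
  char buf[50];
  snprintf(buf, sizeof(buf), "%d", v);
  return strdup(buf);  // caller (the message builder) takes ownership
}

static char* BuildCheckMessage(const char* expr, char* v1, char* v2) {
  size_t n = strlen(expr) + strlen(v1) + strlen(v2) + 16;
  char* msg = malloc(n);
  if (msg != NULL)
    snprintf(msg, n, "%s (%s vs. %s)", expr, v1, v2);
  free(v1);  // mirrors CheckOpResult's constructor, which frees both strings
  free(v2);
  return msg;
}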
802
1,073
<filename>mobius-core/src/test/java/com/spotify/mobius/EventSourceConnectableTest.java /* * -\-\- * Mobius * -- * Copyright (c) 2017-2020 Spotify AB * -- * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * -/-/- */ package com.spotify.mobius; import static org.hamcrest.Matchers.is; import static org.junit.Assert.assertThat; import com.spotify.mobius.disposables.Disposable; import com.spotify.mobius.functions.Consumer; import com.spotify.mobius.test.RecordingConsumer; import java.util.concurrent.CopyOnWriteArrayList; import javax.annotation.Nonnull; import org.junit.Before; import org.junit.Test; public class EventSourceConnectableTest { TestEventSource source; Connectable<Integer, String> underTest; RecordingConsumer<String> events; @Before public void setUp() throws Exception { source = new TestEventSource(); underTest = EventSourceConnectable.create(source); events = new RecordingConsumer<>(); } public static class SubscriptionsBehavior extends EventSourceConnectableTest { @Test public void subscribesToEventSourceOnConnect() { underTest.connect(events); assertThat(source.subscriberCount(), is(1)); } @Test public void subscribesToEventSourceOnEveryConnect() { final Connection<Integer> c1 = underTest.connect(events); final Connection<Integer> c2 = underTest.connect(events); assertThat(source.subscriberCount(), is(2)); c2.dispose(); assertThat(source.subscriberCount(), is(1)); c1.dispose(); assertThat(source.subscriberCount(), is(0)); } @Test public void disposingUnsubscribesFromEventSource() { final Connection<Integer> connection = underTest.connect(events); connection.dispose(); assertThat(source.subscriberCount(), is(0)); } @Test public void disposingThenSubscribingResubscribesToEventSource() { Connection<Integer> connection = underTest.connect(events); assertThat(source.subscriberCount(), is(1)); connection.dispose(); assertThat(source.subscriberCount(), is(0)); connection = underTest.connect(events); assertThat(source.subscriberCount(), is(1)); connection.dispose(); assertThat(source.subscriberCount(), is(0)); } } public static class EmissionsBehavior extends EventSourceConnectableTest { @Test public void forwardsAllEmittedEvents() { underTest.connect(events); source.publishEvent("Hello"); source.publishEvent("World"); events.assertValues("Hello", "World"); } @Test public void noItemsAreEmittedOnceDisposed() { final Connection<Integer> connection = underTest.connect(events); source.publishEvent("Hello"); connection.dispose(); source.publishEvent("World"); events.assertValues("Hello"); } } private static class TestEventSource implements EventSource<String> { private CopyOnWriteArrayList<Consumer<String>> consumers = new CopyOnWriteArrayList<>(); @Nonnull @Override public Disposable subscribe(Consumer<String> eventConsumer) { consumers.add(eventConsumer); return () -> consumers.remove(eventConsumer); } void publishEvent(String event) { for (Consumer<String> consumer : consumers) { consumer.accept(event); } } int subscriberCount() { return consumers.size(); } } }
1,303
5,169
{ "name": "GEOSwift", "version": "4.1.0", "swift_version": "5.0", "cocoapods_version": ">= 1.4.0", "summary": "The Swift Geographic Engine.", "description": "Easily handle a geographical object model (points, linestrings, polygons etc.) and related\ntopographical operations (intersections, overlapping etc.). A type-safe, MIT-licensed Swift\ninterface to the OSGeo's GEOS library routines, nicely integrated with MapKit and Quicklook.", "homepage": "https://github.com/GEOSwift/GEOSwift", "license": { "type": "MIT", "file": "LICENSE" }, "authors": { "<NAME>": "<EMAIL>" }, "platforms": { "ios": "8.0" }, "source": { "git": "https://github.com/GEOSwift/GEOSwift.git", "tag": "4.1.0" }, "source_files": "GEOSwift/*.{swift,h}", "exclude_files": "GEOSwift/Bridge.swift", "dependencies": { "geos": [ "4.0.1" ] } }
364
32,544
package com.baeldung.dispatchservlet.listener; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import javax.servlet.ServletContextEvent; import javax.servlet.ServletContextListener; public class CustomListener implements ServletContextListener { Logger logger = LoggerFactory.getLogger(CustomListener.class); @Override public void contextInitialized(ServletContextEvent sce) { logger.info("CustomListener is initialized"); } @Override public void contextDestroyed(ServletContextEvent sce) { logger.info("CustomListener is destroyed"); } }
198
679
<filename>main/comphelper/source/property/propertycontainerhelper.cxx /************************************************************** * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. * *************************************************************/ // MARKER(update_precomp.py): autogen include statement, do not remove #include "precompiled_comphelper.hxx" #include <comphelper/propertycontainerhelper.hxx> #include <comphelper/property.hxx> #include <osl/diagnose.h> #include <uno/data.h> #include <com/sun/star/uno/genfunc.h> #include <com/sun/star/beans/PropertyAttribute.hpp> #include <com/sun/star/beans/UnknownPropertyException.hpp> #include <rtl/ustrbuf.hxx> #include <algorithm> //......................................................................... namespace comphelper { //......................................................................... using namespace ::com::sun::star::uno; using namespace ::com::sun::star::lang; using namespace ::com::sun::star::beans; //-------------------------------------------------------------------------- namespace { // comparing two property descriptions struct PropertyDescriptionCompareByHandle : public ::std::binary_function< PropertyDescription, PropertyDescription, bool > { bool operator() (const PropertyDescription& x, const PropertyDescription& y) const { return x.aProperty.Handle < y.aProperty.Handle; } }; // comparing two property descriptions struct PropertyDescriptionHandleCompare : public ::std::binary_function< PropertyDescription, sal_Int32, bool > { bool operator() (const PropertyDescription& x, const sal_Int32& y) const { return x.aProperty.Handle < y; } bool operator() (const sal_Int32& x, const PropertyDescription& y) const { return x < y.aProperty.Handle; } }; // comparing two property descriptions (by name) struct PropertyDescriptionNameMatch : public ::std::unary_function< PropertyDescription, bool > { ::rtl::OUString m_rCompare; PropertyDescriptionNameMatch( const ::rtl::OUString& _rCompare ) : m_rCompare( _rCompare ) { } bool operator() (const PropertyDescription& x ) const { return x.aProperty.Name.equals(m_rCompare); } }; } //========================================================================== //= OPropertyContainerHelper //========================================================================== //-------------------------------------------------------------------------- OPropertyContainerHelper::OPropertyContainerHelper() :m_bUnused(sal_False) { } // ------------------------------------------------------------------------- OPropertyContainerHelper::~OPropertyContainerHelper() { } //-------------------------------------------------------------------------- void OPropertyContainerHelper::registerProperty(const ::rtl::OUString& _rName, sal_Int32 _nHandle, sal_Int32 _nAttributes, void* _pPointerToMember, 
const Type& _rMemberType) { OSL_ENSURE((_nAttributes & PropertyAttribute::MAYBEVOID) == 0, "OPropertyContainerHelper::registerProperty: don't use this for properties which may be void ! There is a method called \"registerMayBeVoidProperty\" for this !"); OSL_ENSURE(!_rMemberType.equals(::getCppuType(static_cast< Any* >(NULL))), "OPropertyContainerHelper::registerProperty: don't give my the type of an uno::Any ! Really can't handle this !"); OSL_ENSURE(_pPointerToMember, "OPropertyContainerHelper::registerProperty: you gave me nonsense : the pointer must be non-NULL"); PropertyDescription aNewProp; aNewProp.aProperty = Property( _rName, _nHandle, _rMemberType, (sal_Int16)_nAttributes ); aNewProp.eLocated = PropertyDescription::ltDerivedClassRealType; aNewProp.aLocation.pDerivedClassMember = _pPointerToMember; implPushBackProperty(aNewProp); } //-------------------------------------------------------------------------- void OPropertyContainerHelper::revokeProperty( sal_Int32 _nHandle ) { PropertiesIterator aPos = searchHandle( _nHandle ); if ( aPos == m_aProperties.end() ) throw UnknownPropertyException(); m_aProperties.erase( aPos ); } //-------------------------------------------------------------------------- void OPropertyContainerHelper::registerMayBeVoidProperty(const ::rtl::OUString& _rName, sal_Int32 _nHandle, sal_Int32 _nAttributes, Any* _pPointerToMember, const Type& _rExpectedType) { OSL_ENSURE((_nAttributes & PropertyAttribute::MAYBEVOID) != 0, "OPropertyContainerHelper::registerMayBeVoidProperty: why calling this when the attributes say nothing about may-be-void ?"); OSL_ENSURE(!_rExpectedType.equals(::getCppuType(static_cast< Any* >(NULL))), "OPropertyContainerHelper::registerMayBeVoidProperty: don't give my the type of an uno::Any ! Really can't handle this !"); OSL_ENSURE(_pPointerToMember, "OPropertyContainerHelper::registerMayBeVoidProperty: you gave me nonsense : the pointer must be non-NULL"); _nAttributes |= PropertyAttribute::MAYBEVOID; PropertyDescription aNewProp; aNewProp.aProperty = Property( _rName, _nHandle, _rExpectedType, (sal_Int16)_nAttributes ); aNewProp.eLocated = PropertyDescription::ltDerivedClassAnyType; aNewProp.aLocation.pDerivedClassMember = _pPointerToMember; implPushBackProperty(aNewProp); } //-------------------------------------------------------------------------- void OPropertyContainerHelper::registerPropertyNoMember(const ::rtl::OUString& _rName, sal_Int32 _nHandle, sal_Int32 _nAttributes, const Type& _rType, const void* _pInitialValue) { OSL_ENSURE(!_rType.equals(::getCppuType(static_cast< Any* >(NULL))), "OPropertyContainerHelper::registerPropertyNoMember : don't give my the type of an uno::Any ! Really can't handle this !"); OSL_ENSURE(_pInitialValue || ((_nAttributes & PropertyAttribute::MAYBEVOID) != 0), "OPropertyContainerHelper::registerPropertyNoMember : you should not ommit the initial value if the property can't be void ! 
This will definitivly crash later !"); PropertyDescription aNewProp; aNewProp.aProperty = Property( _rName, _nHandle, _rType, (sal_Int16)_nAttributes ); aNewProp.eLocated = PropertyDescription::ltHoldMyself; aNewProp.aLocation.nOwnClassVectorIndex = m_aHoldProperties.size(); if (_pInitialValue) m_aHoldProperties.push_back(Any(_pInitialValue, _rType)); else m_aHoldProperties.push_back(Any()); implPushBackProperty(aNewProp); } //-------------------------------------------------------------------------- sal_Bool OPropertyContainerHelper::isRegisteredProperty( sal_Int32 _nHandle ) const { return const_cast< OPropertyContainerHelper* >( this )->searchHandle( _nHandle ) != m_aProperties.end(); } //-------------------------------------------------------------------------- sal_Bool OPropertyContainerHelper::isRegisteredProperty( const ::rtl::OUString& _rName ) const { // TODO: the current structure is from a time where properties were // static, not dynamic. Since we allow that properties are also dynamic, // i.e. registered and revoked even though the XPropertySet has already been // accessed, a vector is not really the best data structure anymore ... ConstPropertiesIterator pos = ::std::find_if( m_aProperties.begin(), m_aProperties.end(), PropertyDescriptionNameMatch( _rName ) ); return pos != m_aProperties.end(); } //-------------------------------------------------------------------------- namespace { struct ComparePropertyWithHandle { bool operator()( const PropertyDescription& _rLHS, sal_Int32 _nRHS ) const { return _rLHS.aProperty.Handle < _nRHS; } bool operator()( sal_Int32 _nLHS, const PropertyDescription& _rRHS ) const { return _nLHS < _rRHS.aProperty.Handle; } }; } //-------------------------------------------------------------------------- void OPropertyContainerHelper::implPushBackProperty(const PropertyDescription& _rProp) { #ifdef DBG_UTIL for ( PropertiesIterator checkConflicts = m_aProperties.begin(); checkConflicts != m_aProperties.end(); ++checkConflicts ) { OSL_ENSURE(checkConflicts->aProperty.Name != _rProp.aProperty.Name, "OPropertyContainerHelper::implPushBackProperty: name already exists!"); OSL_ENSURE(checkConflicts->aProperty.Handle != _rProp.aProperty.Handle, "OPropertyContainerHelper::implPushBackProperty: handle already exists!"); } #endif PropertiesIterator pos = ::std::lower_bound( m_aProperties.begin(), m_aProperties.end(), _rProp.aProperty.Handle, ComparePropertyWithHandle() ); m_aProperties.insert( pos, _rProp ); } //-------------------------------------------------------------------------- namespace { void lcl_throwIllegalPropertyValueTypeException( const PropertyDescription& _rProperty, const Any& _rValue ) { ::rtl::OUStringBuffer aErrorMessage; aErrorMessage.appendAscii( "The given value cannot be converted to the required property type." 
); aErrorMessage.appendAscii( "\n(property name \"" ); aErrorMessage.append( _rProperty.aProperty.Name ); aErrorMessage.appendAscii( "\", found value type \"" ); aErrorMessage.append( _rValue.getValueType().getTypeName() ); aErrorMessage.appendAscii( "\", required property type \"" ); aErrorMessage.append( _rProperty.aProperty.Type.getTypeName() ); aErrorMessage.appendAscii( "\")" ); throw IllegalArgumentException( aErrorMessage.makeStringAndClear(), NULL, 4 ); } } //-------------------------------------------------------------------------- sal_Bool OPropertyContainerHelper::convertFastPropertyValue( Any& _rConvertedValue, Any& _rOldValue, sal_Int32 _nHandle, const Any& _rValue ) SAL_THROW( (IllegalArgumentException) ) { sal_Bool bModified = sal_False; // get the property somebody is asking for PropertiesIterator aPos = searchHandle(_nHandle); if (aPos == m_aProperties.end()) { OSL_ENSURE( false, "OPropertyContainerHelper::convertFastPropertyValue: unknown handle!" ); // should not happen if the derived class has built a correct property set info helper to be used by // our base class OPropertySetHelper return bModified; } switch (aPos->eLocated) { // similar handling for the two cases where the value is stored in an any case PropertyDescription::ltHoldMyself: case PropertyDescription::ltDerivedClassAnyType: { sal_Bool bMayBeVoid = ((aPos->aProperty.Attributes & PropertyAttribute::MAYBEVOID) != 0); // non modifiable version of the value-to-be-set Any aNewRequestedValue( _rValue ); // normalization // (#102329# - 2002-08-14 - <EMAIL>) // (#i29490# - 2004-06-16 - <EMAIL>) if ( !aNewRequestedValue.getValueType().equals( aPos->aProperty.Type ) ) { // the actually given value is not of the same type as the one required Any aProperlyTyped( NULL, aPos->aProperty.Type.getTypeLibType() ); if ( uno_type_assignData( const_cast< void* >( aProperlyTyped.getValue() ), aProperlyTyped.getValueType().getTypeLibType(), const_cast< void* >( aNewRequestedValue.getValue() ), aNewRequestedValue.getValueType().getTypeLibType(), reinterpret_cast< uno_QueryInterfaceFunc >( cpp_queryInterface ), reinterpret_cast< uno_AcquireFunc >( cpp_acquire ), reinterpret_cast< uno_ReleaseFunc >( cpp_release ) ) ) { // we were able to query the given XInterface-derivee for the interface // which is required for this property aNewRequestedValue = aProperlyTyped; } } // argument check if ( ! 
( (bMayBeVoid && !aNewRequestedValue.hasValue()) // void is allowed if the attribute says so || (aNewRequestedValue.getValueType().equals(aPos->aProperty.Type)) // else the types have to be equal ) ) { lcl_throwIllegalPropertyValueTypeException( *aPos, _rValue ); } Any* pPropContainer = NULL; // the pointer to the any which holds the property value, no matter if located in the derived clas // or in out vector if (PropertyDescription::ltHoldMyself == aPos->eLocated) { OSL_ENSURE(aPos->aLocation.nOwnClassVectorIndex < (sal_Int32)m_aHoldProperties.size(), "OPropertyContainerHelper::convertFastPropertyValue: invalid position !"); PropertyContainerIterator aIter = m_aHoldProperties.begin() + aPos->aLocation.nOwnClassVectorIndex; pPropContainer = &(*aIter); } else pPropContainer = reinterpret_cast<Any*>(aPos->aLocation.pDerivedClassMember); // check if the new value differs from the current one if (!pPropContainer->hasValue() || !aNewRequestedValue.hasValue()) bModified = pPropContainer->hasValue() != aNewRequestedValue.hasValue(); else bModified = !uno_type_equalData( const_cast< void* >( pPropContainer->getValue() ), aPos->aProperty.Type.getTypeLibType(), const_cast< void* >( aNewRequestedValue.getValue() ), aPos->aProperty.Type.getTypeLibType(), reinterpret_cast< uno_QueryInterfaceFunc >( cpp_queryInterface ), reinterpret_cast< uno_ReleaseFunc >( cpp_release ) ); if (bModified) { _rOldValue = *pPropContainer; _rConvertedValue = aNewRequestedValue; } } break; case PropertyDescription::ltDerivedClassRealType: // let the UNO runtime library do any possible conversion // this may include a change of the type - for instance, if a LONG is required, // but a short is given, then this is valid, as it can be converted without any potential // data loss Any aProperlyTyped; const Any* pNewValue = &_rValue; if (!_rValue.getValueType().equals(aPos->aProperty.Type)) { sal_Bool bConverted = sal_False; // a temporary any of the correct (required) type aProperlyTyped = Any( NULL, aPos->aProperty.Type.getTypeLibType() ); // (need this as we do not want to overwrite the derived class member here) if ( uno_type_assignData( const_cast<void*>(aProperlyTyped.getValue()), aProperlyTyped.getValueType().getTypeLibType(), const_cast<void*>(_rValue.getValue()), _rValue.getValueType().getTypeLibType(), reinterpret_cast< uno_QueryInterfaceFunc >( cpp_queryInterface ), reinterpret_cast< uno_AcquireFunc >( cpp_acquire ), reinterpret_cast< uno_ReleaseFunc >( cpp_release ) ) ) { // could query for the requested interface bConverted = sal_True; pNewValue = &aProperlyTyped; } if ( !bConverted ) lcl_throwIllegalPropertyValueTypeException( *aPos, _rValue ); } // from here on, we should have the proper type OSL_ENSURE( pNewValue->getValueType() == aPos->aProperty.Type, "OPropertyContainerHelper::convertFastPropertyValue: conversion failed!" 
); bModified = !uno_type_equalData( aPos->aLocation.pDerivedClassMember, aPos->aProperty.Type.getTypeLibType(), const_cast<void*>(pNewValue->getValue()), aPos->aProperty.Type.getTypeLibType(), reinterpret_cast< uno_QueryInterfaceFunc >( cpp_queryInterface ), reinterpret_cast< uno_ReleaseFunc >( cpp_release ) ); if (bModified) { _rOldValue.setValue(aPos->aLocation.pDerivedClassMember, aPos->aProperty.Type); _rConvertedValue = *pNewValue; } break; } return bModified; } //-------------------------------------------------------------------------- void OPropertyContainerHelper::setFastPropertyValue(sal_Int32 _nHandle, const Any& _rValue) SAL_THROW( (Exception) ) { // get the property somebody is asking for PropertiesIterator aPos = searchHandle(_nHandle); if (aPos == m_aProperties.end()) { OSL_ENSURE( false, "OPropertyContainerHelper::setFastPropertyValue: unknown handle!" ); // should not happen if the derived class has built a correct property set info helper to be used by // our base class OPropertySetHelper return; } switch (aPos->eLocated) { case PropertyDescription::ltHoldMyself: m_aHoldProperties[aPos->aLocation.nOwnClassVectorIndex] = _rValue; break; case PropertyDescription::ltDerivedClassAnyType: *reinterpret_cast< Any* >(aPos->aLocation.pDerivedClassMember) = _rValue; break; case PropertyDescription::ltDerivedClassRealType: #if OSL_DEBUG_LEVEL > 0 sal_Bool bSuccess = #endif // copy the data from the to-be-set value uno_type_assignData( aPos->aLocation.pDerivedClassMember, aPos->aProperty.Type.getTypeLibType(), const_cast< void* >( _rValue.getValue() ), _rValue.getValueType().getTypeLibType(), reinterpret_cast< uno_QueryInterfaceFunc >( cpp_queryInterface ), reinterpret_cast< uno_AcquireFunc >( cpp_acquire ), reinterpret_cast< uno_ReleaseFunc >( cpp_release ) ); OSL_ENSURE( bSuccess, "OPropertyContainerHelper::setFastPropertyValue: ooops .... the value could not be assigned!"); break; } } //-------------------------------------------------------------------------- void OPropertyContainerHelper::getFastPropertyValue(Any& _rValue, sal_Int32 _nHandle) const { // get the property somebody is asking for PropertiesIterator aPos = const_cast<OPropertyContainerHelper*>(this)->searchHandle(_nHandle); if (aPos == m_aProperties.end()) { OSL_ENSURE( false, "OPropertyContainerHelper::getFastPropertyValue: unknown handle!" 
); // should not happen if the derived class has built a correct property set info helper to be used by // our base class OPropertySetHelper return; } switch (aPos->eLocated) { case PropertyDescription::ltHoldMyself: OSL_ENSURE(aPos->aLocation.nOwnClassVectorIndex < (sal_Int32)m_aHoldProperties.size(), "OPropertyContainerHelper::convertFastPropertyValue: invalid position !"); _rValue = m_aHoldProperties[aPos->aLocation.nOwnClassVectorIndex]; break; case PropertyDescription::ltDerivedClassAnyType: _rValue = *reinterpret_cast<Any*>(aPos->aLocation.pDerivedClassMember); break; case PropertyDescription::ltDerivedClassRealType: _rValue.setValue(aPos->aLocation.pDerivedClassMember, aPos->aProperty.Type); break; } } //-------------------------------------------------------------------------- OPropertyContainerHelper::PropertiesIterator OPropertyContainerHelper::searchHandle(sal_Int32 _nHandle) { // search a lower bound PropertiesIterator aLowerBound = ::std::lower_bound( m_aProperties.begin(), m_aProperties.end(), _nHandle, PropertyDescriptionHandleCompare()); // check for identity if ((aLowerBound != m_aProperties.end()) && aLowerBound->aProperty.Handle != _nHandle) aLowerBound = m_aProperties.end(); return aLowerBound; } //-------------------------------------------------------------------------- const Property& OPropertyContainerHelper::getProperty( const ::rtl::OUString& _rName ) const { ConstPropertiesIterator pos = ::std::find_if( m_aProperties.begin(), m_aProperties.end(), PropertyDescriptionNameMatch( _rName ) ); if ( pos == m_aProperties.end() ) throw UnknownPropertyException( _rName, NULL ); return pos->aProperty; } //-------------------------------------------------------------------------- void OPropertyContainerHelper::modifyAttributes(sal_Int32 _nHandle, sal_Int32 _nAddAttrib, sal_Int32 _nRemoveAttrib) { // get the property somebody is asking for PropertiesIterator aPos = searchHandle(_nHandle); if (aPos == m_aProperties.end()) { OSL_ENSURE( false, "OPropertyContainerHelper::modifyAttributes: unknown handle!" 
); // should not happen if the derived class has built a correct property set info helper to be used by // our base class OPropertySetHelper return; } aPos->aProperty.Attributes |= _nAddAttrib; aPos->aProperty.Attributes &= ~_nRemoveAttrib; } //-------------------------------------------------------------------------- void OPropertyContainerHelper::describeProperties(Sequence< Property >& _rProps) const { Sequence< Property > aOwnProps(m_aProperties.size()); Property* pOwnProps = aOwnProps.getArray(); for ( ConstPropertiesIterator aLoop = m_aProperties.begin(); aLoop != m_aProperties.end(); ++aLoop, ++pOwnProps ) { pOwnProps->Name = aLoop->aProperty.Name; pOwnProps->Handle = aLoop->aProperty.Handle; pOwnProps->Attributes = (sal_Int16)aLoop->aProperty.Attributes; pOwnProps->Type = aLoop->aProperty.Type; } // as our property vector is sorted by handles, not by name, we have to sort aOwnProps ::std::sort(aOwnProps.getArray(), aOwnProps.getArray() + aOwnProps.getLength(), PropertyCompareByName()); // unfortunately the STL merge function does not allow the output range to overlap one of the input ranges, // so we need an extra sequence Sequence< Property > aOutput; aOutput.realloc(_rProps.getLength() + aOwnProps.getLength()); // do the merge ::std::merge( _rProps.getConstArray(), _rProps.getConstArray() + _rProps.getLength(), // input 1 aOwnProps.getConstArray(), aOwnProps.getConstArray() + aOwnProps.getLength(), // input 2 aOutput.getArray(), // output PropertyCompareByName() // compare operator ); // copy the output _rProps = aOutput; } //......................................................................... } // namespace comphelper //.........................................................................
7,433
311
<reponame>jurecuhalev/snowflake-connector-python<filename>src/snowflake/connector/result_set.py # # Copyright (c) 2012-2021 Snowflake Computing Inc. All rights reserved. # from collections import deque from concurrent.futures import Future from concurrent.futures.thread import ThreadPoolExecutor from logging import getLogger from typing import ( TYPE_CHECKING, Any, Callable, Deque, Dict, Iterable, Iterator, List, Optional, Tuple, Union, ) from .constants import IterUnit from .errors import NotSupportedError from .options import installed_pandas, pandas from .result_batch import ( ArrowResultBatch, DownloadMetrics, JSONResultBatch, ResultBatch, ) from .telemetry import TelemetryField from .time_util import get_time_millis if TYPE_CHECKING: # pragma: no cover from snowflake.connector.cursor import SnowflakeCursor if installed_pandas: from pyarrow import Table, concat_tables else: Table = None logger = getLogger(__name__) def result_set_iterator( first_batch_iter: Iterator[Tuple], unconsumed_batches: "Deque[Future[Iterator[Tuple]]]", unfetched_batches: Deque["ResultBatch"], final: Callable[[], None], prefetch_thread_num: int, **kw: Any, ) -> Union[ Iterator[Union[Dict, Exception]], Iterator[Union[Tuple, Exception]], Iterator[Table], ]: """Creates an iterator over some other iterators. Very similar to itertools.chain but we need some keywords to be propagated to ``_download`` functions later. We need this to have ResultChunks fall out of usage so that they can be garbage collected. Just like ``ResultBatch`` iterator, this might yield an ``Exception`` to allow users to continue iterating through the rest of the ``ResultBatch``. """ with ThreadPoolExecutor(prefetch_thread_num) as pool: # Fill up window logger.debug("beginning to schedule result batch downloads") for _ in range(min(prefetch_thread_num, len(unfetched_batches))): logger.debug( f"queuing download of result batch id: {unfetched_batches[0].id}" ) unconsumed_batches.append( pool.submit(unfetched_batches.popleft().create_iter, **kw) ) yield from first_batch_iter i = 1 while unconsumed_batches: logger.debug(f"user requesting to consume result batch {i}") # Submit the next un-fetched batch to the pool if unfetched_batches: logger.debug( f"queuing download of result batch id: {unfetched_batches[0].id}" ) future = pool.submit(unfetched_batches.popleft().create_iter, **kw) unconsumed_batches.append(future) future = unconsumed_batches.popleft() # this will raise an exception if one has occurred batch_iterator = future.result() logger.debug(f"user began consuming result batch {i}") yield from batch_iterator logger.debug(f"user finished consuming result batch {i}") i += 1 final() class ResultSet(Iterable[List[Any]]): """This class retrieves the results of a query with the historical strategy. It pre-downloads the first up to 4 ResultChunks (this doesn't include the 1st chunk as that is embedded in the response JSON from Snowflake) upon creating an Iterator on it. It also reports telemetry data about its ``ResultBatch``es once it's done iterating through them. Currently we do not support mixing multiple ``ResultBatch`` types and having different column definitions types per ``ResultBatch``. """ def __init__( self, cursor: "SnowflakeCursor", result_chunks: Union[List["JSONResultBatch"], List["ArrowResultBatch"]], prefetch_thread_num: int, ): self.batches = result_chunks self._cursor = cursor self.prefetch_thread_num = prefetch_thread_num def _report_metrics(self) -> None: """Report all metrics totalled up. 
This includes TIME_CONSUME_LAST_RESULT, TIME_DOWNLOADING_CHUNKS and TIME_PARSING_CHUNKS in that order. """ if self._cursor._first_chunk_time is not None: time_consume_last_result = ( get_time_millis() - self._cursor._first_chunk_time ) self._cursor._log_telemetry_job_data( TelemetryField.TIME_CONSUME_LAST_RESULT, time_consume_last_result ) metrics = self._get_metrics() if DownloadMetrics.download.value in metrics: self._cursor._log_telemetry_job_data( TelemetryField.TIME_DOWNLOADING_CHUNKS, metrics.get(DownloadMetrics.download.value), ) if DownloadMetrics.parse.value in metrics: self._cursor._log_telemetry_job_data( TelemetryField.TIME_PARSING_CHUNKS, metrics.get(DownloadMetrics.parse.value), ) def _finish_iterating(self): """Used for any cleanup after the result set iterator is done.""" self._report_metrics() def _can_create_arrow_iter(self) -> None: # For now we don't support mixed ResultSets, so assume first partition's type # represents them all head_type = type(self.batches[0]) if head_type != ArrowResultBatch: raise NotSupportedError( f"Trying to use arrow fetching on {head_type} which " f"is not ArrowResultChunk" ) def _fetch_arrow_batches( self, ) -> Iterator[Table]: """Fetches all the results as Arrow Tables, chunked by Snowflake back-end.""" self._can_create_arrow_iter() return self._create_iter(iter_unit=IterUnit.TABLE_UNIT, structure="arrow") def _fetch_arrow_all(self) -> Optional[Table]: """Fetches a single Arrow Table from all of the ``ResultBatch``.""" tables = list(self._fetch_arrow_batches()) if tables: return concat_tables(tables) else: return None def _fetch_pandas_batches(self, **kwargs) -> Iterator["pandas.DataFrame"]: """Fetches Pandas dataframes in batches, where batch refers to Snowflake Chunk. Thus, the batch size (the number of rows in dataframe) is determined by Snowflake's back-end. """ self._can_create_arrow_iter() return self._create_iter(iter_unit=IterUnit.TABLE_UNIT, structure="pandas") def _fetch_pandas_all(self, **kwargs) -> "pandas.DataFrame": """Fetches a single Pandas dataframe.""" dataframes = list(self._fetch_pandas_batches()) if dataframes: return pandas.concat(dataframes, **kwargs) return pandas.DataFrame(columns=self.batches[0].column_names) def _get_metrics(self) -> Dict[str, int]: """Sum up all the chunks' metrics and show them together.""" overall_metrics: Dict[str, int] = {} for c in self.batches: for n, v in c._metrics.items(): overall_metrics[n] = overall_metrics.get(n, 0) + v return overall_metrics def __iter__(self) -> Iterator[Tuple]: """Returns a new iterator through all batches with default values.""" return self._create_iter() def _create_iter( self, **kwargs, ) -> Union[ Iterator[Union[Dict, Exception]], Iterator[Union[Tuple, Exception]], Iterator[Table], Iterator["pandas.DataFrame"], ]: """Set up a new iterator through all batches with first 5 chunks downloaded. This function is a helper function to ``__iter__`` and it was introduced for the cases where we need to propagate some values to later ``_download`` calls. 
""" # add connection so that result batches can use sessions kwargs["connection"] = self._cursor.connection first_batch_iter = self.batches[0].create_iter(**kwargs) # Iterator[Tuple] Futures that have not been consumed by the user unconsumed_batches: Deque[Future[Iterator[Tuple]]] = deque() # batches that have not been fetched unfetched_batches = deque(self.batches[1:]) for num, batch in enumerate(unfetched_batches): logger.debug(f"result batch {num + 1} has id: {batch.id}") return result_set_iterator( first_batch_iter, unconsumed_batches, unfetched_batches, self._finish_iterating, self.prefetch_thread_num, **kwargs, ) def total_row_index(self) -> int: """Returns the total rowcount of the ``ResultSet`` .""" total = 0 for p in self.batches: total += p.rowcount return total
3,709
407
package com.alibaba.sreworks.health.common.utils;

import lombok.Builder;
import lombok.Getter;

import java.util.ArrayList;
import java.util.List;

/**
 * Common utility class
 *@author <EMAIL>
 *@date 2021/12/29 20:35
 */
public class CommonTools {

    /**
     * Split a range of indices into stepped sub-ranges
     * @param size total number of elements
     * @param step step size
     * @return
     */
    public static List<IndexStepRange> generateStepIndex(int size, int step) {
        if (size <= 0 || step <= 0) {
            return null;
        }

        List<IndexStepRange> result = new ArrayList<>();
        int headIndex = 0;
        while (headIndex < size) {
            int tailIndex = headIndex + step;
            if (tailIndex >= size) {
                tailIndex = size;
            }
            IndexStepRange indexStepRange = IndexStepRange.builder().startIndex(headIndex).endIndex(tailIndex).build();
            result.add(indexStepRange);
            headIndex = tailIndex;
        }
        return result;
    }

    @Builder
    @Getter
    static public class IndexStepRange {
        /**
         * start index
         */
        private int startIndex;

        /**
         * end index
         */
        private int endIndex;
    }
}
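A quick usage sketch for the helper above — a minimal example, assuming Lombok generates the usual getStartIndex()/getEndIndex() accessors for @Getter; the CommonToolsExample class name is illustrative. Splitting 10 elements with a step of 3 yields the half-open ranges [0, 3), [3, 6), [6, 9), [9, 10).

import java.util.List;

import com.alibaba.sreworks.health.common.utils.CommonTools;
import com.alibaba.sreworks.health.common.utils.CommonTools.IndexStepRange;

public class CommonToolsExample {
    public static void main(String[] args) {
        // 10 elements processed in batches of at most 3
        List<IndexStepRange> ranges = CommonTools.generateStepIndex(10, 3);
        for (IndexStepRange range : ranges) {
            // prints: [0, 3) [3, 6) [6, 9) [9, 10)
            System.out.printf("[%d, %d) ", range.getStartIndex(), range.getEndIndex());
        }
    }
}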
607
334
/* * This file is part of Sponge, licensed under the MIT License (MIT). * * Copyright (c) SpongePowered <https://www.spongepowered.org> * Copyright (c) contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ package org.spongepowered.common.advancement.criterion; import com.google.common.collect.ImmutableSet; import org.checkerframework.checker.nullness.qual.Nullable; import org.spongepowered.api.advancement.criteria.AdvancementCriterion; import org.spongepowered.api.advancement.criteria.OperatorCriterion; import org.spongepowered.api.advancement.criteria.trigger.FilteredTrigger; import java.util.Arrays; import java.util.Collection; import java.util.Objects; import java.util.Optional; public class SpongeOperatorCriterion implements OperatorCriterion, DefaultedAdvancementCriterion { private final String name; private final Collection<AdvancementCriterion> criteria; private @Nullable Collection<AdvancementCriterion> recursiveChildrenCriteria; private @Nullable Collection<AdvancementCriterion> leafChildrenCriteria; SpongeOperatorCriterion(final String namePrefix, final Collection<AdvancementCriterion> criteria) { this.name = namePrefix + Arrays.toString(criteria.stream().map(AdvancementCriterion::name).toArray(String[]::new)); this.criteria = ImmutableSet.copyOf(criteria); } @Override public String name() { return this.name; } @Override public Optional<FilteredTrigger<?>> trigger() { return Optional.empty(); } private Collection<AdvancementCriterion> getAllChildrenCriteria0(final boolean onlyLeaves) { final ImmutableSet.Builder<AdvancementCriterion> criteria = ImmutableSet.builder(); if (!onlyLeaves) { criteria.add(this); } for (final AdvancementCriterion criterion : this.criteria) { if (criterion instanceof OperatorCriterion) { criteria.addAll(((SpongeOperatorCriterion) criterion).getAllChildrenCriteria0(onlyLeaves)); } } return criteria.build(); } private Collection<AdvancementCriterion> getRecursiveChildren() { if (this.recursiveChildrenCriteria == null) { this.recursiveChildrenCriteria = this.getAllChildrenCriteria0(false); } return this.recursiveChildrenCriteria; } @Override public Collection<AdvancementCriterion> criteria() { return this.criteria; } @Override public Collection<AdvancementCriterion> leafCriteria() { if (this.leafChildrenCriteria == null) { this.leafChildrenCriteria = this.getAllChildrenCriteria0(true); } return this.leafChildrenCriteria; } @Override public Collection<AdvancementCriterion> find(final String name) { return this.getRecursiveChildren().stream() .filter(c -> 
c.name().equals(name)).collect(ImmutableSet.toImmutableSet()); } @Override public Optional<AdvancementCriterion> findFirst(final String name) { return this.getRecursiveChildren().stream() .filter(c -> c.name().equals(name)).findFirst(); } @Override public boolean equals(Object o) { if (this == o) { return true; } if (!(o instanceof SpongeOperatorCriterion)) { return false; } SpongeOperatorCriterion that = (SpongeOperatorCriterion) o; if (!Objects.equals(this.name, that.name)) { return false; } return Objects.equals(this.criteria, that.criteria); } @Override public int hashCode() { int result = this.name != null ? this.name.hashCode() : 0; result = 31 * result + (this.criteria != null ? this.criteria.hashCode() : 0); return result; } }
1,683
1,016
<reponame>peter-ls/kylo<gh_stars>1000+ package com.thinkbiganalytics.nifi.v2.hdfs; /*- * #%L * thinkbig-nifi-hadoop-properties-v1.2 * %% * Copyright (C) 2017 ThinkBig Analytics * %% * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * #L% */ import org.apache.nifi.components.PropertyDescriptor; import org.apache.nifi.components.Validator; /** * Additional properties for Hadoop processors (NiFi version 1.2+) */ public class AdditionalProperties { public static PropertyDescriptor getHdfsAdditionalClasspathResources() { return new PropertyDescriptor.Builder() .name("Additional Classpath Resources") .description("A comma-separated list of paths to files and/or directories that will be added to the classpath. When specifying a " + "directory, all files with in the directory will be added to the classpath, but further sub-directories will not be included.") .required(false) .addValidator(Validator.VALID) .dynamicallyModifiesClasspath(true) .build(); } public static String getNiFiVersion() { return "v1.2+"; } }
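A minimal sketch of how a processor might expose this descriptor among its supported properties — the class and method names below are illustrative, not part of the Kylo or NiFi sources:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

import org.apache.nifi.components.PropertyDescriptor;

import com.thinkbiganalytics.nifi.v2.hdfs.AdditionalProperties;

public class ClasspathAwareDescriptors {

    // builds the descriptor list once, including the dynamically-modifies-classpath property
    public static List<PropertyDescriptor> supportedDescriptors() {
        List<PropertyDescriptor> descriptors = new ArrayList<>();
        descriptors.add(AdditionalProperties.getHdfsAdditionalClasspathResources());
        return Collections.unmodifiableList(descriptors);
    }
}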
565
2,542
{ "Default": { }, "Tests": [ { "Name": "FMScale_SP3R", "Type": "V2_DllTest", "Owners": "chuxin", "Environment": "Iaas", "ResourcesRequired": "Server:21;Group=Failover-Scale", "TestExecutionParameters": { "SetupType": "XCopy", "SetupTimeout": "3600", "ConfigName": "WinFabricTest\\Config\\Failover_Scale_Sanity.txt", "DllPath": "MS.Test.WinFabric.Cases.dll", "ClassName": "FailoverTestCases", "TaskName": "FMScale_SP3R", "InstalledFeaturesFile": "Config\\FailoverScalePHWInstalledFeatures.txt", "ExecutionTimeout": "64800", "CleanupTimeout": "28800" } } ] }
328
1,093
/* * Copyright 2015-2020 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.springframework.integration.file.remote; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; import static org.assertj.core.api.Assertions.fail; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.verifyNoMoreInteractions; import static org.mockito.Mockito.when; import java.io.ByteArrayInputStream; import java.io.File; import java.io.IOException; import java.io.InputStream; import java.nio.file.Files; import java.nio.file.Path; import java.util.UUID; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; import org.springframework.beans.factory.BeanFactory; import org.springframework.core.io.ByteArrayResource; import org.springframework.expression.common.LiteralExpression; import org.springframework.integration.file.remote.session.Session; import org.springframework.integration.file.remote.session.SessionFactory; import org.springframework.integration.file.support.FileExistsMode; import org.springframework.messaging.MessageDeliveryException; import org.springframework.messaging.MessagingException; import org.springframework.messaging.support.GenericMessage; /** * @author <NAME> * @author <NAME> * * @since 4.1.7 * */ public class RemoteFileTemplateTests { private RemoteFileTemplate<Object> template; private Session<Object> session; @TempDir Path folder; private File file; @SuppressWarnings("unchecked") @BeforeEach public void setUp() throws Exception { SessionFactory<Object> sessionFactory = mock(SessionFactory.class); this.template = new RemoteFileTemplate<>(sessionFactory); this.template.setRemoteDirectoryExpression(new LiteralExpression("/foo")); this.template.setBeanFactory(mock(BeanFactory.class)); this.template.afterPropertiesSet(); this.session = mock(Session.class); when(sessionFactory.getSession()).thenReturn(this.session); this.file = Files.createTempFile(this.folder, null, null).toFile(); } @Test public void testReplace() throws Exception { this.template.send(new GenericMessage<>(this.file), FileExistsMode.REPLACE); verify(this.session).write(any(InputStream.class), anyString()); } @Test public void testAppend() throws Exception { this.template.setUseTemporaryFileName(false); this.template.send(new GenericMessage<>(this.file), FileExistsMode.APPEND); verify(this.session).append(any(InputStream.class), anyString()); } @Test public void testFailExists() throws Exception { when(session.exists(anyString())).thenReturn(true); try { this.template.send(new GenericMessage<>(this.file), FileExistsMode.FAIL); fail("Expected exception"); } catch (MessagingException e) { 
assertThat(e.getMessage()).contains("The destination file already exists"); } verify(this.session, never()).write(any(InputStream.class), anyString()); } @Test public void testIgnoreExists() throws Exception { when(session.exists(anyString())).thenReturn(true); this.template.send(new GenericMessage<>(this.file), FileExistsMode.IGNORE); verify(this.session, never()).write(any(InputStream.class), anyString()); } @Test public void testFailNotExists() throws Exception { when(session.exists(anyString())).thenReturn(false); this.template.send(new GenericMessage<>(this.file), FileExistsMode.FAIL); verify(this.session).write(any(InputStream.class), anyString()); } @Test public void testIgnoreNotExists() throws Exception { when(session.exists(anyString())).thenReturn(false); this.template.send(new GenericMessage<>(this.file), FileExistsMode.IGNORE); verify(this.session).write(any(InputStream.class), anyString()); } @Test public void testStream() throws IOException { ByteArrayInputStream stream = new ByteArrayInputStream("foo".getBytes()); this.template.send(new GenericMessage<>(stream), FileExistsMode.IGNORE); verify(this.session).write(eq(stream), any()); } @Test public void testString() throws IOException { this.template.send(new GenericMessage<>("foo"), FileExistsMode.IGNORE); verify(this.session).write(any(InputStream.class), any()); } @Test public void testBytes() throws IOException { this.template.send(new GenericMessage<>("foo".getBytes()), FileExistsMode.IGNORE); verify(this.session).write(any(InputStream.class), any()); } @Test public void testResource() throws IOException { this.template.send(new GenericMessage<>(new ByteArrayResource("foo".getBytes())), FileExistsMode.IGNORE); verify(this.session).write(any(InputStream.class), any()); } @Test public void testMissingFile() { this.template.send(new GenericMessage<>(new File(UUID.randomUUID().toString())), FileExistsMode.IGNORE); verifyNoMoreInteractions(this.session); } @Test public void testInvalid() { assertThatThrownBy(() -> this.template .send(new GenericMessage<>(new Object()), FileExistsMode.IGNORE)) .isInstanceOf(MessageDeliveryException.class) .hasCauseInstanceOf(IllegalArgumentException.class) .hasMessageContaining("Unsupported payload type"); } }
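Outside of a unit test, the same template API is wired against a real SessionFactory. Below is a minimal sketch under that assumption — the "/remote" directory and the upload() method are placeholders, and send(...) returning the generated remote path follows the RemoteFileTemplate contract exercised (but not asserted) in the test above:

import java.io.File;

import org.springframework.expression.common.LiteralExpression;
import org.springframework.integration.file.remote.RemoteFileTemplate;
import org.springframework.integration.file.remote.session.SessionFactory;
import org.springframework.integration.file.support.FileExistsMode;
import org.springframework.messaging.support.GenericMessage;

public class RemoteFileTemplateUsage {

    // sends a local file to the remote "/remote" directory, replacing any existing file of the same name
    public static <F> String upload(SessionFactory<F> sessionFactory, File localFile) {
        RemoteFileTemplate<F> template = new RemoteFileTemplate<>(sessionFactory);
        template.setRemoteDirectoryExpression(new LiteralExpression("/remote"));
        // in a Spring application context the container would also inject a BeanFactory,
        // as the test does with a mock, before afterPropertiesSet() runs
        template.afterPropertiesSet();
        return template.send(new GenericMessage<>(localFile), FileExistsMode.REPLACE);
    }
}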
1,902
307
<gh_stars>100-1000
#!/usr/bin/python
# -*- coding: UTF-8 -*-

"""
@author: Bryan
@file: send_mails.py
@time: 2019/03/13 12:55
@desc: Email sending module
"""
import smtplib
from email.header import Header
from email.mime.text import MIMEText

mail_receivers = ["<EMAIL>", "<EMAIL>"]


class SendMail(object):
    def __init__(self):
        """
        Initialize the mail module
        """
        try:
            self.mail_host = "smtp.qq.com"          # mail server
            self.mail_port = "25"                   # mail server port
            self.mail_user = "<EMAIL>"              # mail username
            self.mail_pwd = "<PASSWORD>"            # mail authorization code
            self.mail_receivers = mail_receivers    # recipients, as a list
            smtp = smtplib.SMTP()
            smtp.connect(self.mail_host, self.mail_port)
            smtp.login(self.mail_user, self.mail_pwd)
            self.smtp = smtp
        except Exception:
            print('Send mail ----> initialization failed! Please check that the username and password are correct!')

    def send_mails(self, content):
        """
        Send an email
        """
        try:
            message = MIMEText(content, 'plain', 'utf-8')
            message['From'] = Header("Login monitoring bot 小咪", 'utf-8')
            message['To'] = Header("User login system notification", 'utf-8')
            subject = 'User login notification'
            message['Subject'] = Header(subject, 'utf-8')
            self.smtp.sendmail(self.mail_user, self.mail_receivers, message.as_string())
            print('Email sent successfully!')
        except Exception as e:
            print('Send mail ----> failed! Reason:', e)

    def mail_close(self):
        """
        Close the mail connection
        """
        self.smtp.close()


if __name__ == "__main__":
    sendMail = SendMail()                 # instantiate
    sendMail.send_mails("Test email")     # call send_mails to send an email
    sendMail.mail_close()                 # close the connection
1,107
681
{ "alias":"peteDeleteTop", "name":"A dataverse for testing", "affiliation":"Affiliation value", "contactEmail":"<EMAIL>", "permissionRoot":false, "description":"A dataverse that's added for testing purposes." }
82
344
# *********************************** # |docname| - gunincorn configuration # *********************************** # This file configures gunicorn to use Uvicorn to run FastAPI which runs the BookServer. # # See also the `gunicorn config docs <https://docs.gunicorn.org/en/stable/configure.html#configuration-file>`_. # # Imports # ======= # These are listed in the order prescribed by `PEP 8`_. # # Standard library # ---------------- import multiprocessing # Third-party imports # ------------------- # None. # # Local application imports # ------------------------- # None. # # # Configuration # ============= # `wsgi_app <https://docs.gunicorn.org/en/stable/settings.html#wsgi-app>`_: A WSGI application path in pattern ``$(MODULE_NAME):$(VARIABLE_NAME)``. wsgi_app = "bookserver.main:app" # `user <https://docs.gunicorn.org/en/stable/settings.html#user>`_: Switch worker processes to run as this user. user = "www-data" # `group <https://docs.gunicorn.org/en/stable/settings.html#group>`_: Switch worker process to run as this group. group = "www-data" # `workers <https://docs.gunicorn.org/en/stable/settings.html#workers>`_: The number of worker processes for handling requests. Pick this based on CPU count. workers = multiprocessing.cpu_count() * 2 + 1 # `worker_class <https://docs.gunicorn.org/en/stable/settings.html#worker-class>`_: The type of workers to use. Use `uvicorn's worker class for gunicorn <https://www.uvicorn.org/deployment/#gunicorn>`_. worker_class = "uvicorn.workers.UvicornWorker"
484
439
<gh_stars>100-1000 /* * Copyright (C) 2014 <NAME>. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.github.pedrovgs.tuentitv.ui.fragment; import android.content.Context; import android.os.Bundle; import android.os.Handler; import android.support.v17.leanback.app.BackgroundManager; import android.support.v17.leanback.widget.ArrayObjectAdapter; import android.support.v17.leanback.widget.HeaderItem; import android.support.v17.leanback.widget.ListRow; import android.support.v17.leanback.widget.ListRowPresenter; import android.support.v17.leanback.widget.OnItemViewClickedListener; import android.support.v17.leanback.widget.OnItemViewSelectedListener; import android.support.v17.leanback.widget.Presenter; import android.support.v17.leanback.widget.Row; import android.support.v17.leanback.widget.RowPresenter; import android.util.DisplayMetrics; import android.view.View; import com.github.pedrovgs.tuentitv.R; import com.github.pedrovgs.tuentitv.presenter.MainPresenter; import com.github.pedrovgs.tuentitv.ui.data.CardInfo; import com.github.pedrovgs.tuentitv.ui.data.IconInfo; import com.github.pedrovgs.tuentitv.ui.data.ImageInfo; import com.github.pedrovgs.tuentitv.ui.picasso.PicassoBackgroundManagerTarget; import com.github.pedrovgs.tuentitv.ui.picasso.transformation.BlurTransformation; import com.github.pedrovgs.tuentitv.ui.picasso.transformation.GrayScaleTransformation; import com.github.pedrovgs.tuentitv.ui.viewpresenter.CardPresenter; import com.github.pedrovgs.tuentitv.ui.viewpresenter.IconPresenter; import com.github.pedrovgs.tuentitv.ui.viewpresenter.ImagePresenter; import com.squareup.picasso.Picasso; import java.util.List; import javax.inject.Inject; /** * BrowseFragment extension created to work as main application fragment and to show most important * information related to the user. Favorite contacts, contacts, recent conversation, media gallery * elements and other information like application preferences. * * @author <NAME>. 
*/ public class MainFragment extends BrowseBaseFragment implements MainPresenter.View { private static final int CARD_WIDTH_IN_DP = 260; private static final int CARD_HEIGHT_IN_DP = 150; private static final int UPDATE_BACKGROUND_IMAGE_DELAY_MILLIS = 700; private static final int FAVORITES_ROW = 1; private static final int CONVERSATIONS_ROW = 2; private static final int CONTACTS_ROW = 3; private static final int MEDIA_ROW = 4; private static final int PREFERENCES_ROW = 5; @Inject MainPresenter presenter; private DisplayMetrics metrics; private PicassoBackgroundManagerTarget backgroundTarget; private Handler handler; private ChangeBackground lastChangeBackgroundRunnable; @Override public void onActivityCreated(Bundle savedInstanceState) { super.onActivityCreated(savedInstanceState); this.handler = new Handler(); presenter.setView(this); configureColors(); configureApplicationIcon(); prepareBackgroundManager(); hookListeners(); presenter.loadData(); } @Override public void showDefaultBackground() { Picasso.with(getActivity()) .load(R.drawable.fragment_default_background) .placeholder(R.drawable.fragment_default_background) .resize(metrics.widthPixels, metrics.heightPixels) .transform(new GrayScaleTransformation()) .transform(new BlurTransformation(getActivity())) .centerCrop() .into(backgroundTarget); } @Override public void updateBackground(String imageUrl) { if (lastChangeBackgroundRunnable != null) { handler.removeCallbacks(lastChangeBackgroundRunnable); } lastChangeBackgroundRunnable = new ChangeBackground(getActivity(), imageUrl, backgroundTarget); handler.postDelayed(lastChangeBackgroundRunnable, UPDATE_BACKGROUND_IMAGE_DELAY_MILLIS); } @Override public void showMainInformation(List<CardInfo> favorites, List<CardInfo> conversations, List<CardInfo> contacts, List<ImageInfo> mediaElements, List<IconInfo> preferences) { ArrayObjectAdapter rowsAdapter = new ArrayObjectAdapter(new ListRowPresenter()); CardPresenter bigCardPresenter = new CardPresenter(CARD_WIDTH_IN_DP, CARD_HEIGHT_IN_DP); CardPresenter smallCarPresenter = new CardPresenter(); addCardInfoElementsToRowsAdapter(R.string.favorites_item_title, favorites, rowsAdapter, smallCarPresenter, FAVORITES_ROW); addCardInfoElementsToRowsAdapter(R.string.recent_conversation_item_title, conversations, rowsAdapter, bigCardPresenter, CONVERSATIONS_ROW); addCardInfoElementsToRowsAdapter(R.string.contacts_item_title, contacts, rowsAdapter, smallCarPresenter, CONTACTS_ROW); addImageInfoElementsToRowAdapter(R.string.media_elements_item_title, mediaElements, rowsAdapter, new ImagePresenter(), MEDIA_ROW); addIconInfoElementsToRowAdapter(getResources().getString(R.string.preferences), preferences, rowsAdapter, new IconPresenter(), PREFERENCES_ROW); setAdapter(rowsAdapter); } @Override public void closeView() { getActivity().finish(); } @Override public void cancelPendingBackgroundUpdates() { Picasso.with(getActivity()).cancelRequest(backgroundTarget); handler.removeCallbacks(lastChangeBackgroundRunnable); } private void addCardInfoElementsToRowsAdapter(int title, List<CardInfo> elements, ArrayObjectAdapter rowsAdapter, Presenter presenter, int id) { ArrayObjectAdapter listRowAdapter = new ArrayObjectAdapter(presenter); for (Object element : elements) { listRowAdapter.add(element); } HeaderItem header = new HeaderItem(id, getString(title), null); rowsAdapter.add(new ListRow(header, listRowAdapter)); } private void addImageInfoElementsToRowAdapter(int title, List<ImageInfo> elements, ArrayObjectAdapter rowsAdapter, Presenter presenter, int id) { 
ArrayObjectAdapter listRowAdapter = new ArrayObjectAdapter(presenter); for (Object element : elements) { listRowAdapter.add(element); } HeaderItem header = new HeaderItem(id, getString(title), null); rowsAdapter.add(new ListRow(header, listRowAdapter)); } private void addIconInfoElementsToRowAdapter(String title, List<IconInfo> preferences, ArrayObjectAdapter rowsAdapter, Presenter presenter, int id) { ArrayObjectAdapter listRowAdapter = new ArrayObjectAdapter(presenter); for (IconInfo iconInfo : preferences) { listRowAdapter.add(iconInfo); } rowsAdapter.add(new ListRow(new HeaderItem(id, title, ""), listRowAdapter)); } private void prepareBackgroundManager() { BackgroundManager backgroundManager = BackgroundManager.getInstance(getActivity()); backgroundManager.attach(getActivity().getWindow()); backgroundTarget = new PicassoBackgroundManagerTarget(backgroundManager); metrics = new DisplayMetrics(); getActivity().getWindowManager().getDefaultDisplay().getMetrics(metrics); } private void configureColors() { setBrandColor(getResources().getColor(R.color.primary_color)); setSearchAffordanceColor(getResources().getColor(R.color.primary_color_dark)); } private void configureApplicationIcon() { setBadgeDrawable(getResources().getDrawable(R.drawable.icn_wink_main_fragment)); } private void hookListeners() { configureOnItemSelectedListener(); configureOnSearchClickedListener(); } private void configureOnSearchClickedListener() { setOnSearchClickedListener(new View.OnClickListener() { @Override public void onClick(View view) { presenter.onSearchIconClicked(); } }); } private void configureOnItemSelectedListener() { setOnItemViewSelectedListener(new OnItemViewSelectedListener() { @Override public void onItemSelected(Presenter.ViewHolder viewHolder, Object item, RowPresenter.ViewHolder viewHolder1, Row row) { if (row.getId() < MEDIA_ROW) { presenter.onCardInfoSelected((CardInfo) item); } else if (row.getId() == MEDIA_ROW) { presenter.onImageInfoSelected((ImageInfo) item); } else if (row.getId() == PREFERENCES_ROW) { presenter.onPreferencesSelected(); } } }); setOnItemViewClickedListener(new OnItemViewClickedListener() { @Override public void onItemClicked(Presenter.ViewHolder viewHolder, Object item, RowPresenter.ViewHolder viewHolder1, Row row) { if (row.getId() == PREFERENCES_ROW) { int id = ((IconInfo) item).getIconId(); switch (id) { case R.drawable.icn_settings_log_out: presenter.logout(); break; default: } } else if (row.getId() == MEDIA_ROW) { presenter.onImageInfoClicked((ImageInfo) item); } else if (row.getId() < MEDIA_ROW) { presenter.onCardInfoClicked((CardInfo) item); } } }); } private static class ChangeBackground implements Runnable { private final Context context; private String photo; private final PicassoBackgroundManagerTarget backgroundTarget; public ChangeBackground(Context context, String photo, PicassoBackgroundManagerTarget backgroundTarget) { this.context = context; this.photo = photo; this.backgroundTarget = backgroundTarget; } @Override public void run() { Picasso.with(context).cancelRequest(backgroundTarget); Picasso.with(context) .load(photo) .transform(new GrayScaleTransformation()) .transform(new BlurTransformation(context)) .into(backgroundTarget); } } }
3,368
897
/* This program is about merging two Linked list. Here the user is going to enter values of first and second linked list. It will merge the two linked lists into one linked list by comparing the each and every node values and convert into one linked list. */ #include <stdio.h> #include <stdlib.h> struct Node { int data; struct Node *next; } *first = NULL, *second = NULL, *third = NULL; void Create(int Array1[], int size) { int i; struct Node *temp, *last; first = (struct Node *)malloc(sizeof(struct Node)); first->data = Array1[0]; first->next = NULL; last = first; for (i = 1; i < size; i++) { temp = (struct Node *)malloc(sizeof(struct Node)); temp->data = Array1[i]; temp->next = NULL; last->next = temp; last = temp; } } void Create2(int Array2[], int size) { int i; struct Node *temp, *last; second = (struct Node *)malloc(sizeof(struct Node)); second->data = Array2[0]; second->next = NULL; last = second; for (i = 1; i < size; i++) { temp = (struct Node *)malloc(sizeof(struct Node)); temp->data = Array2[i]; temp->next = NULL; last->next = temp; last = temp; } } void Display(struct Node *p) { while (p != NULL) { printf("%d ", p->data); p = p->next; } printf("\n"); } void Merge(struct Node *p, struct Node *q) { struct Node *last; /*Merging the two linkedlists and making it as third linkedlist if the condition satisfies*/ if (p->data < q->data) { third = last = p; p = p->next; third->next = NULL; } else { third = last = q; q = q->next; third->next = NULL; } while (p && q) { if (p->data < q->data) { last->next = p; last = p; p = p->next; last->next = NULL; } else { last->next = q; last = q; q = q->next; last->next = NULL; } } /* Checking whether any node is left in the first and second linkedlist if so merging those nodes as well. */ if (p) last->next = p; if (q) last->next = q; } int main() { int *Array1, *Array2; int size; printf("Enter the size of the array: \n"); scanf("%d", &size); Array1 = (int *)malloc(size * sizeof(int)); Array2 = (int *)malloc(size * sizeof(int)); printf("Enter first LL Elements: \n"); for (int i = 0; i < size; i++) { scanf("%d", &Array1[i]); } Create(Array1, size); printf("Enter second LL Elements: \n"); for (int i = 0; i < size; i++) { scanf("%d", &Array2[i]); } Create2(Array2, size); Merge(first, second); Display(third); return 0; } // Output:- /* Enter the size of the array: 5 Enter first LL Elements: 2 4 7 8 9 Enter second LL Elements: 1 2 3 4 5 1 2 2 3 4 4 5 7 8 9 Time Complexity:- O(m+n) Space Complexity:- O(1) */
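The two-pointer merge described in the header comment is language-independent; for comparison, a minimal Java sketch of the same algorithm (the Node and merge names here are illustrative, not tied to the C program above):

public class SortedListMerge {

    static final class Node {
        int data;
        Node next;

        Node(int data) {
            this.data = data;
        }
    }

    // Merges two already-sorted singly linked lists into one sorted list, reusing the existing nodes.
    static Node merge(Node p, Node q) {
        Node dummy = new Node(0);   // placeholder head avoids special-casing the first comparison
        Node last = dummy;
        while (p != null && q != null) {
            if (p.data < q.data) {
                last.next = p;
                p = p.next;
            } else {
                last.next = q;
                q = q.next;
            }
            last = last.next;
        }
        // attach whatever remains of the list that was not exhausted
        last.next = (p != null) ? p : q;
        return dummy.next;
    }
}

As in the C version, the merge runs in O(m+n) time with O(1) extra space.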
1,326
335
{ "word": "Conjunction", "definitions": [ "A word used to connect clauses or sentences or to coordinate words in the same clause (e.g. and, but, if).", "The action or an instance of two or more events or things occurring at the same point in time or space.", "An alignment of two planets or other celestial objects so that they appear to be in the same, or nearly the same, place in the sky." ], "parts-of-speech": "Noun" }
150
892
{ "schema_version": "1.2.0", "id": "GHSA-94xh-2fmc-xf5j", "modified": "2021-10-08T21:15:35Z", "published": "2020-10-27T20:30:20Z", "aliases": [ "CVE-2020-7752" ], "summary": "command injection vulnerability", "details": "### Impact\ncommand injection vulnerability\n\n### Patches\nProblem was fixed with a shell string sanitation fix. Please upgrade to version >= 4.27.11\n\n### Workarounds\nIf you cannot upgrade, be sure to check or sanitize service parameter strings that are passed to si.inetChecksite()\n\n### References\n_Are there any links users can visit to find out more?_\n\n### For more information\nIf you have any questions or comments about this advisory:\n* Open an issue in [systeminformation](https://github.com/sebhildebrandt/systeminformation/issues/new?template=bug_report.md)", "severity": [ { "type": "CVSS_V3", "score": "CVSS:3.1/AV:N/AC:L/PR:L/UI:N/S:U/C:H/I:H/A:H" } ], "affected": [ { "package": { "ecosystem": "npm", "name": "systeminformation" }, "ranges": [ { "type": "ECOSYSTEM", "events": [ { "introduced": "0" }, { "fixed": "4.27.11" } ] } ] } ], "references": [ { "type": "WEB", "url": "https://github.com/sebhildebrandt/systeminformation/security/advisories/GHSA-94xh-2fmc-xf5j" }, { "type": "ADVISORY", "url": "https://nvd.nist.gov/vuln/detail/CVE-2020-7752" }, { "type": "WEB", "url": "https://github.com/sebhildebrandt/systeminformation/commit/931fecaec2c1a7dcc10457bb8cd552d08089da61" }, { "type": "WEB", "url": "https://github.com/sebhildebrandt/systeminformation/blob/master/lib/internet.js" }, { "type": "WEB", "url": "https://snyk.io/vuln/SNYK-JS-SYSTEMINFORMATION-1021909" }, { "type": "WEB", "url": "https://www.npmjs.com/package/systeminformation" }, { "type": "PACKAGE", "url": "https://github.com/sebhildebrandt/systeminformation" } ], "database_specific": { "cwe_ids": [ "CWE-78" ], "severity": "HIGH", "github_reviewed": true } }
1,086
642
#pragma once #include <QtCore/QUuid> #include "Action.h" namespace AxiomModel { class SetNumRangeAction : public Action { public: SetNumRangeAction(const QUuid &uuid, double beforeMin, double beforeMax, uint32_t beforeStep, double afterMin, double afterMax, uint32_t afterStep, ModelRoot *root); static std::unique_ptr<SetNumRangeAction> create(const QUuid &uuid, double beforeMin, double beforeMax, uint32_t beforeStep, double afterMin, double afterMax, uint32_t afterStep, ModelRoot *root); void forward(bool first) override; void backward() override; const QUuid &uuid() const { return _uuid; } double beforeMin() const { return _beforeMin; } double beforeMax() const { return _beforeMax; } uint32_t beforeStep() const { return _beforeStep; } double afterMin() const { return _afterMin; } double afterMax() const { return _afterMax; } uint32_t afterStep() const { return _afterStep; } private: QUuid _uuid; double _beforeMin; double _beforeMax; uint32_t _beforeStep; double _afterMin; double _afterMax; uint32_t _afterStep; }; }
602
666
<filename>wasp/src/main/java/com/orhanobut/wasp/utils/MimeTypes.java package com.orhanobut.wasp.utils; public final class MimeTypes { private MimeTypes() { // no instance } public static final String CONTENT_TYPE_FORM_URL_ENCODED = "application/x-www-form-urlencoded"; public static final String CONTENT_TYPE_MULTIPART = "multipart/form-data"; public static final String CONTENT_JSON = "application/json"; }
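A small usage sketch for these constants, here with the JDK's HttpURLConnection; the endpoint URL is a placeholder:

import java.net.HttpURLConnection;
import java.net.URL;

import com.orhanobut.wasp.utils.MimeTypes;

public class MimeTypesUsage {

    // opens a POST connection whose body is declared as JSON
    public static HttpURLConnection openJsonConnection() throws Exception {
        URL url = new URL("https://example.com/api");
        HttpURLConnection connection = (HttpURLConnection) url.openConnection();
        connection.setRequestMethod("POST");
        connection.setDoOutput(true);
        connection.setRequestProperty("Content-Type", MimeTypes.CONTENT_JSON);
        return connection;
    }
}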
146
2,494
<filename>deps/mozjs/incs/mozilla/IntegerPrintfMacros.h /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ /* vim: set ts=8 sts=2 et sw=2 tw=80: */ /* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ /* Implements the C99 <inttypes.h> interface, minus the SCN* format macros. */ #ifndef mozilla_IntegerPrintfMacros_h_ #define mozilla_IntegerPrintfMacros_h_ /* * MSVC++ doesn't include <inttypes.h>, even in versions shipping <stdint.h>, so * we have to reimplement it there. Note: <inttypes.h> #includes <stdint.h>. * * Note that this header DOES NOT implement <inttypes.h>'s scanf macros. MSVC's * scanf doesn't have sufficient format specifier support to implement them * (specifically, to implement scanning into an 8-bit location). * * http://stackoverflow.com/questions/3036396/scanfd-char-char-as-int-format-string * * Moreover, scanf is a footgun: if the input number exceeds the bounds of the * target type, behavior is undefined (in the compiler sense: that is, this code * could overwrite your hard drive with zeroes): * * uint8_t u; * sscanf("256", "%" SCNu8, &u); // BAD * * This header will sometimes provide SCN* macros, by dint of being implemented * using <inttypes.h>. But for these reasons, *never* use them! */ #if defined(MOZ_CUSTOM_INTTYPES_H) # include MOZ_CUSTOM_INTTYPES_H #elif defined(_MSC_VER) # include "mozilla/MSIntTypes.h" #else # include <inttypes.h> #endif /* * Fix up Android's broken [u]intptr_t inttype macros. Android's PRI*PTR * macros are defined as "ld", but sizeof(long) is 8 and sizeof(intptr_t) * is 4 on 32-bit Android. TestTypeTraits.cpp asserts that these new macro * definitions match the actual type sizes seen at compile time. */ #if defined(ANDROID) && !defined(__LP64__) # undef PRIdPTR /* intptr_t */ # define PRIdPTR "d" /* intptr_t */ # undef PRIiPTR /* intptr_t */ # define PRIiPTR "i" /* intptr_t */ # undef PRIoPTR /* uintptr_t */ # define PRIoPTR "o" /* uintptr_t */ # undef PRIuPTR /* uintptr_t */ # define PRIuPTR "u" /* uintptr_t */ # undef PRIxPTR /* uintptr_t */ # define PRIxPTR "x" /* uintptr_t */ # undef PRIXPTR /* uintptr_t */ # define PRIXPTR "X" /* uintptr_t */ #endif /** * For printing size_t. */ #define PRIuSIZE PRIuPTR #endif /* mozilla_IntegerPrintfMacros_h_ */
949
649
package net.thucydides.junit.internals; import org.junit.Test; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.is; public class WhenInvokingMethods { static class TestClass { public String foo() { return "bar"; } } static class TestClassWithIllegalAccessException { public String foo() throws Exception { throw new IllegalAccessException(); } } static class TestClassWithInvocationTargetException { public String foo() throws Exception { throw new InvocationTargetException(new Exception()); } } @Test public void should_return_the_result_of_the_invoked_method() throws NoSuchMethodException { TestClass testClass = new TestClass(); Method foo = testClass.getClass().getMethod("foo"); String result = (String) MethodInvoker.on(testClass).run(foo); assertThat(result, is("bar")); } @Test(expected = IllegalArgumentException.class) public void should_throw_an_IllegalArgumentError_if_the_method_cannot_be_invoked() throws Exception { TestClassWithInvocationTargetException testClass = new TestClassWithInvocationTargetException(); Method foo = testClass.getClass().getMethod("foo"); MethodInvoker.on(testClass).run(foo); } }
522
1,431
/* ==================================================================== Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==================================================================== */ package org.apache.poi.xwpf.usermodel; import static org.apache.poi.ooxml.POIXMLTypeLoader.DEFAULT_XML_OPTIONS; import java.io.IOException; import java.io.InputStream; import java.io.StringReader; import java.math.BigDecimal; import java.math.BigInteger; import java.math.RoundingMode; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import javax.xml.namespace.QName; import org.apache.poi.ooxml.POIXMLException; import org.apache.poi.ooxml.util.DocumentHelper; import org.apache.poi.ooxml.util.POIXMLUnits; import org.apache.poi.openxml4j.exceptions.InvalidFormatException; import org.apache.poi.util.HexDump; import org.apache.poi.util.Internal; import org.apache.poi.util.Removal; import org.apache.poi.util.Units; import org.apache.poi.wp.usermodel.CharacterRun; import org.apache.xmlbeans.SimpleValue; import org.apache.xmlbeans.XmlCursor; import org.apache.xmlbeans.XmlException; import org.apache.xmlbeans.XmlObject; import org.apache.xmlbeans.XmlString; import org.apache.xmlbeans.XmlToken; import org.apache.xmlbeans.impl.values.XmlAnyTypeImpl; import org.openxmlformats.schemas.drawingml.x2006.chart.CTChart; import org.openxmlformats.schemas.drawingml.x2006.main.CTBlip; import org.openxmlformats.schemas.drawingml.x2006.main.CTBlipFillProperties; import org.openxmlformats.schemas.drawingml.x2006.main.CTGraphicalObject; import org.openxmlformats.schemas.drawingml.x2006.main.CTGraphicalObjectData; import org.openxmlformats.schemas.drawingml.x2006.main.CTNonVisualDrawingProps; import org.openxmlformats.schemas.drawingml.x2006.main.CTNonVisualPictureProperties; import org.openxmlformats.schemas.drawingml.x2006.main.CTPoint2D; import org.openxmlformats.schemas.drawingml.x2006.main.CTPositiveSize2D; import org.openxmlformats.schemas.drawingml.x2006.main.CTPresetGeometry2D; import org.openxmlformats.schemas.drawingml.x2006.main.CTShapeProperties; import org.openxmlformats.schemas.drawingml.x2006.main.CTTransform2D; import org.openxmlformats.schemas.drawingml.x2006.main.STShapeType; import org.openxmlformats.schemas.drawingml.x2006.picture.CTPicture; import org.openxmlformats.schemas.drawingml.x2006.picture.CTPictureNonVisual; import org.openxmlformats.schemas.drawingml.x2006.wordprocessingDrawing.CTAnchor; import org.openxmlformats.schemas.drawingml.x2006.wordprocessingDrawing.CTInline; import org.openxmlformats.schemas.officeDocument.x2006.sharedTypes.STHexColorRGB; import org.openxmlformats.schemas.officeDocument.x2006.sharedTypes.STOnOff1; import org.openxmlformats.schemas.officeDocument.x2006.sharedTypes.STVerticalAlignRun; import org.openxmlformats.schemas.wordprocessingml.x2006.main.*; import 
org.w3c.dom.NodeList; import org.w3c.dom.Text; import org.xml.sax.InputSource; import org.xml.sax.SAXException; /** * XWPFRun object defines a region of text with a common set of properties */ public class XWPFRun implements ISDTContents, IRunElement, CharacterRun { private final CTR run; private final String pictureText; private final IRunBody parent; private final List<XWPFPicture> pictures; /** * @param r the CTR bean which holds the run attributes * @param p the parent paragraph */ public XWPFRun(CTR r, IRunBody p) { this.run = r; this.parent = p; /* * reserve already occupied drawing ids, so reserving new ids later will * not corrupt the document */ for (CTDrawing ctDrawing : r.getDrawingArray()) { for (CTAnchor anchor : ctDrawing.getAnchorArray()) { if (anchor.getDocPr() != null) { getDocument().getDrawingIdManager().reserve(anchor.getDocPr().getId()); } } for (CTInline inline : ctDrawing.getInlineArray()) { if (inline.getDocPr() != null) { getDocument().getDrawingIdManager().reserve(inline.getDocPr().getId()); } } } // Look for any text in any of our pictures or drawings StringBuilder text = new StringBuilder(); List<XmlObject> pictTextObjs = new ArrayList<>(); pictTextObjs.addAll(Arrays.asList(r.getPictArray())); pictTextObjs.addAll(Arrays.asList(r.getDrawingArray())); for (XmlObject o : pictTextObjs) { XmlObject[] ts = o.selectPath("declare namespace w='http://schemas.openxmlformats.org/wordprocessingml/2006/main' .//w:t"); for (XmlObject t : ts) { NodeList kids = t.getDomNode().getChildNodes(); for (int n = 0; n < kids.getLength(); n++) { if (kids.item(n) instanceof Text) { if (text.length() > 0) { text.append("\n"); } text.append(kids.item(n).getNodeValue()); } } } } pictureText = text.toString(); // Do we have any embedded pictures? // (They're a different CTPicture, under the drawingml namespace) pictures = new ArrayList<>(); for (XmlObject o : pictTextObjs) { for (CTPicture pict : getCTPictures(o)) { XWPFPicture picture = new XWPFPicture(pict, this); pictures.add(picture); } } } /** * @deprecated Use {@link XWPFRun#XWPFRun(CTR, IRunBody)} */ @Deprecated public XWPFRun(CTR r, XWPFParagraph p) { this(r, (IRunBody) p); } /** * Add the xml:spaces="preserve" attribute if the string has leading or trailing white spaces * * @param xs the string to check */ static void preserveSpaces(XmlString xs) { String text = xs.getStringValue(); if (text != null && text.length() >= 1 && (Character.isWhitespace(text.charAt(0)) || Character.isWhitespace(text.charAt(text.length()-1)))) { XmlCursor c = xs.newCursor(); c.toNextToken(); c.insertAttributeWithValue(new QName("http://www.w3.org/XML/1998/namespace", "space"), "preserve"); c.dispose(); } } private List<CTPicture> getCTPictures(XmlObject o) { List<CTPicture> pics = new ArrayList<>(); XmlObject[] picts = o.selectPath("declare namespace pic='" + CTPicture.type.getName().getNamespaceURI() + "' .//pic:pic"); for (XmlObject pict : picts) { if (pict instanceof XmlAnyTypeImpl) { // Pesky XmlBeans bug - see Bugzilla #49934 try { pict = CTPicture.Factory.parse(pict.toString(), DEFAULT_XML_OPTIONS); } catch (XmlException e) { throw new POIXMLException(e); } } if (pict instanceof CTPicture) { pics.add((CTPicture) pict); } } return pics; } /** * Get the currently used CTR object * * @return ctr object */ @Internal public CTR getCTR() { return run; } /** * Get the currently referenced paragraph/SDT object * * @return current parent */ public IRunBody getParent() { return parent; } /** * Get the currently referenced paragraph, or null if a SDT object * * 
@deprecated use {@link XWPFRun#getParent()} instead */ @Deprecated public XWPFParagraph getParagraph() { if (parent instanceof XWPFParagraph) { return (XWPFParagraph) parent; } return null; } /** * @return The {@link XWPFDocument} instance, this run belongs to, or * {@code null} if parent structure (paragraph &gt; document) is not properly set. */ public XWPFDocument getDocument() { if (parent != null) { return parent.getDocument(); } return null; } /** * For isBold, isItalic etc */ private static boolean isCTOnOff(CTOnOff onoff) { return !onoff.isSetVal() || POIXMLUnits.parseOnOff(onoff); } /** * Get the language tag associated with this run, if any. * * @return the language tag associated with this run, if any */ public String getLang() { CTRPr pr = getRunProperties(false); Object lang = (pr == null || pr.sizeOfLangArray() == 0) ? null : pr.getLangArray(0).getVal(); return (String) lang; } /** * Set the language tag associated with this run. * * @param lang the language tag associated with this run * @since 4.1.0 */ public void setLang(String lang) { CTRPr pr = getRunProperties(true); CTLanguage ctLang = pr.sizeOfLangArray() > 0 ? pr.getLangArray(0) : pr.addNewLang(); ctLang.setVal(lang); } /** * Whether the bold property shall be applied to all non-complex script * characters in the contents of this run when displayed in a document * * @return {@code true} if the bold property is applied */ @Override public boolean isBold() { CTRPr pr = getRunProperties(false); return pr != null && pr.sizeOfBArray() > 0 && isCTOnOff(pr.getBArray(0)); } /** * Whether the bold property shall be applied to all non-complex script * characters in the contents of this run when displayed in a document. * <p> * This formatting property is a toggle property, which specifies that its * behavior differs between its use within a style definition and its use as * direct formatting. When used as part of a style definition, setting this * property shall toggle the current state of that property as specified up * to this point in the hierarchy (i.e. applied to not applied, and vice * versa). Setting it to {@code false} (or an equivalent) shall * result in the current setting remaining unchanged. However, when used as * direct formatting, setting this property to true or false shall set the * absolute state of the resulting property. * </p> * <p> * If this element is not present, the default value is to leave the * formatting applied at previous level in the style hierarchy. If this * element is never applied in the style hierarchy, then bold shall not be * applied to non-complex script characters. * </p> * * @param value {@code true} if the bold property is applied to * this run */ @Override public void setBold(boolean value) { CTRPr pr = getRunProperties(true); CTOnOff bold = pr.sizeOfBArray() > 0 ? pr.getBArray(0) : pr.addNewB(); bold.setVal(value ? STOnOff1.ON : STOnOff1.OFF); } /** * Get text color. The returned value is a string in the hex form "RRGGBB". */ public String getColor() { String color = null; if (run.isSetRPr()) { CTRPr pr = getRunProperties(false); if (pr != null && pr.sizeOfColorArray() > 0) { CTColor clr = pr.getColorArray(0); color = clr.xgetVal().getStringValue(); } } return color; } /** * Set text color. * * @param rgbStr - the desired color, in the hex form "RRGGBB". */ public void setColor(String rgbStr) { CTRPr pr = getRunProperties(true); CTColor color = pr.sizeOfColorArray() > 0 ? 
pr.getColorArray(0) : pr.addNewColor(); color.setVal(rgbStr); } /** * Return the string content of this text run * * @return the text of this text run or {@code null} if not set */ public String getText(int pos) { return run.sizeOfTArray() == 0 ? null : run.getTArray(pos) .getStringValue(); } /** * Returns text embedded in pictures */ public String getPictureText() { return pictureText; } /** * Sets the text of this text run * * @param value the literal text which shall be displayed in the document */ public void setText(String value) { setText(value, run.sizeOfTArray()); } /** * Sets the text of this text run in the * * @param value the literal text which shall be displayed in the document * @param pos - position in the text array (NB: 0 based) */ public void setText(String value, int pos) { if (pos > run.sizeOfTArray()) { throw new ArrayIndexOutOfBoundsException("Value too large for the parameter position in XWPFRun.setText(String value,int pos)"); } CTText t = (pos < run.sizeOfTArray() && pos >= 0) ? run.getTArray(pos) : run.addNewT(); t.setStringValue(value); preserveSpaces(t); } /** * Whether the italic property should be applied to all non-complex script * characters in the contents of this run when displayed in a document. * * @return {@code true} if the italic property is applied */ @Override public boolean isItalic() { CTRPr pr = getRunProperties(false); return pr != null && pr.sizeOfIArray() > 0 && isCTOnOff(pr.getIArray(0)); } /** * Whether the bold property shall be applied to all non-complex script * characters in the contents of this run when displayed in a document * <p> * This formatting property is a toggle property, which specifies that its * behavior differs between its use within a style definition and its use as * direct formatting. When used as part of a style definition, setting this * property shall toggle the current state of that property as specified up * to this point in the hierarchy (i.e. applied to not applied, and vice * versa). Setting it to {@code false} (or an equivalent) shall * result in the current setting remaining unchanged. However, when used as * direct formatting, setting this property to true or false shall set the * absolute state of the resulting property. * <p> * If this element is not present, the default value is to leave the * formatting applied at previous level in the style hierarchy. If this * element is never applied in the style hierarchy, then bold shall not be * applied to non-complex script characters. * * @param value {@code true} if the italic property is applied to * this run */ @Override public void setItalic(boolean value) { CTRPr pr = getRunProperties(true); CTOnOff italic = pr.sizeOfIArray() > 0 ? pr.getIArray(0) : pr.addNewI(); italic.setVal(value ? STOnOff1.ON : STOnOff1.OFF); } /** * Get the underline setting for the run. * * @return the Underline pattern applied to this run * @see UnderlinePatterns */ public UnderlinePatterns getUnderline() { UnderlinePatterns value = UnderlinePatterns.NONE; CTUnderline underline = getCTUnderline(false); if (underline != null) { STUnderline.Enum baseValue = underline.getVal(); if (baseValue != null) { value = UnderlinePatterns.valueOf(baseValue.intValue()); } } return value; } /** * Specifies that the contents of this run should be displayed along with an * underline appearing directly below the character height. * <p> * If this element is not present, the default value is to leave the * formatting applied at previous level in the style hierarchy. 
If this * element is never applied in the style hierarchy, then an underline shall * not be applied to the contents of this run. * </p> * * @param value - * underline type * @see UnderlinePatterns */ public void setUnderline(UnderlinePatterns value) { CTUnderline underline = getCTUnderline(true); assert(underline != null); underline.setVal(STUnderline.Enum.forInt(value.getValue())); } /** * Get the CTUnderline for the run. * @param create Create a new underline if necessary * @return The underline, or null create is false and there is no underline. */ private CTUnderline getCTUnderline(boolean create) { CTRPr pr = getRunProperties(true); return pr.sizeOfUArray() > 0 ? pr.getUArray(0) : (create ? pr.addNewU() : null); } /** * Set the underline color for the run's underline, if any. * * @param color An RGB color value (e.g, "a0C6F3") or "auto". * @since 4.0.0 */ public void setUnderlineColor(String color) { CTUnderline underline = getCTUnderline(true); assert(underline != null); SimpleValue svColor; if (color.equals("auto")) { STHexColorAuto hexColor = STHexColorAuto.Factory.newInstance(); hexColor.setEnumValue(STHexColorAuto.Enum.forString(color)); svColor = (SimpleValue) hexColor; } else { STHexColorRGB rgbColor = STHexColorRGB.Factory.newInstance(); rgbColor.setStringValue(color); svColor = (SimpleValue) rgbColor; } underline.setColor(svColor); } /** * Set the underline theme color for the run's underline, if any. * * @param themeColor A theme color name (see {@link STThemeColor.Enum}). * @since 4.0.0 */ public void setUnderlineThemeColor(String themeColor) { CTUnderline underline = getCTUnderline(true); assert(underline != null); STThemeColor.Enum val = STThemeColor.Enum.forString(themeColor); if (val != null) { underline.setThemeColor(val); } } /** * Get the underline theme color for the run's underline, if any. * * @return The {@link STThemeColor.Enum}. * @since 4.0.0 */ public STThemeColor.Enum getUnderlineThemeColor() { CTUnderline underline = getCTUnderline(false); STThemeColor.Enum color = STThemeColor.NONE; if (underline != null) { color = underline.getThemeColor(); } return color; } /** * Get the underline color for the run's underline, if any. * * @return The RGB color value as as a string of hexadecimal digits (e.g., "A0B2F1") or "auto". * @since 4.0.0 */ public String getUnderlineColor() { CTUnderline underline = getCTUnderline(true); assert(underline != null); String colorName = "auto"; Object rawValue = underline.getColor(); if (rawValue != null) { if (rawValue instanceof String) { colorName = (String)rawValue; } else { byte[] rgbColor = (byte[])rawValue; colorName = HexDump.toHex(rgbColor[0]) + HexDump.toHex(rgbColor[1]) + HexDump.toHex(rgbColor[2]); } } return colorName; } /** * Specifies that the contents of this run shall be displayed with a single * horizontal line through the center of the line. * * @return {@code true} if the strike property is applied */ @Override public boolean isStrikeThrough() { CTRPr pr = getRunProperties(false); return pr != null && pr.sizeOfStrikeArray() > 0 && isCTOnOff(pr.getStrikeArray(0)); } /** * Specifies that the contents of this run shall be displayed with a single * horizontal line through the center of the line. * <p> * This formatting property is a toggle property, which specifies that its * behaviour differs between its use within a style definition and its use as * direct formatting. 
When used as part of a style definition, setting this * property shall toggle the current state of that property as specified up * to this point in the hierarchy (i.e. applied to not applied, and vice * versa). Setting it to false (or an equivalent) shall result in the * current setting remaining unchanged. However, when used as direct * formatting, setting this property to true or false shall set the absolute * state of the resulting property. * </p> * <p> * If this element is not present, the default value is to leave the * formatting applied at previous level in the style hierarchy. If this * element is never applied in the style hierarchy, then strikethrough shall * not be applied to the contents of this run. * </p> * * @param value {@code true} if the strike property is applied to * this run */ @Override public void setStrikeThrough(boolean value) { CTRPr pr = getRunProperties(true); CTOnOff strike = pr.sizeOfStrikeArray() > 0 ? pr.getStrikeArray(0) : pr.addNewStrike(); strike.setVal(value ? STOnOff1.ON : STOnOff1.OFF); } @Deprecated public boolean isStrike() { return isStrikeThrough(); } @Deprecated public void setStrike(boolean value) { setStrikeThrough(value); } /** * Specifies that the contents of this run shall be displayed with a double * horizontal line through the center of the line. * * @return {@code true} if the double strike property is applied */ @Override public boolean isDoubleStrikeThrough() { CTRPr pr = getRunProperties(false); return pr != null && pr.sizeOfDstrikeArray() > 0 && isCTOnOff(pr.getDstrikeArray(0)); } /** * Specifies that the contents of this run shall be displayed with a * double horizontal line through the center of the line. * * @see #setStrikeThrough(boolean) for the rules about this */ @Override public void setDoubleStrikethrough(boolean value) { CTRPr pr = getRunProperties(true); CTOnOff dstrike = pr.sizeOfDstrikeArray() > 0 ? pr.getDstrikeArray(0) : pr.addNewDstrike(); dstrike.setVal(value ? STOnOff1.ON : STOnOff1.OFF); } @Override public boolean isSmallCaps() { CTRPr pr = getRunProperties(false); return pr != null && pr.sizeOfSmallCapsArray() > 0 && isCTOnOff(pr.getSmallCapsArray(0)); } @Override public void setSmallCaps(boolean value) { CTRPr pr = getRunProperties(true); CTOnOff caps = pr.sizeOfSmallCapsArray() > 0 ? pr.getSmallCapsArray(0) : pr.addNewSmallCaps(); caps.setVal(value ? STOnOff1.ON : STOnOff1.OFF); } @Override public boolean isCapitalized() { CTRPr pr = getRunProperties(false); return pr != null && pr.sizeOfCapsArray() > 0 && isCTOnOff(pr.getCapsArray(0)); } @Override public void setCapitalized(boolean value) { CTRPr pr = getRunProperties(true); CTOnOff caps = pr.sizeOfCapsArray() > 0 ? pr.getCapsArray(0) : pr.addNewCaps(); caps.setVal(value ? STOnOff1.ON : STOnOff1.OFF); } @Override public boolean isShadowed() { CTRPr pr = getRunProperties(false); return pr != null && pr.sizeOfShadowArray() > 0 && isCTOnOff(pr.getShadowArray(0)); } @Override public void setShadow(boolean value) { CTRPr pr = getRunProperties(true); CTOnOff shadow = pr.sizeOfShadowArray() > 0 ? pr.getShadowArray(0) : pr.addNewShadow(); shadow.setVal(value ? STOnOff1.ON : STOnOff1.OFF); } @Override public boolean isImprinted() { CTRPr pr = getRunProperties(false); return pr != null && pr.sizeOfImprintArray() > 0 && isCTOnOff(pr.getImprintArray(0)); } @Override public void setImprinted(boolean value) { CTRPr pr = getRunProperties(true); CTOnOff imprinted = pr.sizeOfImprintArray() > 0 ? pr.getImprintArray(0) : pr.addNewImprint(); imprinted.setVal(value ? 
STOnOff1.ON : STOnOff1.OFF); } @Override public boolean isEmbossed() { CTRPr pr = getRunProperties(false); return pr != null && pr.sizeOfEmbossArray() > 0 && isCTOnOff(pr.getEmbossArray(0)); } @Override public void setEmbossed(boolean value) { CTRPr pr = getRunProperties(true); CTOnOff emboss = pr.sizeOfEmbossArray() > 0 ? pr.getEmbossArray(0) : pr.addNewEmboss(); emboss.setVal(value ? STOnOff1.ON : STOnOff1.OFF); } /** * Specifies the alignment which shall be applied to the contents of this * run in relation to the default appearance of the run's text. This allows * the text to be repositioned as subscript or superscript without altering * the font size of the run properties. * <p> * If this element is not present, the default value is to leave the * formatting applied at previous level in the style hierarchy. If this * element is never applied in the style hierarchy, then the text shall not * be subscript or superscript relative to the default baseline location for * the contents of this run. * </p> * * @param valign Type of vertical align to apply * @see VerticalAlign */ public void setSubscript(VerticalAlign valign) { CTRPr pr = getRunProperties(true); CTVerticalAlignRun ctValign = pr.sizeOfVertAlignArray() > 0 ? pr.getVertAlignArray(0) : pr.addNewVertAlign(); ctValign.setVal(STVerticalAlignRun.Enum.forInt(valign.getValue())); } @Override public int getKerning() { CTRPr pr = getRunProperties(false); if (pr == null || pr.sizeOfKernArray() == 0) { return 0; } return (int)POIXMLUnits.parseLength(pr.getKernArray(0).xgetVal()); } @Override public void setKerning(int kern) { CTRPr pr = getRunProperties(true); CTHpsMeasure kernmes = pr.sizeOfKernArray() > 0 ? pr.getKernArray(0) : pr.addNewKern(); kernmes.setVal(BigInteger.valueOf(kern)); } @Override public boolean isHighlighted() { CTRPr pr = getRunProperties(false); if (pr == null || pr.sizeOfHighlightArray() == 0) { return false; } STHighlightColor.Enum val = pr.getHighlightArray(0).getVal(); return val != null && val != STHighlightColor.NONE; } // TODO Provide a wrapper round STHighlightColor, then expose getter/setter // for the highlight colour. Ideally also then add to CharacterRun interface @Override public int getCharacterSpacing() { CTRPr pr = getRunProperties(false); if (pr == null || pr.sizeOfSpacingArray() == 0) { return 0; } return (int)Units.toDXA(POIXMLUnits.parseLength(pr.getSpacingArray(0).xgetVal())); } @Override public void setCharacterSpacing(int twips) { CTRPr pr = getRunProperties(true); CTSignedTwipsMeasure spc = pr.sizeOfSpacingArray() > 0 ? pr.getSpacingArray(0) : pr.addNewSpacing(); spc.setVal(BigInteger.valueOf(twips)); } /** * Gets the fonts which shall be used to display the text contents of * this run. Specifies a font which shall be used to format all characters * in the ASCII range (0 - 127) within the parent run * * @return a string representing the font family */ public String getFontFamily() { return getFontFamily(null); } /** * Specifies the fonts which shall be used to display the text contents of * this run. Specifies a font which shall be used to format all characters * in the ASCII range (0 - 127) within the parent run. 
* <p> * Also sets the other font ranges, if they haven't been set before * * @param fontFamily The font family to apply * @see FontCharRange */ public void setFontFamily(String fontFamily) { setFontFamily(fontFamily, null); } /** * Alias for {@link #getFontFamily()} */ @Override public String getFontName() { return getFontFamily(); } /** * Gets the font family for the specified font char range. * If fcr is null, the font char range "ascii" is used * * @param fcr the font char range, defaults to "ansi" * @return a string representing the font famil */ public String getFontFamily(FontCharRange fcr) { CTRPr pr = getRunProperties(false); if (pr == null || pr.sizeOfRFontsArray() == 0) { return null; } CTFonts fonts = pr.getRFontsArray(0); switch (fcr == null ? FontCharRange.ascii : fcr) { default: case ascii: return fonts.getAscii(); case cs: return fonts.getCs(); case eastAsia: return fonts.getEastAsia(); case hAnsi: return fonts.getHAnsi(); } } /** * Specifies the fonts which shall be used to display the text contents of * this run. The default handling for fcr == null is to overwrite the * ascii font char range with the given font family and also set all not * specified font ranges * * @param fontFamily The font family to apply * @param fcr FontCharRange or null for default handling */ public void setFontFamily(String fontFamily, FontCharRange fcr) { CTRPr pr = getRunProperties(true); CTFonts fonts = pr.sizeOfRFontsArray() > 0 ? pr.getRFontsArray(0) : pr.addNewRFonts(); if (fcr == null) { fonts.setAscii(fontFamily); if (!fonts.isSetHAnsi()) { fonts.setHAnsi(fontFamily); } if (!fonts.isSetCs()) { fonts.setCs(fontFamily); } if (!fonts.isSetEastAsia()) { fonts.setEastAsia(fontFamily); } } else { switch (fcr) { case ascii: fonts.setAscii(fontFamily); break; case cs: fonts.setCs(fontFamily); break; case eastAsia: fonts.setEastAsia(fontFamily); break; case hAnsi: fonts.setHAnsi(fontFamily); break; } } } /** * Specifies the font size which shall be applied to all non complex script * characters in the contents of this run when displayed. * * @return value representing the font size (non-integer size will be rounded with half rounding up, * -1 is returned if size not set) * @deprecated use {@link #getFontSizeAsDouble()} */ @Deprecated @Removal(version = "6.0.0") @Override public int getFontSize() { BigDecimal bd = getFontSizeAsBigDecimal(0); return bd == null ? -1 : bd.intValue(); } /** * Specifies the font size which shall be applied to all non complex script * characters in the contents of this run when displayed. * * @return value representing the font size (can be null if size not set) * @since POI 5.0.0 */ @Override public Double getFontSizeAsDouble() { BigDecimal bd = getFontSizeAsBigDecimal(1); return bd == null ? null : bd.doubleValue(); } private BigDecimal getFontSizeAsBigDecimal(int scale) { CTRPr pr = getRunProperties(false); return (pr != null && pr.sizeOfSzArray() > 0) ? BigDecimal.valueOf(Units.toPoints(POIXMLUnits.parseLength(pr.getSzArray(0).xgetVal()))).divide(BigDecimal.valueOf(4), scale, RoundingMode.HALF_UP) : null; } /** * Specifies the font size which shall be applied to all non complex script * characters in the contents of this run when displayed. * <p> * If this element is not present, the default value is to leave the value * applied at previous level in the style hierarchy. If this element is * never applied in the style hierarchy, then any appropriate font size may * be used for non complex script characters. 
* </p> * * @param size The font size as number of point measurements. * @see #setFontSize(double) */ @Override public void setFontSize(int size) { BigInteger bint = BigInteger.valueOf(size); CTRPr pr = getRunProperties(true); CTHpsMeasure ctSize = pr.sizeOfSzArray() > 0 ? pr.getSzArray(0) : pr.addNewSz(); ctSize.setVal(bint.multiply(BigInteger.valueOf(2))); } /** * Specifies the font size which shall be applied to all non complex script * characters in the contents of this run when displayed. * <p> * If this element is not present, the default value is to leave the value * applied at previous level in the style hierarchy. If this element is * never applied in the style hierarchy, then any appropriate font size may * be used for non complex script characters. * </p> * * @param size The font size as number of point measurements. * @see #setFontSize(int) * @since POI 5.0.0 */ @Override public void setFontSize(double size) { BigDecimal bd = BigDecimal.valueOf(size); CTRPr pr = getRunProperties(true); CTHpsMeasure ctSize = pr.sizeOfSzArray() > 0 ? pr.getSzArray(0) : pr.addNewSz(); ctSize.setVal(bd.multiply(BigDecimal.valueOf(2)).setScale(0, RoundingMode.HALF_UP).toBigInteger()); } /** * This element specifies the amount by which text shall be raised or * lowered for this run in relation to the default baseline of the * surrounding non-positioned text. This allows the text to be repositioned * without altering the font size of the contents. * * @return a big integer representing the amount of text shall be "moved" */ public int getTextPosition() { CTRPr pr = getRunProperties(false); return (pr != null && pr.sizeOfPositionArray() > 0) ? (int)(Units.toPoints(POIXMLUnits.parseLength(pr.getPositionArray(0).xgetVal())) / 2.) : -1; } /** * This element specifies the amount by which text shall be raised or * lowered for this run in relation to the default baseline of the * surrounding non-positioned text. This allows the text to be repositioned * without altering the font size of the contents. * <p> * If the val attribute is positive, then the parent run shall be raised * above the baseline of the surrounding text by the specified number of * half-points. If the val attribute is negative, then the parent run shall * be lowered below the baseline of the surrounding text by the specified * number of half-points. * </p> * <p> * If this element is not present, the default value is to leave the * formatting applied at previous level in the style hierarchy. If this * element is never applied in the style hierarchy, then the text shall not * be raised or lowered relative to the default baseline location for the * contents of this run. * </p> * * @param val Positive values will raise the baseline of the text, negative * values will lower it. */ public void setTextPosition(int val) { BigInteger bint = new BigInteger(Integer.toString(val)); CTRPr pr = getRunProperties(true); CTSignedHpsMeasure position = pr.sizeOfPositionArray() > 0 ? pr.getPositionArray(0) : pr.addNewPosition(); position.setVal(bint); } /** * */ public void removeBreak() { // TODO } /** * Specifies that a break shall be placed at the current location in the run * content. * A break is a special character which is used to override the * normal line breaking that would be performed based on the normal layout * of the document's contents. * * @see #addCarriageReturn() */ public void addBreak() { run.addNewBr(); } /** * Specifies that a break shall be placed at the current location in the run * content. 
* A break is a special character which is used to override the * normal line breaking that would be performed based on the normal layout * of the document's contents. * <p> * The behavior of this break character (the * location where text shall be restarted after this break) shall be * determined by its type values. * </p> * * @see BreakType */ public void addBreak(BreakType type) { CTBr br = run.addNewBr(); br.setType(STBrType.Enum.forInt(type.getValue())); } /** * Specifies that a break shall be placed at the current location in the run * content. A break is a special character which is used to override the * normal line breaking that would be performed based on the normal layout * of the document's contents. * <p> * The behavior of this break character (the * location where text shall be restarted after this break) shall be * determined by its type (in this case is BreakType.TEXT_WRAPPING as default) and clear attribute values. * </p> * * @see BreakClear */ public void addBreak(BreakClear clear) { CTBr br = run.addNewBr(); br.setType(STBrType.Enum.forInt(BreakType.TEXT_WRAPPING.getValue())); br.setClear(STBrClear.Enum.forInt(clear.getValue())); } /** * Specifies that a tab shall be placed at the current location in * the run content. */ public void addTab() { run.addNewTab(); } public void removeTab() { //TODO } /** * Specifies that a carriage return shall be placed at the * current location in the run content. * A carriage return is used to end the current line of text in * Wordprocess. * The behavior of a carriage return in run content shall be * identical to a break character with null type and clear attributes, which * shall end the current line and find the next available line on which to * continue. * The carriage return character forced the following text to be * restarted on the next available line in the document. */ public void addCarriageReturn() { run.addNewCr(); } public void removeCarriageReturn() { //TODO } /** * Adds a picture to the run. This method handles * attaching the picture data to the overall file. * * @param pictureData The raw picture data * @param pictureType The type of the picture, eg {@link Document#PICTURE_TYPE_JPEG} * @param width width in EMUs. To convert to / from points use {@link org.apache.poi.util.Units} * @param height height in EMUs. To convert to / from points use {@link org.apache.poi.util.Units} * @throws InvalidFormatException If the format of the picture is not known. * @throws IOException If reading the picture-data from the stream fails. * @see org.apache.poi.xwpf.usermodel.Document#PICTURE_TYPE_EMF * @see org.apache.poi.xwpf.usermodel.Document#PICTURE_TYPE_WMF * @see org.apache.poi.xwpf.usermodel.Document#PICTURE_TYPE_PICT * @see org.apache.poi.xwpf.usermodel.Document#PICTURE_TYPE_JPEG * @see org.apache.poi.xwpf.usermodel.Document#PICTURE_TYPE_PNG * @see org.apache.poi.xwpf.usermodel.Document#PICTURE_TYPE_DIB */ public XWPFPicture addPicture(InputStream pictureData, int pictureType, String filename, int width, int height) throws InvalidFormatException, IOException { String relationId; XWPFPictureData picData; // Work out what to add the picture to, then add both the // picture and the relationship for it // TODO Should we have an interface for this sort of thing? 
if (parent.getPart() instanceof XWPFHeaderFooter) { XWPFHeaderFooter headerFooter = (XWPFHeaderFooter) parent.getPart(); relationId = headerFooter.addPictureData(pictureData, pictureType); picData = (XWPFPictureData) headerFooter.getRelationById(relationId); } else if (parent.getPart() instanceof XWPFComments) { XWPFComments comments = (XWPFComments) parent.getPart(); relationId = comments.addPictureData(pictureData, pictureType); picData = (XWPFPictureData) comments.getRelationById(relationId); } else { @SuppressWarnings("resource") XWPFDocument doc = parent.getDocument(); relationId = doc.addPictureData(pictureData, pictureType); picData = (XWPFPictureData) doc.getRelationById(relationId); } // Create the drawing entry for it try { CTDrawing drawing = run.addNewDrawing(); CTInline inline = drawing.addNewInline(); // Do the fiddly namespace bits on the inline // (We need full control of what goes where and as what) String xml = "<a:graphic xmlns:a=\"" + CTGraphicalObject.type.getName().getNamespaceURI() + "\">" + "<a:graphicData uri=\"" + CTPicture.type.getName().getNamespaceURI() + "\">" + "<pic:pic xmlns:pic=\"" + CTPicture.type.getName().getNamespaceURI() + "\" />" + "</a:graphicData>" + "</a:graphic>"; InputSource is = new InputSource(new StringReader(xml)); org.w3c.dom.Document doc = DocumentHelper.readDocument(is); inline.set(XmlToken.Factory.parse(doc.getDocumentElement(), DEFAULT_XML_OPTIONS)); // Setup the inline inline.setDistT(0); inline.setDistR(0); inline.setDistB(0); inline.setDistL(0); CTNonVisualDrawingProps docPr = inline.addNewDocPr(); long id = getParent().getDocument().getDrawingIdManager().reserveNew(); docPr.setId(id); /* This name is not visible in Word 2010 anywhere. */ docPr.setName("Drawing " + id); docPr.setDescr(filename); CTPositiveSize2D extent = inline.addNewExtent(); extent.setCx(width); extent.setCy(height); // Grab the picture object CTGraphicalObject graphic = inline.getGraphic(); CTGraphicalObjectData graphicData = graphic.getGraphicData(); CTPicture pic = getCTPictures(graphicData).get(0); // Set it up CTPictureNonVisual nvPicPr = pic.addNewNvPicPr(); CTNonVisualDrawingProps cNvPr = nvPicPr.addNewCNvPr(); /* use "0" for the id. 
See ECM-576, 20.2.2.3 */ cNvPr.setId(0L); /* This name is not visible in Word 2010 anywhere */ cNvPr.setName("Picture " + id); cNvPr.setDescr(filename); CTNonVisualPictureProperties cNvPicPr = nvPicPr.addNewCNvPicPr(); cNvPicPr.addNewPicLocks().setNoChangeAspect(true); CTBlipFillProperties blipFill = pic.addNewBlipFill(); CTBlip blip = blipFill.addNewBlip(); blip.setEmbed(parent.getPart().getRelationId(picData)); blipFill.addNewStretch().addNewFillRect(); CTShapeProperties spPr = pic.addNewSpPr(); CTTransform2D xfrm = spPr.addNewXfrm(); CTPoint2D off = xfrm.addNewOff(); off.setX(0); off.setY(0); CTPositiveSize2D ext = xfrm.addNewExt(); ext.setCx(width); ext.setCy(height); CTPresetGeometry2D prstGeom = spPr.addNewPrstGeom(); prstGeom.setPrst(STShapeType.RECT); prstGeom.addNewAvLst(); // Finish up XWPFPicture xwpfPicture = new XWPFPicture(pic, this); pictures.add(xwpfPicture); return xwpfPicture; } catch (XmlException | SAXException e) { throw new IllegalStateException(e); } } /** * this method add chart template into document * * @param chartRelId relation id of chart in document relation file * @since POI 4.0.0 */ @Internal public CTInline addChart(String chartRelId) throws InvalidFormatException, IOException { try { CTInline inline = run.addNewDrawing().addNewInline(); //xml part of chart in document String xml = "<a:graphic xmlns:a=\"" + CTGraphicalObject.type.getName().getNamespaceURI() + "\">" + "<a:graphicData uri=\"" + CTChart.type.getName().getNamespaceURI() + "\">" + "<c:chart xmlns:c=\"" + CTChart.type.getName().getNamespaceURI() + "\" xmlns:r=\"http://schemas.openxmlformats.org/officeDocument/2006/relationships\" r:id=\"" + chartRelId + "\" />" + "</a:graphicData>" + "</a:graphic>"; InputSource is = new InputSource(new StringReader(xml)); org.w3c.dom.Document doc = DocumentHelper.readDocument(is); inline.set(XmlToken.Factory.parse(doc.getDocumentElement(), DEFAULT_XML_OPTIONS)); // Setup the inline with 0 margin inline.setDistT(0); inline.setDistR(0); inline.setDistB(0); inline.setDistL(0); CTNonVisualDrawingProps docPr = inline.addNewDocPr(); long id = getParent().getDocument().getDrawingIdManager().reserveNew(); docPr.setId(id); //This name is not visible in Word anywhere. docPr.setName("chart " + id); return inline; } catch (XmlException | SAXException e) { throw new IllegalStateException(e); } } /** * Returns the embedded pictures of the run. These * are pictures which reference an external, * embedded picture image such as a .png or .jpg */ public List<XWPFPicture> getEmbeddedPictures() { return pictures; } /** * Set the style ID for the run. * * @param styleId ID (not name) of the style to set for the run, e.g. "BoldItalic" (not "Bold Italic"). * @since POI 4.1.1 */ public void setStyle(String styleId) { CTRPr pr = getCTR().getRPr(); if (null == pr) { pr = getCTR().addNewRPr(); } CTString style = pr.sizeOfRStyleArray() > 0 ? pr.getRStyleArray(0) : pr.addNewRStyle(); style.setVal(styleId); } /** * Return this run's style ID. If this run has no style (no run properties or properties without a style), * an empty string is returned. * * @since 4.1.1 */ public String getStyle() { CTRPr pr = getCTR().getRPr(); if (pr == null || pr.sizeOfRStyleArray() <= 0) { return ""; } CTString style = pr.getRStyleArray(0); return null == style ? 
"" : style.getVal(); } /** * Returns the string version of the text and the phonetic string */ @Override public String toString() { String phonetic = getPhonetic(); if (phonetic.length() > 0) { return text() + " (" + phonetic + ")"; } else { return text(); } } /** * Returns the string version of the text, with tabs and * carriage returns in place of their xml equivalents. */ @Override public String text() { StringBuilder text = new StringBuilder(64); // Grab the text and tabs of the text run // Do so in a way that preserves the ordering XmlCursor c = run.newCursor(); c.selectPath("./*"); while (c.toNextSelection()) { XmlObject o = c.getObject(); if (o instanceof CTRuby) { handleRuby(o, text, false); continue; } _getText(o, text); } c.dispose(); return text.toString(); } /** * @return the phonetic (ruby) string associated with this run or an empty String if none exists */ public String getPhonetic() { StringBuilder text = new StringBuilder(64); // Grab the text and tabs of the text run // Do so in a way that preserves the ordering XmlCursor c = run.newCursor(); c.selectPath("./*"); while (c.toNextSelection()) { XmlObject o = c.getObject(); if (o instanceof CTRuby) { handleRuby(o, text, true); } } // Any picture text? if (pictureText != null && pictureText.length() > 0) { text.append("\n").append(pictureText).append("\n"); } c.dispose(); return text.toString(); } /** * @param rubyObj rubyobject * @param text buffer to which to append the content * @param extractPhonetic extract the phonetic (rt) component or the base component */ private void handleRuby(XmlObject rubyObj, StringBuilder text, boolean extractPhonetic) { XmlCursor c = rubyObj.newCursor(); //according to the spec, a ruby object //has the phonetic (rt) first, then the actual text (base) //second. c.selectPath(".//*"); boolean inRT = false; boolean inBase = false; while (c.toNextSelection()) { XmlObject o = c.getObject(); if (o instanceof CTRubyContent) { String tagName = o.getDomNode().getNodeName(); if ("w:rt".equals(tagName)) { inRT = true; } else if ("w:rubyBase".equals(tagName)) { inRT = false; inBase = true; } } else { if (extractPhonetic && inRT) { _getText(o, text); } else if (!extractPhonetic && inBase) { _getText(o, text); } } } c.dispose(); } private void _getText(XmlObject o, StringBuilder text) { if (o instanceof CTText) { String tagName = o.getDomNode().getNodeName(); // Field Codes (w:instrText, defined in spec sec. 17.16.23) // come up as instances of CTText, but we don't want them // in the normal text output if (!"w:instrText".equals(tagName)) { text.append(((CTText) o).getStringValue()); } } // Complex type evaluation (currently only for extraction of check boxes) if (o instanceof CTFldChar) { CTFldChar ctfldChar = ((CTFldChar) o); if (ctfldChar.getFldCharType() == STFldCharType.BEGIN) { if (ctfldChar.getFfData() != null) { for (CTFFCheckBox checkBox : ctfldChar.getFfData().getCheckBoxList()) { text.append((checkBox.getDefault() != null && POIXMLUnits.parseOnOff(checkBox.getDefault().xgetVal())) ? 
"|X|" : "|_|"); } } } } if (o instanceof CTPTab) { text.append('\t'); } if (o instanceof CTBr) { text.append('\n'); } if (o instanceof CTEmpty) { // Some inline text elements get returned not as // themselves, but as CTEmpty, owing to some odd // definitions around line 5642 of the XSDs // This bit works around it, and replicates the above // rules for that case String tagName = o.getDomNode().getNodeName(); if ("w:tab".equals(tagName) || "tab".equals(tagName)) { text.append('\t'); } if ("w:br".equals(tagName) || "br".equals(tagName)) { text.append('\n'); } if ("w:cr".equals(tagName) || "cr".equals(tagName)) { text.append('\n'); } } if (o instanceof CTFtnEdnRef) { CTFtnEdnRef ftn = (CTFtnEdnRef) o; String footnoteRef = ftn.getDomNode().getLocalName().equals("footnoteReference") ? "[footnoteRef:" + ftn.getId().intValue() + "]" : "[endnoteRef:" + ftn.getId().intValue() + "]"; text.append(footnoteRef); } } /** * @see <a href="http://msdn.microsoft.com/en-us/library/ff533743(v=office.12).aspx">[MS-OI29500] Run Fonts</a> */ public enum FontCharRange { ascii /* char 0-127 */, cs /* complex symbol */, eastAsia /* east asia */, hAnsi /* high ansi */ } /** * Set the text expand/collapse scale value. * * @param percentage The percentage to expand or compress the text * @since 4.0.0 */ public void setTextScale(int percentage) { CTRPr pr = getRunProperties(true); CTTextScale scale = pr.sizeOfWArray() > 0 ? pr.getWArray(0) : pr.addNewW(); scale.setVal(percentage); } /** * Gets the current text scale value. * * @return Value is an integer percentage * @since 4.0.0 */ public int getTextScale() { CTRPr pr = getRunProperties(false); if (pr == null || pr.sizeOfWArray() == 0) { return 100; } int value = POIXMLUnits.parsePercent(pr.getWArray(0).xgetVal()); // 100% scaling, that is, no change. See 17.3.2.43 w (Expanded/Compressed Text) return value == 0 ? 100 : value / 1000; } /** * Set the highlight color for the run. Silently does nothing of colorName is not a recognized value. * * @param colorName The name of the color as defined in the ST_HighlightColor simple type ({@link STHighlightColor}) * @since 4.0.0 */ public void setTextHighlightColor(String colorName) { CTRPr pr = getRunProperties(true); CTHighlight highlight = pr.sizeOfHighlightArray() > 0 ? pr.getHighlightArray(0) : pr.addNewHighlight(); STHighlightColor color = highlight.xgetVal(); if (color == null) { color = STHighlightColor.Factory.newInstance(); } STHighlightColor.Enum val = STHighlightColor.Enum.forString(colorName); if (val != null) { color.setStringValue(val.toString()); highlight.xsetVal(color); } } /** * Gets the highlight color for the run * * @return {@link STHighlightColor} for the run. * @since 4.0.0 */ public STHighlightColor.Enum getTextHightlightColor() { CTRPr pr = getRunProperties(true); CTHighlight highlight = pr.sizeOfHighlightArray() > 0 ? pr.getHighlightArray(0) : pr.addNewHighlight(); STHighlightColor color = highlight.xgetVal(); if (color == null) { color = STHighlightColor.Factory.newInstance(); color.setEnumValue(STHighlightColor.NONE); } return (STHighlightColor.Enum)(color.getEnumValue()); } /** * Get the vanish (hidden text) value * * @return True if the run is hidden text. * @since 4.0.0 */ public boolean isVanish() { CTRPr pr = getRunProperties(true); return pr != null && pr.sizeOfVanishArray() > 0 && isCTOnOff(pr.getVanishArray(0)); } /** * The vanish (hidden text) property for the run. * * @param value Set to true to make the run hidden text. 
* @since 4.0.0 */ public void setVanish(boolean value) { CTRPr pr = getRunProperties(true); CTOnOff vanish = pr.sizeOfVanishArray() > 0 ? pr.getVanishArray(0) : pr.addNewVanish(); vanish.setVal(value ? STOnOff1.ON : STOnOff1.OFF); } /** * Get the vertical alignment value * * @return {@link STVerticalAlignRun.Enum} value (see 172.16.58.3 ST_VerticalAlignRun (Vertical Positioning Location)) * @since 4.0.0 */ public STVerticalAlignRun.Enum getVerticalAlignment() { CTRPr pr = getRunProperties(true); CTVerticalAlignRun vertAlign = pr.sizeOfVertAlignArray() > 0 ? pr.getVertAlignArray(0) : pr.addNewVertAlign(); STVerticalAlignRun.Enum val = vertAlign.getVal(); if (val == null) { val = STVerticalAlignRun.BASELINE; } return val; } /** * Set the vertical alignment of the run. * * @param verticalAlignment Vertical alignment value, one of "baseline", "superscript", or "subscript". * @since 4.0.0 */ public void setVerticalAlignment(String verticalAlignment) { CTRPr pr = getRunProperties(true); CTVerticalAlignRun vertAlign = pr.sizeOfVertAlignArray() > 0 ? pr.getVertAlignArray(0) : pr.addNewVertAlign(); STVerticalAlignRun align = vertAlign.xgetVal(); if (align == null) { align = STVerticalAlignRun.Factory.newInstance(); } STVerticalAlignRun.Enum val = STVerticalAlignRun.Enum.forString(verticalAlignment); if (val != null) { align.setStringValue(val.toString()); vertAlign.xsetVal(align); } } /** * Get the emphasis mark value for the run. * * @return {@link STEm.Enum} emphasis mark type enumeration. See 17.18.24 ST_Em (Emphasis Mark Type). * @since 4.0.0 */ public STEm.Enum getEmphasisMark() { CTRPr pr = getRunProperties(true); CTEm emphasis = pr.sizeOfEmArray() > 0 ? pr.getEmArray(0) : pr.addNewEm(); STEm.Enum val = emphasis.getVal(); if (val == null) { val = STEm.NONE; } return val; } /** * Set the emphasis mark for the run. The emphasis mark goes above or below the run * text. * * @param markType Emphasis mark type name, e.g., "dot" or "none". See 17.18.24 ST_Em (Emphasis Mark Type) * @since 4.0.0 */ public void setEmphasisMark(String markType) { CTRPr pr = getRunProperties(true); CTEm emphasisMark = pr.sizeOfEmArray() > 0 ? pr.getEmArray(0) : pr.addNewEm(); STEm mark = emphasisMark.xgetVal(); if (mark == null) { mark = STEm.Factory.newInstance(); } STEm.Enum val = STEm.Enum.forString(markType); if (val != null) { mark.setStringValue(val.toString()); emphasisMark.xsetVal(mark); } } /** * Get the run properties for the run. * * @param create If true, create the properties, if false, do not. * @return The run properties or null if there are no properties and create is false. */ protected CTRPr getRunProperties(boolean create) { CTRPr pr = run.isSetRPr() ? run.getRPr() : null; if (create && pr == null) { pr = run.addNewRPr(); } return pr; } }
25,012
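For context on the XWPFRun API in the record above: the run-level setters (bold, italic, font family, size, colour) all write into the run's CTRPr properties, and setFontSize stores the value as half-points in w:sz. The following is a minimal, hedged usage sketch in Java, not part of the record; it assumes Apache POI's poi-ooxml artifact is on the classpath, and the class name, font, colour value, and output file name are illustrative only.

import java.io.FileOutputStream;
import java.io.IOException;

import org.apache.poi.xwpf.usermodel.XWPFDocument;
import org.apache.poi.xwpf.usermodel.XWPFParagraph;
import org.apache.poi.xwpf.usermodel.XWPFRun;

public class XWPFRunSketch {
    public static void main(String[] args) throws IOException {
        try (XWPFDocument doc = new XWPFDocument()) {
            XWPFParagraph paragraph = doc.createParagraph();
            XWPFRun run = paragraph.createRun();

            run.setText("Hello from a formatted run");
            run.setBold(true);                 // writes w:b into the run properties
            run.setItalic(true);               // writes w:i
            run.setFontFamily("Calibri");      // sets the ascii range, fills the other ranges if unset
            run.setFontSize(12.0);             // stored internally as 24 half-points in w:sz
            run.setColor("4472C4");            // hex "RRGGBB" string, as getColor()/setColor() expect

            try (FileOutputStream out = new FileOutputStream("run-sketch.docx")) {
                doc.write(out);
            }
        }
    }
}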
2,151
<reponame>fugu-helper/android_external_swiftshader<filename>third_party/LLVM/include/llvm/Analysis/DOTGraphTraitsPass.h<gh_stars>1000+ //===-- DOTGraphTraitsPass.h - Print/View dotty graphs-----------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // Templates to create dotty viewer and printer passes for GraphTraits graphs. // //===----------------------------------------------------------------------===// #ifndef LLVM_ANALYSIS_DOT_GRAPHTRAITS_PASS_H #define LLVM_ANALYSIS_DOT_GRAPHTRAITS_PASS_H #include "llvm/Pass.h" #include "llvm/Analysis/CFGPrinter.h" namespace llvm { template <class Analysis, bool Simple> struct DOTGraphTraitsViewer : public FunctionPass { std::string Name; DOTGraphTraitsViewer(std::string GraphName, char &ID) : FunctionPass(ID) { Name = GraphName; } virtual bool runOnFunction(Function &F) { Analysis *Graph; std::string Title, GraphName; Graph = &getAnalysis<Analysis>(); GraphName = DOTGraphTraits<Analysis*>::getGraphName(Graph); Title = GraphName + " for '" + F.getNameStr() + "' function"; ViewGraph(Graph, Name, Simple, Title); return false; } virtual void getAnalysisUsage(AnalysisUsage &AU) const { AU.setPreservesAll(); AU.addRequired<Analysis>(); } }; template <class Analysis, bool Simple> struct DOTGraphTraitsPrinter : public FunctionPass { std::string Name; DOTGraphTraitsPrinter(std::string GraphName, char &ID) : FunctionPass(ID) { Name = GraphName; } virtual bool runOnFunction(Function &F) { Analysis *Graph; std::string Filename = Name + "." + F.getNameStr() + ".dot"; errs() << "Writing '" << Filename << "'..."; std::string ErrorInfo; raw_fd_ostream File(Filename.c_str(), ErrorInfo); Graph = &getAnalysis<Analysis>(); std::string Title, GraphName; GraphName = DOTGraphTraits<Analysis*>::getGraphName(Graph); Title = GraphName + " for '" + F.getNameStr() + "' function"; if (ErrorInfo.empty()) WriteGraph(File, Graph, Simple, Title); else errs() << " error opening file for writing!"; errs() << "\n"; return false; } virtual void getAnalysisUsage(AnalysisUsage &AU) const { AU.setPreservesAll(); AU.addRequired<Analysis>(); } }; } #endif
847
460
<filename>trunk/win/Source/BT_FacebookWizard.h<gh_stars>100-1000 // Copyright 2012 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #pragma once class SpinnerWidget; class JavaScriptAPI; class FacebookWizard : QDialog { Q_OBJECT QWebView * _webView; QString _loginUrl; QString _successUrl; QString _failureUrl; QString _result; SpinnerWidget * _spinner; JavaScriptAPI * _jsApi; private: QUrl getDisplayUrl(); public: FacebookWizard(const QString& loginUrl, const QString& successUrl, const QString& failureUrl, JavaScriptAPI * jsApiRef); // returns empty string if fails QString loginExec(); public slots: void urlChanged(const QUrl& url); void loadFinished(bool); void loadStarted(); void reloadLoginPage(); void javaScriptWindowObjectCleared(); void closeWindow(); };
453
1,442
<filename>apps/shared/toolbox_helpers.cpp #include "toolbox_helpers.h" #include <apps/i18n.h> #include <ion/unicode/utf8_decoder.h> #include <string.h> #include <assert.h> namespace Shared { namespace ToolboxHelpers { int CursorIndexInCommandText(const char * text) { UTF8Decoder decoder(text); size_t index = 0; const char * currentPointer = text; CodePoint codePoint = decoder.nextCodePoint(); const char * nextPointer = decoder.stringPosition(); while (codePoint != UCodePointNull) { if (codePoint == '(' || codePoint == '\'') { return index + 1; } if (codePoint == '[') { return index; } index+= nextPointer - currentPointer; currentPointer = nextPointer; codePoint = decoder.nextCodePoint(); nextPointer = decoder.stringPosition(); } return index; } void TextToInsertForCommandMessage(I18n::Message message, char * buffer, int bufferSize, bool replaceArgsWithEmptyChar) { TextToInsertForCommandText(I18n::translate(message), -1, buffer, bufferSize, replaceArgsWithEmptyChar); } void TextToInsertForCommandText(const char * command, int commandLength, char * buffer, int bufferSize, bool replaceArgsWithEmptyChar) { int index = 0; int numberOfOpenParentheses = 0; int numberOfOpenBrackets = 0; bool insideQuote = false; bool argumentAlreadyReplaced = false; UTF8Decoder decoder(command); CodePoint codePoint = decoder.nextCodePoint(); while (codePoint != UCodePointNull && index < bufferSize - 1 && (commandLength < 0 || (decoder.stringPosition() - command <= commandLength))) { if (codePoint == ')') { numberOfOpenParentheses--; } else if (codePoint == ']') { numberOfOpenBrackets--; } if ((!insideQuote || codePoint == '\'') && ((numberOfOpenParentheses == 0 && numberOfOpenBrackets == 0) || codePoint == ',' || (numberOfOpenBrackets > 0 && (codePoint == ',' || codePoint == '[' || codePoint == ']')))) { assert(index < bufferSize); if (argumentAlreadyReplaced) { argumentAlreadyReplaced = false; } index += UTF8Decoder::CodePointToChars(codePoint, buffer + index, bufferSize - index - 1); } else { if (replaceArgsWithEmptyChar && !argumentAlreadyReplaced) { assert(index < bufferSize); index += UTF8Decoder::CodePointToChars(UCodePointEmpty, buffer + index, bufferSize - index - 1); argumentAlreadyReplaced = true; } } if (codePoint == '(') { numberOfOpenParentheses++; } else if (codePoint == '[') { numberOfOpenBrackets++; } else if (codePoint == '\'') { insideQuote = !insideQuote; } codePoint = decoder.nextCodePoint(); } assert(index < bufferSize); buffer[index] = 0; } } }
1,033
4,756
<reponame>wynr/libphonenumber // Copyright (C) 2012 The Libphonenumber Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Author: <NAME> #include "cpp-build/generate_geocoding_data.h" int main(int argc, const char* argv[]) { return i18n::phonenumbers::Main(argc, argv); }
231
319
<filename>sample/src/main/java/eu/f3rog/blade/sample/mvp/ui/activity/MvpMainActivity.java<gh_stars>100-1000 package eu.f3rog.blade.sample.mvp.ui.activity; import android.os.Bundle; import androidx.appcompat.app.AppCompatActivity; import blade.Blade; import blade.I; import butterknife.ButterKnife; import butterknife.OnClick; import eu.f3rog.blade.sample.R; @Blade public final class MvpMainActivity extends AppCompatActivity { @OnClick(R.id.btn_1) void showExample1() { I.startActorsActivity(this, ActorsActivity.DetailType.DIALOG_FRAG); } @OnClick(R.id.btn_2) void showExample2() { I.startActorsActivity(this, ActorsActivity.DetailType.ACTIVITY_WITH_FRAG); } @OnClick(R.id.btn_3) void showExample3() { I.startActorsActivity(this, ActorsActivity.DetailType.ACTIVITY_WITH_VIEW); } @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.mvp_activity_main); ButterKnife.bind(this); } }
439
1,205
<filename>app/src/main/java/io/github/marktony/espresso/data/source/local/PackagesLocalDataSource.java /* * Copyright(c) 2017 lizhaotailang * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.github.marktony.espresso.data.source.local; import android.support.annotation.NonNull; import android.support.annotation.Nullable; import java.util.List; import io.github.marktony.espresso.data.Package; import io.github.marktony.espresso.data.source.PackagesDataSource; import io.github.marktony.espresso.realm.RealmHelper; import io.reactivex.Observable; import io.realm.Case; import io.realm.Realm; import io.realm.RealmResults; import io.realm.Sort; /** * Created by lizhaotailang on 2017/2/25. * Concrete implementation of a data source as a db. */ public class PackagesLocalDataSource implements PackagesDataSource { @Nullable private static PackagesLocalDataSource INSTANCE; // Prevent direct instantiation private PackagesLocalDataSource() { } // Access this instance for other classes. public static PackagesLocalDataSource getInstance() { if (INSTANCE == null) { INSTANCE = new PackagesLocalDataSource(); } return INSTANCE; } // Destroy the instance. public static void destroyInstance() { INSTANCE = null; } /** * Get the packages in database and sort them in timestamp descending. * @return The observable packages from database. */ @Override public Observable<List<Package>> getPackages() { Realm rlm = RealmHelper.newRealmInstance(); return Observable.just(rlm.copyFromRealm(rlm.where(Package.class) .findAllSorted("timestamp", Sort.DESCENDING))); } /** * Get a package in database of specific number. * @param packNumber The primary key * or in another words, the package id. * See {@link Package#number} * @return The observable package from database. */ @Override public Observable<Package> getPackage(@NonNull String packNumber) { Realm rlm = RealmHelper.newRealmInstance(); return Observable.just(rlm.copyFromRealm(rlm.where(Package.class) .equalTo("number", packNumber) .findFirst())); } /** * Save a package to database. * @param pack The package to save. See {@link Package} */ @Override public void savePackage(@NonNull Package pack) { Realm rlm = RealmHelper.newRealmInstance(); // DO NOT forget begin and commit the transaction. rlm.beginTransaction(); rlm.copyToRealmOrUpdate(pack); rlm.commitTransaction(); rlm.close(); } /** * Delete a package with specific id from database. * @param packageId The primary key of a package * or in another words, the package id. 
* See {@link Package#number} */ @Override public void deletePackage(@NonNull String packageId) { Realm rlm = RealmHelper.newRealmInstance(); Package p = rlm.where(Package.class) .equalTo("number", packageId) .findFirst(); if (p != null) { rlm.beginTransaction(); p.deleteFromRealm(); rlm.commitTransaction(); } rlm.close(); } @Override public Observable<List<Package>> refreshPackages() { // Not required because the {@link PackagesRepository} handles the logic // of refreshing the packages from all available data source return null; } @Override public Observable<Package> refreshPackage(@NonNull String packageId) { // Not required because the {@link PackagesRepository} handles the logic // of refreshing the packages from all available data source return null; } /** * Set all the packages which are the unread new read. */ @Override public void setAllPackagesRead() { Realm rlm = RealmHelper.newRealmInstance(); List<Package> results = rlm.copyFromRealm(rlm.where(Package.class).findAll()); for (Package p : results) { p.setReadable(false); p.setPushable(false); rlm.beginTransaction(); rlm.copyToRealmOrUpdate(p); rlm.commitTransaction(); } rlm.close(); } /** * Set a package of specific number read or unread new. * @param packageId The primary key or the package id. * See {@link Package#number} * @param readable Read or unread new. * See {@link Package#readable} */ @Override public void setPackageReadable(@NonNull String packageId, boolean readable) { Realm rlm = RealmHelper.newRealmInstance(); Package p = rlm.copyFromRealm(rlm.where(Package.class) .equalTo("number", packageId) .findFirst()); if (p != null) { rlm.beginTransaction(); p.setReadable(readable); // When a package is not readable, it is not pushable. p.setPushable(readable); rlm.copyToRealmOrUpdate(p); rlm.commitTransaction(); rlm.close(); } } /** * Query the existence of a specific number. * @param packageId the package number to query. * See {@link Package#number} * @return whether the number is in the database. */ @Override public boolean isPackageExist(@NonNull String packageId) { Realm rlm = RealmHelper.newRealmInstance(); RealmResults<Package> results = rlm.where(Package.class) .equalTo("number", packageId) .findAll(); return (results != null) && (!results.isEmpty()); } @Override public void updatePackageName(@NonNull String packageId, @NonNull String name) { Realm rlm = RealmHelper.newRealmInstance(); Package p = rlm.where(Package.class) .equalTo("number", packageId) .findFirst(); if (p != null) { rlm.beginTransaction(); p.setName(name); rlm.copyToRealmOrUpdate(p); rlm.commitTransaction(); } rlm.close(); } @Override public Observable<List<Package>> searchPackages(@NonNull String keyWords) { Realm rlm = RealmHelper.newRealmInstance(); return Observable.fromIterable(rlm.copyFromRealm( rlm.where(Package.class) .like("name", "*" + keyWords + "*", Case.INSENSITIVE) .or() .like("companyChineseName", "*" + keyWords + "*", Case.INSENSITIVE) .or() .like("company", "*" + keyWords + "*", Case.INSENSITIVE) .or() .like("number", "*" + keyWords + "*", Case.INSENSITIVE) .findAll())) .toList() .toObservable(); } }
3,238
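The local data source in the record above leans on Realm's explicit transaction pattern (beginTransaction / copyToRealmOrUpdate / commitTransaction) and returns reads as RxJava Observables. Below is a hedged caller-side sketch, not part of the record: it assumes Realm has been initialised elsewhere, that Package has a public no-arg constructor with setNumber/getName accessors implied by the fields referenced above, and the tracking number string is invented for illustration.

import io.github.marktony.espresso.data.Package;
import io.github.marktony.espresso.data.source.local.PackagesLocalDataSource;

public class PackagesLocalDataSourceSketch {
    static void demo() {
        PackagesLocalDataSource source = PackagesLocalDataSource.getInstance();

        // Build a package; setters are assumed from the fields used above (number, name).
        Package pack = new Package();
        pack.setNumber("1234567890");
        pack.setName("New keyboard");

        // Persists inside an explicit Realm transaction, as savePackage() does internally.
        source.savePackage(pack);

        // Reads come back as io.reactivex.Observable; blockingFirst() is only for this sketch.
        Package stored = source.getPackage("1234567890").blockingFirst();
        System.out.println(stored.getName());

        // Flip to "read": setAllPackagesRead() above uses readable == false for read items,
        // and setPackageReadable() mirrors the value onto the pushable flag.
        source.setPackageReadable("1234567890", false);
    }
}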
2,542
// ------------------------------------------------------------ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License (MIT). See License.txt in the repo root for license information. // ------------------------------------------------------------ #include "stdafx.h" using namespace std; using namespace Common; using namespace TXRStatefulServiceBase; using namespace TpccService; using namespace TxnReplicator; using namespace Data::Utilities; static const ULONG DISTRICT_KEY_SL_TAG = 'dksl'; DistrictKeySerializer::DistrictKeySerializer() { } DistrictKeySerializer::~DistrictKeySerializer() { } NTSTATUS DistrictKeySerializer::Create( __in KAllocator& allocator, __out SPtr& result) { NTSTATUS status; SPtr output = _new(DISTRICT_KEY_SL_TAG, allocator) DistrictKeySerializer(); if (output == nullptr) { return STATUS_INSUFFICIENT_RESOURCES; } status = output->Status(); if (!NT_SUCCESS(status)) { return status; } result = Ktl::Move(output); return STATUS_SUCCESS; } void DistrictKeySerializer::Write( __in DistrictKey::SPtr districtKey, __in BinaryWriter& binaryWriter) { binaryWriter.Write(districtKey->Warehouse); binaryWriter.Write(districtKey->District); } DistrictKey::SPtr DistrictKeySerializer::Read( __in BinaryReader& binaryReader) { DistrictKey::SPtr districtKey = nullptr; NTSTATUS status = DistrictKey::Create(this->GetThisAllocator(), districtKey); KInvariant(NT_SUCCESS(status)); binaryReader.Read(districtKey->Warehouse); binaryReader.Read(districtKey->District); return districtKey; }
547
5,169
{ "name": "VCXWebRTC", "version": "0.9", "summary": "It is a GoogleWebRTC framework with version:1.0.136171", "homepage": "https://github.com/enablex/VCXWebRTC", "authors": { "enablex": "<EMAIL>" }, "license": { "type": "Apache-2.0", "file": "LICENSE" }, "platforms": { "ios": "9.0" }, "source": { "http": "https://github.com/enablex/VCXWebRTC/archive/0.9.tar.gz" }, "ios": { "vendored_frameworks": "WebRTC.framework" } }
221
22,426
<filename>algorithms/strings/min_distance.py """ Given two words word1 and word2, find the minimum number of steps required to make word1 and word2 the same, where in each step you can delete one character in either string. For example: Input: "sea", "eat" Output: 2 Explanation: You need one step to make "sea" to "ea" and another step to make "eat" to "ea". Reference: https://leetcode.com/problems/delete-operation-for-two-strings/description/ """ def min_distance(word1, word2): return len(word1) + len(word2) - 2 * lcs(word1, word2, len(word1), len(word2)) def lcs(s1, s2, i, j): """ The length of longest common subsequence among the two given strings s1 and s2 """ if i == 0 or j == 0: return 0 elif s1[i - 1] == s2[j - 1]: return 1 + lcs(s1, s2, i - 1, j - 1) else: return max(lcs(s1, s2, i - 1, j), lcs(s1, s2, i, j - 1)) # TODO: Using dynamic programming
362
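The min_distance.py docstring above reduces the delete distance to the length of the longest common subsequence, but its recursive lcs() is exponential and the trailing TODO asks for a dynamic-programming version. Here is a hedged sketch of that bottom-up DP, written in Java purely for illustration; the class and method names are mine, not part of the repository.

public class MinDistanceSketch {
    /** Minimum number of single-character deletions needed to make the two words equal. */
    static int minDistance(String word1, String word2) {
        return word1.length() + word2.length() - 2 * lcs(word1, word2);
    }

    /** Bottom-up LCS table: dp[i][j] = LCS length of word1[0..i) and word2[0..j). */
    static int lcs(String word1, String word2) {
        int n = word1.length(), m = word2.length();
        int[][] dp = new int[n + 1][m + 1];
        for (int i = 1; i <= n; i++) {
            for (int j = 1; j <= m; j++) {
                if (word1.charAt(i - 1) == word2.charAt(j - 1)) {
                    dp[i][j] = dp[i - 1][j - 1] + 1;   // extend the common subsequence
                } else {
                    dp[i][j] = Math.max(dp[i - 1][j], dp[i][j - 1]);  // drop one character
                }
            }
        }
        return dp[n][m];
    }

    public static void main(String[] args) {
        System.out.println(minDistance("sea", "eat")); // prints 2, matching the docstring example
    }
}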
18,012
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dubbo.qos.server.handler; import io.netty.buffer.ByteBuf; import io.netty.channel.Channel; import io.netty.channel.ChannelFuture; import io.netty.channel.ChannelFutureListener; import io.netty.channel.ChannelHandlerContext; import org.junit.jupiter.api.Test; import org.mockito.ArgumentCaptor; import java.net.InetAddress; import java.net.InetSocketAddress; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.containsString; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; public class LocalHostPermitHandlerTest { @Test public void testHandlerAdded() throws Exception { ChannelHandlerContext context = mock(ChannelHandlerContext.class); Channel channel = mock(Channel.class); when(context.channel()).thenReturn(channel); InetAddress addr = mock(InetAddress.class); when(addr.isLoopbackAddress()).thenReturn(false); InetSocketAddress address = new InetSocketAddress(addr, 12345); when(channel.remoteAddress()).thenReturn(address); ChannelFuture future = mock(ChannelFuture.class); when(context.writeAndFlush(any(ByteBuf.class))).thenReturn(future); LocalHostPermitHandler handler = new LocalHostPermitHandler(false); handler.handlerAdded(context); ArgumentCaptor<ByteBuf> captor = ArgumentCaptor.forClass(ByteBuf.class); verify(context).writeAndFlush(captor.capture()); assertThat(new String(captor.getValue().array()), containsString("Foreign Ip Not Permitted")); verify(future).addListener(ChannelFutureListener.CLOSE); } }
802
923
package core.controller; import java.awt.Color; import java.awt.Point; import core.controller.internals.AbstractMouseCoreImplementation; import utilities.Function; public class MouseCore extends AbstractMouseCoreImplementation { private AbstractMouseCoreImplementation m; protected MouseCore(AbstractMouseCoreImplementation m) { this.m = m; } @Override public Point getPosition() { return m.getPosition(); } @Override public Color getColor(int x, int y) { return m.getColor(x, y); } @Override public final Color getColor() { return m.getColor(); } @Override public void hold(int mask, int duration) throws InterruptedException { m.hold(mask, duration); } @Override public void hold(int mask, int x, int y, int duration) throws InterruptedException { m.hold(mask, x, y, duration); } @Override public void press(int mask) { m.press(mask); } @Override public void release(int mask) { m.release(mask); } @Override public void move(int newX, int newY) { m.move(newX, newY); } @Override public void drag(int sourceX, int sourceY, int destX, int destY) { m.drag(sourceX, sourceY, destX, destY); } @Override public void moveBy(int amountX, int amountY) { m.moveBy(amountX, amountY); } @Override public void dragBy(int amountX, int amountY) { m.dragBy(amountX, amountY); } /** * Not fully supported. Use at own risk * Move mouse in a grid (defined by topLeft, bottomRight, number of column and number of row) * and perform an action at each point on the grid * @param topLeft topLeft coordinate of grid * @param bottomRight bottomRight coordinate of grid * @param col number of column of grid * @param row number of row of grid * @param action action to perform. */ public void moveArea(Point topLeft, Point bottomRight, int col, int row, Function<Point, Void> action) { if (col < 1 || row < 1) { return; } Point current = new Point(topLeft.x, topLeft.y); int xIncrement = (bottomRight.x - topLeft.x) / col; int yIncrement = (bottomRight.y - topLeft.y) / row; for (int i = 0; i < row; i++) { for (int j = 0; j < col; j++) { move(current); action.apply(current); current.x += xIncrement; if (current.x > bottomRight.x) { current.x = topLeft.x; current.y += yIncrement; } } } } }
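The moveArea Javadoc above describes visiting a col-by-row grid between two corner points and applying an action at each point. A small Python sketch of that traversal, using explicit row/column stepping instead of the wrap-around check in the Java code (the function name and callback shape are illustrative, not taken from the class):

def move_area(top_left, bottom_right, col, row, action):
    """Visit a col-by-row grid spanning the rectangle and call action at each point."""
    if col < 1 or row < 1:
        return
    x0, y0 = top_left
    x1, y1 = bottom_right
    # Integer step sizes, mirroring the xIncrement/yIncrement computation above.
    x_step = (x1 - x0) // col
    y_step = (y1 - y0) // row
    for i in range(row):
        for j in range(col):
            action((x0 + j * x_step, y0 + i * y_step))

# Example: print every point of a 3x2 grid between (0, 0) and (90, 40).
move_area((0, 0), (90, 40), col=3, row=2, action=print)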
833
435
{ "copyright_text": null, "description": "Reproducibility and collaboration are difficult aspects of any business-based analytics, speaking from personal experience, where a project is likely shared between a technical analyst and a business analyst. This talk aims to show examples of how this can be improved through aspects of the Python / R toolset.", "duration": 1281, "language": "eng", "recorded": "2019-09-16", "related_urls": [ { "label": "Conference schedule", "url": "http://2019.pyconuk.org/schedule/" } ], "speakers": [ "<NAME>" ], "tags": [], "thumbnail_url": "https://i.ytimg.com/vi/EnClLB2jzeA/maxresdefault.jpg", "title": "Battles with reproducibility and collaboration in large organisations", "videos": [ { "type": "youtube", "url": "https://www.youtube.com/watch?v=EnClLB2jzeA" } ] }
303
1,056
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.netbeans.modules.maven.hints.pom; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.prefs.Preferences; import javax.swing.JComponent; import javax.swing.text.Document; import org.netbeans.api.project.Project; import org.netbeans.editor.Utilities; import org.netbeans.modules.editor.NbEditorUtilities; import org.netbeans.modules.maven.api.NbMavenProject; import org.netbeans.modules.maven.hints.pom.spi.Configuration; import org.netbeans.modules.maven.hints.pom.spi.POMErrorFixProvider; import org.netbeans.modules.maven.model.pom.Dependency; import org.netbeans.modules.maven.model.pom.POMModel; import org.netbeans.modules.maven.model.pom.Profile; import org.netbeans.modules.xml.xam.Model; import org.netbeans.spi.editor.hints.ChangeInfo; import org.netbeans.spi.editor.hints.ErrorDescription; import org.netbeans.spi.editor.hints.ErrorDescriptionFactory; import org.netbeans.spi.editor.hints.Fix; import org.openide.text.Line; import org.openide.util.NbBundle; /** * * @author mkleint */ public class OverrideDependencyManagementError implements POMErrorFixProvider { private final Configuration configuration; public OverrideDependencyManagementError() { configuration = new Configuration("OverrideDependencyManagementError", //NOI18N NbBundle.getMessage(OverrideDependencyManagementError.class, "TIT_OverrideDependencyManagementError"), NbBundle.getMessage(OverrideDependencyManagementError.class, "DESC_OverrideDependencyManagementError"), true, Configuration.HintSeverity.WARNING); } @Override public List<ErrorDescription> getErrorsForDocument(POMModel model, Project prj) { assert model != null; List<ErrorDescription> toRet = new ArrayList<ErrorDescription>(); if (prj == null) { return toRet; } Map<String, String> managed = collectManaged(prj); if (managed.isEmpty()) { return toRet; } checkDependencyList(model.getProject().getDependencies(), model, toRet, managed); List<Profile> profiles = model.getProject().getProfiles(); if (profiles != null) { for (Profile prof : profiles) { checkDependencyList(prof.getDependencies(), model, toRet, managed); } } return toRet; } private void checkDependencyList(List<Dependency> deps, final POMModel model, List<ErrorDescription> toRet, Map<String, String> managed) { if (deps != null) { for (final Dependency dep : deps) { String ver = dep.getVersion(); if (ver != null) { String art = dep.getArtifactId(); String gr = dep.getGroupId(); String key = gr + ":" + art; //NOI18N if (managed.keySet().contains(key)) { final String managedver = managed.get(key); Document doc = model.getBaseDocument(); final Line[] line = new Line[1]; doc.render(new Runnable() { @Override public void run() { int position = 
dep.findChildElementPosition(model.getPOMQNames().VERSION.getQName()); line[0] = NbEditorUtilities.getLine(model.getBaseDocument(), position, false); } }); toRet.add(ErrorDescriptionFactory.createErrorDescription( configuration.getSeverity(configuration.getPreferences()).toEditorSeverity(), NbBundle.getMessage(OverrideDependencyManagementError.class, "TXT_OverrideDependencyManagementError", managedver), Collections.<Fix>singletonList(new OverrideFix(dep)), doc, line[0].getLineNumber() + 1)); } } } } } @Override public JComponent getCustomizer(Preferences preferences) { return null; } @Override public String getSavedValue(JComponent customizer, String key) { return null; } @Override public void cancel() { } @Override public Configuration getConfiguration() { return configuration; } private Map<String, String> collectManaged(Project prj) { NbMavenProject project = prj.getLookup().lookup(NbMavenProject.class); @SuppressWarnings("unchecked") HashMap<String, String> toRet = new HashMap<String, String>(); if (project == null) { //#154462 return toRet; } org.apache.maven.model.DependencyManagement dm = project.getMavenProject().getDependencyManagement(); if (dm != null) { @SuppressWarnings("unchecked") List<org.apache.maven.model.Dependency> plugins = dm.getDependencies(); for (org.apache.maven.model.Dependency dep : plugins) { toRet.put(dep.getGroupId() + ":" + dep.getArtifactId(), dep.getVersion()); //NOI18N } } return toRet; } private static class OverrideFix implements Fix, Runnable { private final Dependency dependency; OverrideFix(Dependency dep) { dependency = dep; } @Override public String getText() { return NbBundle.getMessage(OverrideDependencyManagementError.class, "TEXT_OverrideDependencyFix"); } @Override public void run() { dependency.setVersion(null); } @Override public ChangeInfo implement() throws Exception { ChangeInfo info = new ChangeInfo(); POMModel mdl = dependency.getModel(); if (!mdl.getState().equals(Model.State.VALID)) { return info; } PomModelUtils.implementInTransaction(mdl, this); return info; } } }
2,933
7,482
<filename>bsp/CME_M7/StdPeriph_Driver/inc/cmem7_conf.h /** ***************************************************************************** * @file cmem7_conf.h * * @brief CMEM7 config file * * * @version V1.0 * @date 3. September 2013 * * @note * ***************************************************************************** * @attention * * THE PRESENT FIRMWARE WHICH IS FOR GUIDANCE ONLY AIMS AT PROVIDING CUSTOMERS * WITH CODING INFORMATION REGARDING THEIR PRODUCTS IN ORDER FOR THEM TO SAVE * TIME. AS A RESULT, CAPITAL-MICRO SHALL NOT BE HELD LIABLE FOR ANY DIRECT, * INDIRECT OR CONSEQUENTIAL DAMAGES WITH RESPECT TO ANY CLAIMS ARISING * FROM THE CONTENT OF SUCH FIRMWARE AND/OR THE USE MADE BY CUSTOMERS OF THE * CODING INFORMATION CONTAINED HEREIN IN CONNECTION WITH THEIR PRODUCTS. * * <h2><center>&copy; COPYRIGHT 2013 Capital-micro </center></h2> ***************************************************************************** */ #ifndef __CMEM7_CONF_H #define __CMEM7_CONF_H #define _ADC #define _AES #define _CAN #define _DDR #define _DMA #define _EFUSE #define _ETH #define _FLASH #define _GPIO #define _I2C #define _MISC #define _RTC #define _SPI #define _TIM #define _UART #define _USB #define _WDG //#define _MARVELL //#define _IP1826D #define _M7NORFLASH #define _ME_6095_F #define USE_FULL_ASSERT 1 #ifdef USE_FULL_ASSERT /** * @brief The assert_param macro is used for function's parameters check. * @param expr: If expr is false, it calls assert_failed function which reports * the name of the source file and the source line number of the call * that failed. If expr is true, it returns no value. * @retval None */ #define assert_param(expr) ((expr) ? (void)0 : assert_failed((unsigned char *)__FILE__, __LINE__)) static void assert_failed(unsigned char* file, unsigned long line) { while (1) { ; } } #else #define assert_param(expr) ((void)0) #endif /* USE_FULL_ASSERT */ typedef enum _BOOL {FALSE = 0, TRUE = 1} BOOL; /** * System clock frequency, unit is Hz. */ #define SYSTEM_CLOCK_FREQ 300000000 //250000000 //300000000 /** * @brief usecond delay * @note It can't delay in an accurate time * @param[in] usec usecond to be delay * @retval None */ static void udelay(unsigned long usec) { unsigned long count = 0; unsigned long utime = SYSTEM_CLOCK_FREQ / 1000000 * usec; while(++count < utime) ; } /** * UART definition for print */ #define PRINT_UART UART2 /** * DDR type definition */ #define DDR_TYPE 3 // 2 for DDR2, 3 for DDR3 #if (DDR_TYPE == 3) # define DDR_SIZE (256 << 20) #elif (DDR_TYPE == 2) # define DDR_SIZE (128 << 20) #else # error #endif #endif /* __CMEM7_CONF_H */
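The udelay helper above derives its busy-wait loop count from the configured clock: iterations = SYSTEM_CLOCK_FREQ / 1000000 * usec. A quick arithmetic sketch, assuming the 300 MHz setting from the header and ignoring per-iteration cost (which is why the header itself warns the delay is not accurate):

SYSTEM_CLOCK_FREQ = 300_000_000  # Hz, as configured in the header

def udelay_iterations(usec):
    # Loop iterations the busy-wait spins for a requested delay in microseconds.
    return SYSTEM_CLOCK_FREQ // 1_000_000 * usec

print(udelay_iterations(10))  # 3000 iterations for a ~10 us request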
1,055
910
/* * Copyright (c) 2021 VMware, Inc. * SPDX-License-Identifier: MIT * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ package ddlog; import com.vmware.ddlog.translator.TranslationException; import org.junit.Test; /** * Test various CAST expressions */ public class CastTest extends BaseQueriesTest { @Test public void testCastIntToFloat() { String query = "create view v0 as SELECT DISTINCT CAST(t1.column1 AS FLOAT) AS f FROM t1"; String program = this.header(false) + "typedef TRtmp = TRtmp{f:float}\n" + this.relations(false) + "output relation Rv0[TRtmp]\n" + "Rv0[v1] :- Rt1[v],var v0 = TRtmp{.f = v.column1 as float},var v1 = v0."; this.testTranslation(query, program); } @Test public void testCastIntToString() { String query = "create view v0 as SELECT DISTINCT CAST(t1.column1 AS VARCHAR) AS s FROM t1"; String program = this.header(false) + "typedef TRtmp = TRtmp{s:string}\n" + this.relations(false) + "output relation Rv0[TRtmp]\n" + "Rv0[v1] :- Rt1[v],var v0 = TRtmp{.s = [|${v.column1}|]},var v1 = v0."; this.testTranslation(query, program); } @Test public void testCastStringToInt() { String query = "create view v0 as SELECT DISTINCT CAST(t1.column2 AS INT) AS i FROM t1"; String program = this.header(false) + "typedef TRtmp = TRtmp{i:signed<64>}\n" + this.relations(false) + "output relation Rv0[TRtmp]\n" + "Rv0[v1] :- Rt1[v],var v0 = TRtmp{.i = option_unwrap_or_default(parse_dec_i64(v.column2))},var v1 = v0."; this.testTranslation(query, program); } @Test public void testCastStringToFloat() { String query = "create view v0 as SELECT DISTINCT CAST(t1.column2 AS REAL) AS d FROM t1"; String program = this.header(false) + "typedef TRtmp = TRtmp{d:double}\n" + this.relations(false) + "output relation Rv0[TRtmp]\n" + "Rv0[v1] :- Rt1[v],var v0 = TRtmp{.d = result_unwrap_or_default(parse_d(v.column2))},var v1 = v0."; this.testTranslation(query, program); } @Test public void testCastStringToDate() { String query = "create view v0 as SELECT DISTINCT CAST(t1.column2 AS DATE) AS d FROM t1"; String program = this.header(false) + "typedef TRtmp = TRtmp{d:Date}\n" + this.relations(false) + "output relation Rv0[TRtmp]\n" + "Rv0[v1] :- Rt1[v],var v0 = TRtmp{.d = result_unwrap_or_default(string2date(v.column2))},var v1 = v0."; this.testTranslation(query, program); } @Test public void testCastIntToDate() { String query = "create view v0 as SELECT DISTINCT CAST(t1.column1 AS DATE) AS d FROM t1"; String program = this.header(false) + "typedef TRtmp = TRtmp{d:Date}\n" + this.relations(false) + "output relation 
Rv0[TRtmp]\n" + "Rv0[v1] :- Rt1[v],var v0 = TRtmp{.d = result_unwrap_or_default(string2date([|${v.column1}|]))},var v1 = v0."; this.testTranslation(query, program); } @Test public void testCastStringToDateTime() { String query = "create view v0 as SELECT DISTINCT CAST(t1.column2 AS TIMESTAMP) AS t FROM t1"; String program = this.header(false) + "typedef TRtmp = TRtmp{t:DateTime}\n" + this.relations(false) + "output relation Rv0[TRtmp]\n" + "Rv0[v1] :- Rt1[v],var v0 = TRtmp{.t = result_unwrap_or_default(string2datetime(v.column2))},var v1 = v0."; this.testTranslation(query, program); } @Test public void testCastFloatToInt() { String query = "create view v0 as SELECT DISTINCT CAST(t1.column4 AS INTEGER) AS i FROM t1"; String program = this.header(false) + "typedef TRtmp = TRtmp{i:signed<64>}\n" + this.relations(false) + "output relation Rv0[TRtmp]\n" + "Rv0[v1] :- Rt1[v],var v0 = TRtmp{.i = option_unwrap_or_default(int_from_d(v.column4)) as signed<64>},var v1 = v0."; this.testTranslation(query, program); } @Test public void testCastDateToString() { String query = "create view v0 as SELECT DISTINCT CAST(t3.d AS VARCHAR) AS s FROM t3"; String program = this.header(false) + "typedef TRtmp = TRtmp{s:string}\n" + this.relations(false) + "output relation Rv0[TRtmp]\n" + "Rv0[v1] :- Rt3[v],var v0 = TRtmp{.s = [|${v.d}|]},var v1 = v0."; this.testTranslation(query, program); } @Test public void testCastBoolToInt() { String query = "create view v0 as SELECT DISTINCT CAST(t1.column3 AS INTEGER) AS i FROM t1"; String program = this.header(false) + "typedef TRtmp = TRtmp{i:signed<64>}\n" + this.relations(false) + "output relation Rv0[TRtmp]\n" + "Rv0[v1] :- Rt1[v],var v0 = TRtmp{.i = if (v.column3) {\n64'sd1} else {\n64'sd0}},var v1 = v0."; this.testTranslation(query, program); } @Test public void testCastIntToBool() { String query = "create view v0 as SELECT DISTINCT CAST(t1.column1 AS BOOLEAN) AS b FROM t1"; String program = this.header(false) + "typedef TRtmp = TRtmp{b:bool}\n" + this.relations(false) + "output relation Rv0[TRtmp]\n" + "Rv0[v1] :- Rt1[v],var v0 = TRtmp{.b = (v.column1 != 64'sd0)},var v1 = v0."; this.testTranslation(query, program); } @Test public void testCastIntToFloatWithNull() { String query = "create view v0 as SELECT DISTINCT CAST(t1.column1 AS FLOAT) AS f FROM t1"; String program = this.header(true) + "typedef TRtmp = TRtmp{f:Option<float>}\n" + this.relations(true) + "output relation Rv0[TRtmp]\n" + "Rv0[v1] :- Rt1[v],var v0 = TRtmp{.f = match(v.column1) {None{}: Option<signed<64>> -> None{}: Option<float>,\n" + "Some{.x = var x} -> Some{.x = x as float}\n" + "}},var v1 = v0."; this.testTranslation(query, program, true); } @Test public void testCastIntToStringWithNull() { String query = "create view v0 as SELECT DISTINCT CAST(t1.column1 AS VARCHAR) AS s FROM t1"; String program = this.header(true) + "typedef TRtmp = TRtmp{s:Option<string>}\n" + this.relations(true) + "output relation Rv0[TRtmp]\n" + "Rv0[v1] :- Rt1[v],var v0 = TRtmp{.s = match(v.column1) {None{}: Option<signed<64>> -> None{}: Option<string>,\n" + "Some{.x = var x} -> Some{.x = [|${x}|]}\n" + "}},var v1 = v0."; this.testTranslation(query, program, true); } @Test public void testCastStringToIntWithNull() { String query = "create view v0 as SELECT DISTINCT CAST(t1.column2 AS INT) AS i FROM t1"; String program = this.header(true) + "typedef TRtmp = TRtmp{i:Option<signed<64>>}\n" + this.relations(true) + "output relation Rv0[TRtmp]\n" + "Rv0[v1] :- Rt1[v],var v0 = TRtmp{.i = match(v.column2) {None{}: Option<string> 
-> None{}: Option<signed<64>>,\n" + "Some{.x = var x} -> Some{.x = option_unwrap_or_default(parse_dec_i64(x))}\n" + "}},var v1 = v0."; this.testTranslation(query, program, true); } @Test public void testCastStringToFloatWithNull() { String query = "create view v0 as SELECT DISTINCT CAST(t1.column2 AS REAL) AS d FROM t1"; String program = this.header(true) + "typedef TRtmp = TRtmp{d:Option<double>}\n" + this.relations(true) + "output relation Rv0[TRtmp]\n" + "Rv0[v1] :- Rt1[v],var v0 = TRtmp{.d = match(v.column2) {None{}: Option<string> -> None{}: Option<double>,\n" + "Some{.x = var x} -> Some{.x = result_unwrap_or_default(parse_d(x))}\n" + "}},var v1 = v0."; this.testTranslation(query, program, true); } @Test public void testCastStringToDateWithNull() { String query = "create view v0 as SELECT DISTINCT CAST(t1.column2 AS DATE) AS d FROM t1"; String program = this.header(true) + "typedef TRtmp = TRtmp{d:Option<Date>}\n" + this.relations(true) + "output relation Rv0[TRtmp]\n" + "Rv0[v1] :- Rt1[v],var v0 = TRtmp{.d = match(v.column2) {None{}: Option<string> -> None{}: Option<Date>,\n" + "Some{.x = var x} -> Some{.x = result_unwrap_or_default(string2date(x))}\n" + "}},var v1 = v0."; this.testTranslation(query, program, true); } @Test public void testCastIntToDateWithNull() { String query = "create view v0 as SELECT DISTINCT CAST(t1.column1 AS DATE) AS d FROM t1"; String program = this.header(true) + "typedef TRtmp = TRtmp{d:Option<Date>}\n" + this.relations(true) + "output relation Rv0[TRtmp]\n" + "Rv0[v1] :- Rt1[v],var v0 = TRtmp{.d = match(v.column1) {None{}: Option<signed<64>> -> None{}: Option<Date>,\n" + "Some{.x = var x} -> Some{.x = result_unwrap_or_default(string2date([|${v.column1}|]))}\n" + "}},var v1 = v0."; this.testTranslation(query, program, true); } @Test public void testCastStringToDateTimeWithNull() { String query = "create view v0 as SELECT DISTINCT CAST(t1.column2 AS TIMESTAMP) AS t FROM t1"; String program = this.header(true) + "typedef TRtmp = TRtmp{t:Option<DateTime>}\n" + this.relations(true) + "output relation Rv0[TRtmp]\n" + "Rv0[v1] :- Rt1[v],var v0 = TRtmp{.t = match(v.column2) {None{}: Option<string> -> None{}: Option<DateTime>,\n" + "Some{.x = var x} -> Some{.x = result_unwrap_or_default(string2datetime(x))}\n" + "}},var v1 = v0."; this.testTranslation(query, program, true); } @Test public void testCastFloatToIntWithNull() { String query = "create view v0 as SELECT DISTINCT CAST(t1.column4 AS INTEGER) AS i FROM t1"; String program = this.header(true) + "typedef TRtmp = TRtmp{i:Option<signed<64>>}\n" + this.relations(true) + "output relation Rv0[TRtmp]\n" + "Rv0[v1] :- Rt1[v],var v0 = TRtmp{.i = match(v.column4) {None{}: Option<double> -> None{}: Option<signed<64>>,\n" + "Some{.x = var x} -> Some{.x = option_unwrap_or_default(int_from_d(x)) as signed<64>}\n" + "}},var v1 = v0."; this.testTranslation(query, program, true); } @Test public void testCastDateToStringWithNull() { String query = "create view v0 as SELECT DISTINCT CAST(t3.d AS VARCHAR) AS s FROM t3"; String program = this.header(true) + "typedef TRtmp = TRtmp{s:Option<string>}\n" + this.relations(true) + "output relation Rv0[TRtmp]\n" + "Rv0[v1] :- Rt3[v],var v0 = TRtmp{.s = match(v.d) {None{}: Option<Date> -> None{}: Option<string>,\n" + "Some{.x = var x} -> Some{.x = [|${x}|]}\n" + "}},var v1 = v0."; this.testTranslation(query, program, true); } @Test public void testCastBoolToIntWithNull() { String query = "create view v0 as SELECT DISTINCT CAST(t1.column3 AS INTEGER) AS i FROM t1"; String program = 
this.header(true) + "typedef TRtmp = TRtmp{i:Option<signed<64>>}\n" + this.relations(true) + "output relation Rv0[TRtmp]\n" + "Rv0[v1] :- Rt1[v],var v0 = TRtmp{.i = match(v.column3) {None{}: Option<bool> -> None{}: Option<signed<64>>,\n" + "Some{.x = var x} -> Some{.x = if (x) {\n" + "64'sd1} else {\n" + "64'sd0}}\n" + "}},var v1 = v0."; this.testTranslation(query, program, true); } @Test public void testCastIntToBoolWithNull() { String query = "create view v0 as SELECT DISTINCT CAST(t1.column1 AS BOOLEAN) AS b FROM t1"; String program = this.header(true) + "typedef TRtmp = TRtmp{b:Option<bool>}\n" + this.relations(true) + "output relation Rv0[TRtmp]\n" + "Rv0[v1] :- Rt1[v],var v0 = TRtmp{.b = match(v.column1) {None{}: Option<signed<64>> -> None{}: Option<bool>,\n" + "Some{.x = var x} -> Some{.x = (x != 64'sd0)}\n" + "}},var v1 = v0."; this.testTranslation(query, program, true); } @Test(expected = TranslationException.class) public void testCastDateToBoolWithNull() { String query = "create view v0 as SELECT DISTINCT CAST(t3.d AS BOOLEAN) AS b FROM t3"; String program = ""; this.testTranslation(query, program, true); } }
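The WithNull tests above all expand to the same shape: a nullable column becomes an Option, and the cast runs inside a match that maps None to None and Some(x) to Some of the converted value. A rough Python analogue of that null-propagation pattern (the converter arguments are stand-ins, not the ddlog library functions):

from typing import Callable, Optional, TypeVar

T = TypeVar("T")
U = TypeVar("U")

def cast_nullable(value: Optional[T], convert: Callable[[T], U]) -> Optional[U]:
    # None propagates unchanged; otherwise the conversion is applied to the payload,
    # mirroring match(...) { None -> None, Some{x} -> Some{convert(x)} }.
    return None if value is None else convert(value)

print(cast_nullable(None, int))   # None
print(cast_nullable("42", int))   # 42
print(cast_nullable(7, float))    # 7.0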
7,033
1,199
// Some ideas of how to implement hierarchical ("sub-command") completion with // readline, where each sub-command can have its own completion options. // // This is a very simple implementation just to demonstrate a point. In a // realistic program things would be better factored out. Note that using // globals at least to some degree is unavoidable since readline takes bare // functions pointers into C code, so there's no way to pass in state/context. // Luckily, command-line completion is handled in a singple place in an // application anyway. // // <NAME> [http://eli.thegreenplace.net] // This code is in the public domain. #include <stdio.h> #include <stdlib.h> #include <string.h> #include <algorithm> #include <iostream> #include <map> #include <string> #include <unordered_map> #include <vector> #include <readline/history.h> #include <readline/readline.h> #include "utils.h" // Maps a command to a vector of subcommands it supports. using CommandVocabulary = std::map<std::string, std::vector<std::string>>; CommandVocabulary command_vocabulary = { // "file" is a special command with no subcommands. {"file", {}}, {"eat", {"breakfast", "dinner", "lunch", "snack"}}, {"play", {"cards", "chess", "go"}}, {"walk", {"left", "right", "straight"}}}; // All supported commands - populated from command_vocabulary in main(). std::vector<std::string> all_commands; // Determines if we're supposed to complete a sub-command now. Takes the input // line typed in so far, split to tokens, and the cursor location. Returns the // command name if a subcommand is expected (the command is always the first // token on the line), and "" if it seems like we're still completing the // command itself. std::string find_command(const std::vector<Token>& tokens, int cursor) { if (tokens.size() == 0) { return ""; } const Token& command = tokens[0]; if (static_cast<size_t>(cursor) > command.buf_index + command.text.size()) { return command.text; } else { return ""; } } char** completer(const char* text, int start, int end) { std::vector<Token> line_tokens = tokenize_line_buffer(rl_line_buffer); std::string command = find_command(line_tokens, start); // The "file" command is special and must be intercepted early, so that we // can return nullptr from here before setting rl_attempted_completion_over. if (command == "file") { // Request filename completion. return nullptr; } // Disable default filename completion even if we don't find completion // matches. rl_attempted_completion_over = 1; // Find which vocabulary to auto-complete from; if we're now completing the // command, point to all_commands. If the command was already entered, find // the subcommands vocabulary to complete. // Note: this will perform subcommand completion for every token after the // first one (which is the command). This can be easily avoided if necessary, // by checking how many tokens the line already has. std::vector<std::string>* vocabulary; if (command == "") { vocabulary = &all_commands; } else { auto command_iter = command_vocabulary.find(command); if (command_iter == command_vocabulary.end()) { return nullptr; } vocabulary = &(command_iter->second); } // Filter out all words in the vocabulary that do not begin with `text`. 
std::string textstr(text); std::vector<std::string> matches; std::copy_if(vocabulary->begin(), vocabulary->end(), std::back_inserter(matches), [&textstr](const std::string& s) { return (s.size() >= textstr.size() && s.compare(0, textstr.size(), textstr) == 0); }); if (matches.empty()) { return nullptr; } // See the readline-complete-nogen.cpp sample for more details on what is // returned from this function. char** array = static_cast<char**>(malloc((2 + matches.size()) * sizeof(*array))); array[0] = strdup(longest_common_prefix(textstr, matches).c_str()); size_t ptr = 1; for (const auto& m : matches) { array[ptr++] = strdup(m.c_str()); } array[ptr] = nullptr; return array; } int main(int argc, char** argv) { printf("Welcome! You can exit by pressing Ctrl+C at any time...\n"); for (auto const& kv : command_vocabulary) { all_commands.push_back(kv.first); } // Register our custom completer with readline. rl_attempted_completion_function = completer; char* buf; while ((buf = readline(">> ")) != nullptr) { if (strlen(buf) > 0) { add_history(buf); } printf("[%s]\n", buf); // readline malloc's a new buffer every time. free(buf); } return 0; }
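For comparison, the same sub-command completion idea can be sketched with Python's readline module, where the completer is a callable taking (text, state) rather than a bare C function pointer; the command vocabulary below mirrors the sample above and is purely illustrative:

import readline

COMMANDS = {
    "file": [],  # the C++ sample defers to filename completion here; this sketch just yields no matches
    "eat": ["breakfast", "dinner", "lunch", "snack"],
    "play": ["cards", "chess", "go"],
    "walk": ["left", "right", "straight"],
}

def completer(text, state):
    # Decide whether we are still completing the command or already a sub-command.
    buffer = readline.get_line_buffer()
    tokens = buffer.split()
    if not tokens or (len(tokens) == 1 and not buffer.endswith(" ")):
        vocabulary = list(COMMANDS)
    else:
        vocabulary = COMMANDS.get(tokens[0], [])
    matches = [word for word in vocabulary if word.startswith(text)]
    return matches[state] if state < len(matches) else None

readline.set_completer(completer)
readline.parse_and_bind("tab: complete")

try:
    while True:
        print(f"[{input('>> ')}]")
except (EOFError, KeyboardInterrupt):
    pass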
1,553
2,743
# -*- coding: utf-8 -*- # Generated by Django 1.10.7 on 2017-08-25 06:52 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('contenttypes', '0002_remove_content_type_name'), ('common', '0007_auto_20170118_1758'), ] operations = [ migrations.CreateModel( name='ErrorLogEntry', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('namespace', models.CharField(max_length=128, verbose_name='Namespace')), ('object_id', models.PositiveIntegerField(blank=True, null=True)), ('datetime', models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='Date time')), ('result', models.TextField(blank=True, null=True, verbose_name='Result')), ('content_type', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='error_log_content_type', to='contenttypes.ContentType')), ], options={ 'ordering': ('datetime',), 'verbose_name': 'Error log entry', 'verbose_name_plural': 'Error log entries', }, ), ]
616
669
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include "core/framework/random_generator.h" namespace onnxruntime { namespace cuda { template <typename T> void DropoutKernelImpl(const cudaDeviceProp& prop, cudaStream_t stream, const int64_t N, const int64_t mask_element_count, const float ratio, PhiloxGenerator& generator, const T* X_data, T* Y_data, void* mask_data, bool use_bitmask); } // namespace cuda } // namespace onnxruntime
208
3,353
// // ======================================================================== // Copyright (c) 1995-2021 Mort Bay Consulting Pty Ltd and others. // // This program and the accompanying materials are made available under the // terms of the Eclipse Public License v. 2.0 which is available at // https://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 // which is available at https://www.apache.org/licenses/LICENSE-2.0. // // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 // ======================================================================== // package org.eclipse.jetty.http3.qpack.internal.metadata; import org.eclipse.jetty.http.HttpField; import org.eclipse.jetty.http.HttpHeader; public class StaticTableHttpField extends HttpField { private final Object _value; public StaticTableHttpField(HttpHeader header, String name, String valueString, Object value) { super(header, name, valueString); if (value == null) throw new IllegalArgumentException(); _value = value; } public StaticTableHttpField(HttpHeader header, String valueString, Object value) { this(header, header.asString(), valueString, value); } public StaticTableHttpField(String name, String valueString, Object value) { super(name, valueString); if (value == null) throw new IllegalArgumentException(); _value = value; } public Object getStaticValue() { return _value; } @Override public String toString() { return super.toString() + "(evaluated)"; } }
539
370
<reponame>Jenny19880324/suitesparse-metis-for-windows<filename>SuiteSparse/GraphBLAS/Source/GrB_finalize.c<gh_stars>100-1000 //------------------------------------------------------------------------------ // GrB_finalize: finalize GraphBLAS //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, <NAME>, (c) 2017-2018, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // GrB_finalize must be called as the last GraphBLAS function, per the // GraphBLAS C API Specification. #include "GB.h" GrB_Info GrB_finalize ( ) { //-------------------------------------------------------------------------- // destroy the queue //-------------------------------------------------------------------------- GB_CRITICAL (GB_queue_destroy ( )) ; //-------------------------------------------------------------------------- // return result //-------------------------------------------------------------------------- return (GrB_SUCCESS) ; }
261
568
<gh_stars>100-1000 package com.fincatto.documentofiscal.cte300.classes.nota; import com.fincatto.documentofiscal.DFBase; import com.fincatto.documentofiscal.validadores.DFStringValidador; import org.simpleframework.xml.Element; import org.simpleframework.xml.Namespace; import org.simpleframework.xml.Root; /** * @author Caio * @info Information on the railways involved */ @Root(name = "ferroEnv") @Namespace(reference = "http://www.portalfiscal.inf.br/cte") public class CTeNotaInfoCTeNormalInfoModalFerroviarioTrafegoMutuoFerroviasEnvolvidas extends DFBase { private static final long serialVersionUID = -7408236804856205178L; @Element(name = "CNPJ") private String cnpj; @Element(name = "cInt", required = false) private String codigoInterno; @Element(name = "IE", required = false) private String inscricaoEstadual; @Element(name = "xNome") private String razaoSocial; @Element(name = "enderFerro") private CTeNotaEnderecoFerrovia endereco; public CTeNotaInfoCTeNormalInfoModalFerroviarioTrafegoMutuoFerroviasEnvolvidas() { this.cnpj = null; this.codigoInterno = null; this.inscricaoEstadual = null; this.razaoSocial = null; this.endereco = null; } public String getCnpj() { return this.cnpj; } /** * CNPJ number<br> * Provide the CNPJ of the railway involved. If the railway involved is not registered under a CNPJ, the field must be filled with zeros. Include the non-significant zeros. */ public void setCnpj(final String cnpj) { DFStringValidador.cnpj(cnpj); this.cnpj = cnpj; } public String getCodigoInterno() { return this.codigoInterno; } /** * Internal code of the railway involved<br> * For the carrier's use */ public void setCodigoInterno(final String codigoInterno) { DFStringValidador.tamanho10(codigoInterno, "Código interno da Ferrovia envolvida"); this.codigoInterno = codigoInterno; } public String getInscricaoEstadual() { return this.inscricaoEstadual; } /** * State tax registration (Inscrição Estadual) */ public void setInscricaoEstadual(final String inscricaoEstadual) { DFStringValidador.inscricaoEstadual(inscricaoEstadual); this.inscricaoEstadual = inscricaoEstadual; } public String getRazaoSocial() { return this.razaoSocial; } /** * Corporate name or name */ public void setRazaoSocial(final String razaoSocial) { DFStringValidador.tamanho2ate60(razaoSocial, "Razão Social ou Nome"); this.razaoSocial = razaoSocial; } public CTeNotaEnderecoFerrovia getEndereco() { return this.endereco; } /** * Address data of the railway involved */ public void setEndereco(final CTeNotaEnderecoFerrovia endereco) { this.endereco = endereco; } }
1,269
381
package org.apache.helix.monitoring.metrics.model; /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ import org.apache.helix.monitoring.mbeans.dynamicMBeans.DynamicMetric; import org.apache.helix.monitoring.mbeans.dynamicMBeans.SimpleDynamicMetric; /** * Represents a count metric and defines methods to help with calculation. A count metric gives a * gauge value of a certain property. */ public abstract class CountMetric extends SimpleDynamicMetric<Long> implements Metric<Long> { /** * Instantiates a new count metric. * * @param metricName the metric name * @param initCount the initial count */ public CountMetric(String metricName, long initCount) { super(metricName, initCount); } /** * Increment the metric by the input count. * * @param count */ public void increment(long count) { updateValue(getValue() + count); } @Override public String getMetricName() { return _metricName; } @Override public String toString() { return String.format("Metric %s's count is %d", getMetricName(), getValue()); } @Override public Long getLastEmittedMetricValue() { return getValue(); } @Override public DynamicMetric getDynamicMetric() { return this; } }
583
527
/************************************************************************* * * Copyright 2016 Realm Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * **************************************************************************/ #ifndef REALM_GROUP_SHARED_HPP #define REALM_GROUP_SHARED_HPP #include <functional> #include <limits> #include <realm/util/features.h> #include <realm/util/thread.hpp> #include <realm/util/interprocess_condvar.hpp> #include <realm/util/interprocess_mutex.hpp> #include <realm/group.hpp> #include <realm/group_shared_options.hpp> #include <realm/handover_defs.hpp> #include <realm/impl/transact_log.hpp> #include <realm/metrics/metrics.hpp> #include <realm/replication.hpp> #include <realm/version_id.hpp> namespace realm { namespace _impl { class SharedGroupFriend; class WriteLogCollector; } /// Thrown by SharedGroup::open() if the lock file is already open in another /// process which can't share mutexes with this process struct IncompatibleLockFile : std::runtime_error { IncompatibleLockFile(const std::string& msg) : std::runtime_error("Incompatible lock file. " + msg) { } }; /// Thrown by SharedGroup::open() if the type of history /// (Replication::HistoryType) in the opened Realm file is incompatible with the /// mode in which the Realm file is opened. For example, if there is a mismatch /// between the history type in the file, and the history type associated with /// the replication plugin passed to SharedGroup::open(). /// /// This exception will also be thrown if the history schema version is lower /// than required, and no migration is possible /// (Replication::is_upgradable_history_schema()). struct IncompatibleHistories : util::File::AccessError { IncompatibleHistories(const std::string& msg, const std::string& path) : util::File::AccessError("Incompatible histories. " + msg, path) { } }; /// A SharedGroup facilitates transactions. /// /// When multiple threads or processes need to access a database /// concurrently, they must do so using transactions. By design, /// Realm does not allow for multiple threads (or processes) to /// share a single instance of SharedGroup. Instead, each concurrently /// executing thread or process must use a separate instance of /// SharedGroup. /// /// Each instance of SharedGroup manages a single transaction at a /// time. That transaction can be either a read transaction, or a /// write transaction. /// /// Utility classes ReadTransaction and WriteTransaction are provided /// to make it safe and easy to work with transactions in a scoped /// manner (by means of the RAII idiom). However, transactions can /// also be explicitly started (begin_read(), begin_write()) and /// stopped (end_read(), commit(), rollback()). /// /// If a transaction is active when the SharedGroup is destroyed, that /// transaction is implicitly terminated, either by a call to /// end_read() or rollback(). /// /// Two processes that want to share a database file must reside on /// the same host. 
/// /// /// Desired exception behavior (not yet fully implemented) /// ------------------------------------------------------ /// /// - If any data access API function throws an unexpected exception during a /// read transaction, the shared group accessor is left in state "error /// during read". /// /// - If any data access API function throws an unexpected exception during a /// write transaction, the shared group accessor is left in state "error /// during write". /// /// - If SharedGroup::begin_write() or SharedGroup::begin_read() throws an /// unexpected exception, the shared group accessor is left in state "no /// transaction in progress". /// /// - SharedGroup::end_read() and SharedGroup::rollback() do not throw. /// /// - If SharedGroup::commit() throws an unexpected exception, the shared group /// accessor is left in state "error during write" and the transaction was /// not committed. /// /// - If SharedGroup::advance_read() or SharedGroup::promote_to_write() throws /// an unexpected exception, the shared group accessor is left in state /// "error during read". /// /// - If SharedGroup::commit_and_continue_as_read() or /// SharedGroup::rollback_and_continue_as_read() throws an unexpected /// exception, the shared group accessor is left in state "error during /// write". /// /// It has not yet been decided exactly what an "unexpected exception" is, but /// `std::bad_alloc` is surely one example. On the other hand, an expected /// exception is one that is mentioned in the function specific documentation, /// and is used to abort an operation due to a special, but expected condition. /// /// States /// ------ /// /// - A newly created shared group accessor is in state "no transaction in /// progress". /// /// - In state "error during read", almost all Realm API functions are /// illegal on the connected group of accessors. The only valid operations /// are destruction of the shared group, and SharedGroup::end_read(). If /// SharedGroup::end_read() is called, the new state becomes "no transaction /// in progress". /// /// - In state "error during write", almost all Realm API functions are /// illegal on the connected group of accessors. The only valid operations /// are destruction of the shared group, and SharedGroup::rollback(). If /// SharedGroup::end_write() is called, the new state becomes "no transaction /// in progress" class SharedGroup { public: /// \brief Same as calling the corresponding version of open() on a instance /// constructed in the unattached state. Exception safety note: if the /// `upgrade_callback` throws, then the file will be closed properly and the /// upgrade will be aborted. explicit SharedGroup(const std::string& file, bool no_create = false, const SharedGroupOptions options = SharedGroupOptions()); /// \brief Same as calling the corresponding version of open() on a instance /// constructed in the unattached state. Exception safety note: if the /// `upgrade_callback` throws, then the file will be closed properly and /// the upgrade will be aborted. explicit SharedGroup(Replication& repl, const SharedGroupOptions options = SharedGroupOptions()); struct unattached_tag { }; /// Create a SharedGroup instance in its unattached state. It may /// then be attached to a database file later by calling /// open(). You may test whether this instance is currently in its /// attached state by calling is_attached(). Calling any other /// function (except the destructor) while in the unattached state /// has undefined behavior. 
SharedGroup(unattached_tag) noexcept; ~SharedGroup() noexcept; // Disable copying to prevent accessor errors. If you really want another // instance, open another SharedGroup object on the same file. SharedGroup(const SharedGroup&) = delete; SharedGroup& operator=(const SharedGroup&) = delete; /// Attach this SharedGroup instance to the specified database file. /// /// While at least one instance of SharedGroup exists for a specific /// database file, a "lock" file will be present too. The lock file will be /// placed in the same directory as the database file, and its name will be /// derived by appending ".lock" to the name of the database file. /// /// When multiple SharedGroup instances refer to the same file, they must /// specify the same durability level, otherwise an exception will be /// thrown. /// /// \param file Filesystem path to a Realm database file. /// /// \param no_create If the database file does not already exist, it will be /// created (unless this is set to true.) When multiple threads are involved, /// it is safe to let the first thread, that gets to it, create the file. /// /// \param options See SharedGroupOptions for details of each option. /// Sensible defaults are provided if this parameter is left out. /// /// Calling open() on a SharedGroup instance that is already in the attached /// state has undefined behavior. /// /// \throw util::File::AccessError If the file could not be opened. If the /// reason corresponds to one of the exception types that are derived from /// util::File::AccessError, the derived exception type is thrown. Note that /// InvalidDatabase is among these derived exception types. /// /// \throw FileFormatUpgradeRequired only if \a SharedGroupOptions::allow_upgrade /// is `false` and an upgrade is required. void open(const std::string& file, bool no_create = false, const SharedGroupOptions options = SharedGroupOptions()); /// Open this group in replication mode. The specified Replication instance /// must remain in existence for as long as the SharedGroup. void open(Replication&, const SharedGroupOptions options = SharedGroupOptions()); /// Close any open database, returning to the unattached state. void close() noexcept; /// A SharedGroup may be created in the unattached state, and then /// later attached to a file with a call to open(). Calling any /// function other than open(), is_attached(), and ~SharedGroup() /// on an unattached instance results in undefined behavior. bool is_attached() const noexcept; /// Reserve disk space now to avoid allocation errors at a later /// point in time, and to minimize on-disk fragmentation. In some /// cases, less fragmentation translates into improved /// performance. /// /// When supported by the system, a call to this function will /// make the database file at least as big as the specified size, /// and cause space on the target device to be allocated (note /// that on many systems on-disk allocation is done lazily by /// default). If the file is already bigger than the specified /// size, the size will be unchanged, and on-disk allocation will /// occur only for the initial section that corresponds to the /// specified size. On systems that do not support preallocation, /// this function has no effect. To know whether preallocation is /// supported by Realm on your platform, call /// util::File::is_prealloc_supported(). /// /// It is an error to call this function on an unattached shared /// group. Doing so will result in undefined behavior. 
void reserve(size_t size_in_bytes); /// Querying for changes: /// /// NOTE: /// "changed" means that one or more commits has been made to the database /// since the SharedGroup (on which wait_for_change() is called) last /// started, committed, promoted or advanced a transaction. If the /// SharedGroup has not yet begun a transaction, "changed" is undefined. /// /// No distinction is made between changes done by another process /// and changes done by another thread in the same process as the caller. /// /// Has db been changed ? bool has_changed(); /// The calling thread goes to sleep until the database is changed, or /// until wait_for_change_release() is called. After a call to /// wait_for_change_release() further calls to wait_for_change() will return /// immediately. To restore the ability to wait for a change, a call to /// enable_wait_for_change() is required. Return true if the database has /// changed, false if it might have. bool wait_for_change(); /// release any thread waiting in wait_for_change() on *this* SharedGroup. void wait_for_change_release(); /// re-enable waiting for change void enable_wait_for_change(); // Transactions: using version_type = _impl::History::version_type; using VersionID = realm::VersionID; /// Thrown by begin_read() if the specified version does not correspond to a /// bound (or tethered) snapshot. struct BadVersion; /// \defgroup group_shared_transactions //@{ /// begin_read() initiates a new read transaction. A read transaction is /// bound to, and provides access to a particular snapshot of the underlying /// Realm (in general the latest snapshot, but see \a version). It cannot be /// used to modify the Realm, and in that sense, a read transaction is not a /// real transaction. /// /// begin_write() initiates a new write transaction. A write transaction /// allows the application to both read and modify the underlying Realm /// file. At most one write transaction can be in progress at any given time /// for a particular underlying Realm file. If another write transaction is /// already in progress, begin_write() will block the caller until the other /// write transaction terminates. No guarantees are made about the order in /// which multiple concurrent requests will be served. /// /// It is an error to call begin_read() or begin_write() on a SharedGroup /// object with an active read or write transaction. /// /// If begin_read() or begin_write() throws, no transaction is initiated, /// and the application may try to initiate a new read or write transaction /// later. /// /// end_read() terminates the active read transaction. If no read /// transaction is active, end_read() does nothing. It is an error to call /// this function on a SharedGroup object with an active write /// transaction. end_read() does not throw. /// /// commit() commits all changes performed in the context of the active /// write transaction, and thereby terminates that transaction. This /// produces a new snapshot in the underlying Realm. commit() returns the /// version associated with the new snapshot. It is an error to call /// commit() when there is no active write transaction. If commit() throws, /// no changes will have been committed, and the transaction will still be /// active, but in a bad state. In that case, the application must either /// call rollback() to terminate the bad transaction (in which case a new /// transaction can be initiated), call close() which also terminates the /// bad transaction, or destroy the SharedGroup object entirely. 
When the /// transaction is in a bad state, the application is not allowed to call /// any method on the Group accessor or on any of its subordinate accessors /// (Table, Row, Descriptor). Note that the transaction is also left in a /// bad state when a modifying operation on any subordinate accessor throws. /// /// rollback() terminates the active write transaction and discards any /// changes performed in the context of it. If no write transaction is /// active, rollback() does nothing. It is an error to call this function in /// a SharedGroup object with an active read transaction. rollback() does /// not throw. /// /// the Group accessor and all subordinate accessors (Table, Row, /// Descriptor) that are obtained in the context of a particular read or /// write transaction will become detached upon termination of that /// transaction, which means that they can no longer be used to access the /// underlying objects. /// /// Subordinate accessors that were detached at the end of the previous /// read or write transaction will not be automatically reattached when a /// new transaction is initiated. The application must reobtain new /// accessors during a new transaction to regain access to the underlying /// objects. /// /// \param version If specified, this must be the version associated with a /// *bound* snapshot. A snapshot is said to be bound (or tethered) if there /// is at least one active read or write transaction bound to it. A read /// transaction is bound to the snapshot that it provides access to. A write /// transaction is bound to the latest snapshot available at the time of /// initiation of the write transaction. If the specified version is not /// associated with a bound snapshot, this function throws BadVersion. /// /// \throw BadVersion Thrown by begin_read() if the specified version does /// not correspond to a bound (or tethered) snapshot. const Group& begin_read(VersionID version = VersionID()); void end_read() noexcept; Group& begin_write(); // Return true (and take the write lock) if there is no other write // in progress. In case of contention return false immediately. // If the write lock is obtained, also provide the Group associated // with the SharedGroup for further operations. bool try_begin_write(Group*& group); version_type commit(); void rollback() noexcept; // report statistics of last commit done on THIS shared group. // The free space reported is what can be expected to be freed // by compact(). This may not correspond to the space which is free // at the point where get_stats() is called, since that will include // memory required to hold older versions of data, which still // needs to be available. void get_stats(size_t& free_space, size_t& used_space); //@} enum TransactStage { transact_Ready, transact_Reading, transact_Writing, }; /// Get the current transaction type TransactStage get_transact_stage() const noexcept; /// Get a version id which may be used to request a different SharedGroup /// to start transaction at a specific version. VersionID get_version_of_current_transaction(); /// Report the number of distinct versions currently stored in the database. /// Note: the database only cleans up versions as part of commit, so ending /// a read transaction will not immediately release any versions. uint_fast64_t get_number_of_versions(); /// Compact the database file. /// - The method will throw if called inside a transaction. /// - The method will throw if called in unattached state. 
/// - The method will return false if other SharedGroups are accessing the /// database in which case compaction is not done. This is not /// necessarily an error. /// It will return true following successful compaction. /// While compaction is in progress, attempts by other /// threads or processes to open the database will wait. /// Be warned that resource requirements for compaction is proportional to /// the amount of live data in the database. /// Compaction works by writing the database contents to a temporary /// database file and then replacing the database with the temporary one. /// The name of the temporary file is formed by appending /// ".tmp_compaction_space" to the name of the database /// /// FIXME: This function is not yet implemented in an exception-safe manner, /// therefore, if it throws, the application should not attempt to /// continue. If may not even be safe to destroy the SharedGroup object. /// /// WARNING / FIXME: compact() should NOT be exposed publicly on Windows /// because it's not crash safe! It may corrupt your database if something fails bool compact(); #ifdef REALM_DEBUG void test_ringbuf(); #endif /// To handover a table view, query, linkview or row accessor of type T, you /// must wrap it into a Handover<T> for the transfer. Wrapping and /// unwrapping of a handover object is done by the methods /// 'export_for_handover()' and 'import_from_handover()' declared below. /// 'export_for_handover()' returns a Handover object, and /// 'import_for_handover()' consumes that object, producing a new accessor /// which is ready for use in the context of the importing SharedGroup. /// /// The Handover always creates a new accessor object at the importing side. /// For TableViews, there are 3 forms of handover. /// /// - with payload move: the payload is handed over and ends up as a payload /// held by the accessor at the importing side. The accessor on the /// exporting side will rerun its query and generate a new payload, if /// TableView::sync_if_needed() is called. If the original payload was in /// sync at the exporting side, it will also be in sync at the importing /// side. This is indicated to handover_export() by the argument /// MutableSourcePayload::Move /// /// - with payload copy: a copy of the payload is handed over, so both the /// accessors on the exporting side *and* the accessors created at the /// importing side has their own payload. This is indicated to /// handover_export() by the argument ConstSourcePayload::Copy /// /// - without payload: the payload stays with the accessor on the exporting /// side. On the importing side, the new accessor is created without /// payload. A call to TableView::sync_if_needed() will trigger generation /// of a new payload. This form of handover is indicated to /// handover_export() by the argument ConstSourcePayload::Stay. /// /// For all other (non-TableView) accessors, handover is done with payload /// copy, since the payload is trivial. /// /// Handover *without* payload is useful when you want to ship a tableview /// with its query for execution in a background thread. Handover with /// *payload move* is useful when you want to transfer the result back. /// /// Handover *without* payload or with payload copy is guaranteed *not* to /// change the accessors on the exporting side. /// /// Handover is *not* thread safe and should be carried out /// by the thread that "owns" the involved accessors. 
/// /// Handover is transitive: /// If the object being handed over depends on other views /// (table- or link- ), those objects will be handed over as well. The mode /// of handover (payload copy, payload move, without payload) is applied /// recursively. Note: If you are handing over a tableview dependent upon /// another tableview and using MutableSourcePayload::Move, /// you are on thin ice! /// /// On the importing side, the top-level accessor being created during /// import takes ownership of all other accessors (if any) being created as /// part of the import. /// Type used to support handover of accessors between shared groups. template <typename T> struct Handover; /// thread-safe/const export (mode is Stay or Copy) /// during export, the following operations on the shared group are locked: /// - advance_read(), promote_to_write(), commit_and_continue_as_read(), /// rollback_and_continue_as_read(), close() template <typename T> std::unique_ptr<Handover<T>> export_for_handover(const T& accessor, ConstSourcePayload mode); // specialization for handover of Rows template <typename T> std::unique_ptr<Handover<BasicRow<T>>> export_for_handover(const BasicRow<T>& accessor); // destructive export (mode is Move) template <typename T> std::unique_ptr<Handover<T>> export_for_handover(T& accessor, MutableSourcePayload mode); /// Import an accessor wrapped in a handover object. The import will fail /// if the importing SharedGroup is viewing a version of the database that /// is different from the exporting SharedGroup. The call to /// import_from_handover is not thread-safe. template <typename T> std::unique_ptr<T> import_from_handover(std::unique_ptr<Handover<T>> handover); // We need two cases for handling of LinkViews, because they are ref counted. std::unique_ptr<Handover<LinkView>> export_linkview_for_handover(const LinkViewRef& accessor); LinkViewRef import_linkview_from_handover(std::unique_ptr<Handover<LinkView>> handover); // likewise for Tables. std::unique_ptr<Handover<Table>> export_table_for_handover(const TableRef& accessor); TableRef import_table_from_handover(std::unique_ptr<Handover<Table>> handover); /// When doing handover to background tasks that may be run later, we /// may want to momentarily pin the current version until the other thread /// has retrieved it. /// /// Pinning can be done in both read- and write-transactions, but with different /// semantics. When pinning during a read-transaction, the version pinned is the /// one accessible during the read-transaction. When pinning during a write-transaction, /// the version pinned will be the last version that was successfully committed to the /// realm file at the point in time when the write-transaction was started. /// /// The release is not thread-safe, so it has to be done on the SharedGroup /// associated with the thread calling unpin_version(), and the SharedGroup /// must be attached to the realm file at the point of unpinning. 
// Pin version for handover (not thread safe) VersionID pin_version(); // Release pinned version (not thread safe) void unpin_version(VersionID version); #if REALM_METRICS std::shared_ptr<metrics::Metrics> get_metrics(); #endif // REALM_METRICS private: struct SharedInfo; struct ReadCount; struct ReadLockInfo { uint_fast64_t m_version = std::numeric_limits<version_type>::max(); uint_fast32_t m_reader_idx = 0; ref_type m_top_ref = 0; size_t m_file_size = 0; }; class ReadLockUnlockGuard; // Member variables size_t m_free_space = 0; size_t m_used_space = 0; Group m_group; ReadLockInfo m_read_lock; uint_fast32_t m_local_max_entry; util::File m_file; util::File::Map<SharedInfo> m_file_map; // Never remapped util::File::Map<SharedInfo> m_reader_map; bool m_wait_for_change_enabled; std::string m_lockfile_path; std::string m_lockfile_prefix; std::string m_db_path; std::string m_coordination_dir; const char* m_key; TransactStage m_transact_stage; util::InterprocessMutex m_writemutex; #ifdef REALM_ASYNC_DAEMON util::InterprocessMutex m_balancemutex; #endif util::InterprocessMutex m_controlmutex; #ifdef REALM_ASYNC_DAEMON util::InterprocessCondVar m_room_to_write; util::InterprocessCondVar m_work_to_do; util::InterprocessCondVar m_daemon_becomes_ready; #endif util::InterprocessCondVar m_new_commit_available; util::InterprocessCondVar m_pick_next_writer; std::function<void(int, int)> m_upgrade_callback; #if REALM_METRICS std::shared_ptr<metrics::Metrics> m_metrics; #endif // REALM_METRICS void do_open(const std::string& file, bool no_create, bool is_backend, const SharedGroupOptions options); // Ring buffer management bool ringbuf_is_empty() const noexcept; size_t ringbuf_size() const noexcept; size_t ringbuf_capacity() const noexcept; bool ringbuf_is_first(size_t ndx) const noexcept; void ringbuf_remove_first() noexcept; size_t ringbuf_find(uint64_t version) const noexcept; ReadCount& ringbuf_get(size_t ndx) noexcept; ReadCount& ringbuf_get_first() noexcept; ReadCount& ringbuf_get_last() noexcept; void ringbuf_put(const ReadCount& v); void ringbuf_expand(); /// Grab a read lock on the snapshot associated with the specified /// version. If `version_id == VersionID()`, a read lock will be grabbed on /// the latest available snapshot. Fails if the snapshot is no longer /// available. /// /// As a side effect update memory mapping to ensure that the ringbuffer /// entries referenced in the readlock info is accessible. /// /// FIXME: It needs to be made more clear exactly under which conditions /// this function fails. Also, why is it useful to promise anything about /// detection of bad versions? Can we really promise enough to make such a /// promise useful to the caller? void grab_read_lock(ReadLockInfo&, VersionID); // Release a specific read lock. The read lock MUST have been obtained by a // call to grab_read_lock(). void release_read_lock(ReadLockInfo&) noexcept; void do_begin_read(VersionID, bool writable); void do_end_read() noexcept; /// return true if write transaction can commence, false otherwise. bool do_try_begin_write(); void do_begin_write(); version_type do_commit(); void do_end_write() noexcept; void set_transact_stage(TransactStage stage) noexcept; /// Returns the version of the latest snapshot. version_type get_version_of_latest_snapshot(); /// Returns the version of the snapshot bound in the current read or write /// transaction. It is an error to call this function when no transaction is /// in progress. 
version_type get_version_of_bound_snapshot() const noexcept; // make sure the given index is within the currently mapped area. // if not, expand the mapped area. Returns true if the area is expanded. bool grow_reader_mapping(uint_fast32_t index); // Must be called only by someone that has a lock on the write // mutex. void low_level_commit(uint_fast64_t new_version); void do_async_commits(); /// Upgrade file format and/or history schema void upgrade_file_format(bool allow_file_format_upgrade, int target_file_format_version, int current_hist_schema_version, int target_hist_schema_version); //@{ /// See LangBindHelper. template <class O> void advance_read(O* observer, VersionID); template <class O> void promote_to_write(O* observer); version_type commit_and_continue_as_read(); template <class O> void rollback_and_continue_as_read(O* observer); //@} /// Returns true if, and only if _impl::History::update_early_from_top_ref() /// was called during the execution of this function. template <class O> bool do_advance_read(O* observer, VersionID, _impl::History&); /// If there is an associated \ref Replication object, then this function /// returns `repl->get_history()` where `repl` is that Replication object, /// otherwise this function returns null. _impl::History* get_history(); int get_file_format_version() const noexcept; /// finish up the process of starting a write transaction. Internal use only. void finish_begin_write(); friend class _impl::SharedGroupFriend; }; inline void SharedGroup::get_stats(size_t& free_space, size_t& used_space) { free_space = m_free_space; used_space = m_used_space; } class ReadTransaction { public: ReadTransaction(SharedGroup& sg) : m_shared_group(sg) { m_shared_group.begin_read(); // Throws } ~ReadTransaction() noexcept { m_shared_group.end_read(); } bool has_table(StringData name) const noexcept { return get_group().has_table(name); } ConstTableRef get_table(size_t table_ndx) const { return get_group().get_table(table_ndx); // Throws } ConstTableRef get_table(StringData name) const { return get_group().get_table(name); // Throws } const Group& get_group() const noexcept; /// Get the version of the snapshot to which this read transaction is bound. SharedGroup::version_type get_version() const noexcept; private: SharedGroup& m_shared_group; }; class WriteTransaction { public: WriteTransaction(SharedGroup& sg) : m_shared_group(&sg) { m_shared_group->begin_write(); // Throws } ~WriteTransaction() noexcept { if (m_shared_group) m_shared_group->rollback(); } bool has_table(StringData name) const noexcept { return get_group().has_table(name); } TableRef get_table(size_t table_ndx) const { return get_group().get_table(table_ndx); // Throws } TableRef get_table(StringData name) const { return get_group().get_table(name); // Throws } TableRef add_table(StringData name, bool require_unique_name = true) const { return get_group().add_table(name, require_unique_name); // Throws } TableRef get_or_add_table(StringData name, bool* was_added = nullptr) const { return get_group().get_or_add_table(name, was_added); // Throws } Group& get_group() const noexcept; /// Get the version of the snapshot on which this write transaction is /// based. 
SharedGroup::version_type get_version() const noexcept; SharedGroup::version_type commit() { REALM_ASSERT(m_shared_group); SharedGroup::version_type new_version = m_shared_group->commit(); m_shared_group = nullptr; return new_version; } void rollback() noexcept { REALM_ASSERT(m_shared_group); m_shared_group->rollback(); m_shared_group = nullptr; } private: SharedGroup* m_shared_group; }; // Implementation: struct SharedGroup::BadVersion : std::exception { }; inline SharedGroup::SharedGroup(const std::string& file, bool no_create, const SharedGroupOptions options) : m_group(Group::shared_tag()) , m_upgrade_callback(std::move(options.upgrade_callback)) { open(file, no_create, options); // Throws } inline SharedGroup::SharedGroup(unattached_tag) noexcept : m_group(Group::shared_tag()) { } inline SharedGroup::SharedGroup(Replication& repl, const SharedGroupOptions options) : m_group(Group::shared_tag()) , m_upgrade_callback(std::move(options.upgrade_callback)) { open(repl, options); // Throws } inline void SharedGroup::open(const std::string& path, bool no_create_file, const SharedGroupOptions options) { // Exception safety: Since open() is called from constructors, if it throws, // it must leave the file closed. bool is_backend = false; do_open(path, no_create_file, is_backend, options); // Throws } inline void SharedGroup::open(Replication& repl, const SharedGroupOptions options) { // Exception safety: Since open() is called from constructors, if it throws, // it must leave the file closed. REALM_ASSERT(!is_attached()); repl.initialize(*this); // Throws typedef _impl::GroupFriend gf; gf::set_replication(m_group, &repl); std::string file = repl.get_database_path(); bool no_create = false; bool is_backend = false; do_open(file, no_create, is_backend, options); // Throws } inline bool SharedGroup::is_attached() const noexcept { return m_file_map.is_attached(); } inline SharedGroup::TransactStage SharedGroup::get_transact_stage() const noexcept { return m_transact_stage; } inline SharedGroup::version_type SharedGroup::get_version_of_bound_snapshot() const noexcept { return m_read_lock.m_version; } class SharedGroup::ReadLockUnlockGuard { public: ReadLockUnlockGuard(SharedGroup& shared_group, ReadLockInfo& read_lock) noexcept : m_shared_group(shared_group) , m_read_lock(&read_lock) { } ~ReadLockUnlockGuard() noexcept { if (m_read_lock) m_shared_group.release_read_lock(*m_read_lock); } void release() noexcept { m_read_lock = 0; } private: SharedGroup& m_shared_group; ReadLockInfo* m_read_lock; }; template <typename T> struct SharedGroup::Handover { std::unique_ptr<typename T::HandoverPatch> patch; std::unique_ptr<T> clone; VersionID version; }; template <typename T> std::unique_ptr<SharedGroup::Handover<T>> SharedGroup::export_for_handover(const T& accessor, ConstSourcePayload mode) { if (m_transact_stage != transact_Reading) throw LogicError(LogicError::wrong_transact_state); std::unique_ptr<Handover<T>> result(new Handover<T>()); // Implementation note: // often, the return value from clone will be T*, BUT it may be ptr to some // base of T instead, so we must cast it to T*. This is always safe, because // no matter the type, clone() will clone the actual accessor instance, and // hence return an instance of the same type. 
result->clone.reset(dynamic_cast<T*>(accessor.clone_for_handover(result->patch, mode).release())); result->version = get_version_of_current_transaction(); return move(result); } template <typename T> std::unique_ptr<SharedGroup::Handover<BasicRow<T>>> SharedGroup::export_for_handover(const BasicRow<T>& accessor) { if (m_transact_stage != transact_Reading) throw LogicError(LogicError::wrong_transact_state); std::unique_ptr<Handover<BasicRow<T>>> result(new Handover<BasicRow<T>>()); // See implementation note above. result->clone.reset(dynamic_cast<BasicRow<T>*>(accessor.clone_for_handover(result->patch).release())); result->version = get_version_of_current_transaction(); return move(result); } template <typename T> std::unique_ptr<SharedGroup::Handover<T>> SharedGroup::export_for_handover(T& accessor, MutableSourcePayload mode) { if (m_transact_stage != transact_Reading) throw LogicError(LogicError::wrong_transact_state); std::unique_ptr<Handover<T>> result(new Handover<T>()); // see implementation note above. result->clone.reset(dynamic_cast<T*>(accessor.clone_for_handover(result->patch, mode).release())); result->version = get_version_of_current_transaction(); return move(result); } template <typename T> std::unique_ptr<T> SharedGroup::import_from_handover(std::unique_ptr<SharedGroup::Handover<T>> handover) { if (handover->version != get_version_of_current_transaction()) { throw BadVersion(); } std::unique_ptr<T> result = move(handover->clone); result->apply_and_consume_patch(handover->patch, m_group); return result; } template <class O> inline void SharedGroup::advance_read(O* observer, VersionID version_id) { if (m_transact_stage != transact_Reading) throw LogicError(LogicError::wrong_transact_state); // It is an error if the new version precedes the currently bound one. if (version_id.version < m_read_lock.m_version) throw LogicError(LogicError::bad_version); _impl::History* hist = get_history(); // Throws if (!hist) throw LogicError(LogicError::no_history); do_advance_read(observer, version_id, *hist); // Throws } template <class O> inline void SharedGroup::promote_to_write(O* observer) { if (m_transact_stage != transact_Reading) throw LogicError(LogicError::wrong_transact_state); _impl::History* hist = get_history(); // Throws if (!hist) throw LogicError(LogicError::no_history); do_begin_write(); // Throws try { VersionID version = VersionID(); // Latest bool history_updated = do_advance_read(observer, version, *hist); // Throws Replication* repl = m_group.get_replication(); REALM_ASSERT(repl); // Presence of `repl` follows from the presence of `hist` version_type current_version = m_read_lock.m_version; repl->initiate_transact(current_version, history_updated); // Throws // If the group has no top array (top_ref == 0), create a new node // structure for an empty group now, to be ready for modifications. See // also Group::attach_shared(). using gf = _impl::GroupFriend; gf::create_empty_group_when_missing(m_group); // Throws } catch (...) { do_end_write(); throw; } set_transact_stage(transact_Writing); } template <class O> inline void SharedGroup::rollback_and_continue_as_read(O* observer) { if (m_transact_stage != transact_Writing) throw LogicError(LogicError::wrong_transact_state); _impl::History* hist = get_history(); // Throws if (!hist) throw LogicError(LogicError::no_history); // Mark all managed space (beyond the attached file) as free. 
using gf = _impl::GroupFriend; gf::reset_free_space_tracking(m_group); // Throws BinaryData uncommitted_changes = hist->get_uncommitted_changes(); // FIXME: We are currently creating two transaction log parsers, one here, // and one in advance_transact(). That is wasteful as the parser creation is // expensive. _impl::SimpleInputStream in(uncommitted_changes.data(), uncommitted_changes.size()); _impl::TransactLogParser parser; // Throws _impl::TransactReverser reverser; parser.parse(in, reverser); // Throws if (observer && uncommitted_changes.size()) { _impl::ReversedNoCopyInputStream reversed_in(reverser); parser.parse(reversed_in, *observer); // Throws observer->parse_complete(); // Throws } ref_type top_ref = m_read_lock.m_top_ref; size_t file_size = m_read_lock.m_file_size; _impl::ReversedNoCopyInputStream reversed_in(reverser); gf::advance_transact(m_group, top_ref, file_size, reversed_in); // Throws do_end_write(); Replication* repl = gf::get_replication(m_group); REALM_ASSERT(repl); // Presence of `repl` follows from the presence of `hist` repl->abort_transact(); set_transact_stage(transact_Reading); } template <class O> inline bool SharedGroup::do_advance_read(O* observer, VersionID version_id, _impl::History& hist) { ReadLockInfo new_read_lock; grab_read_lock(new_read_lock, version_id); // Throws REALM_ASSERT(new_read_lock.m_version >= m_read_lock.m_version); if (new_read_lock.m_version == m_read_lock.m_version) { release_read_lock(new_read_lock); // _impl::History::update_early_from_top_ref() was not called return false; } ReadLockUnlockGuard g(*this, new_read_lock); { version_type new_version = new_read_lock.m_version; size_t new_file_size = new_read_lock.m_file_size; ref_type new_top_ref = new_read_lock.m_top_ref; // Synchronize readers view of the file SlabAlloc& alloc = m_group.m_alloc; alloc.update_reader_view(new_file_size); hist.update_early_from_top_ref(new_version, new_file_size, new_top_ref); // Throws } if (observer) { // This has to happen in the context of the originally bound snapshot // and while the read transaction is still in a fully functional state. _impl::TransactLogParser parser; version_type old_version = m_read_lock.m_version; version_type new_version = new_read_lock.m_version; _impl::ChangesetInputStream in(hist, old_version, new_version); parser.parse(in, *observer); // Throws observer->parse_complete(); // Throws } // The old read lock must be retained for as long as the change history is // accessed (until Group::advance_transact() returns). This ensures that the // oldest needed changeset remains in the history, even when the history is // implemented as a separate unversioned entity outside the Realm (i.e., the // old implementation and ShortCircuitHistory in // test_lang_Bind_helper.cpp). On the other hand, if it had been the case, // that the history was always implemented as a versioned entity, that was // part of the Realm state, then it would not have been necessary to retain // the old read lock beyond this point. 
{ version_type old_version = m_read_lock.m_version; version_type new_version = new_read_lock.m_version; ref_type new_top_ref = new_read_lock.m_top_ref; size_t new_file_size = new_read_lock.m_file_size; _impl::ChangesetInputStream in(hist, old_version, new_version); m_group.advance_transact(new_top_ref, new_file_size, in); // Throws } g.release(); release_read_lock(m_read_lock); m_read_lock = new_read_lock; return true; // _impl::History::update_early_from_top_ref() was called } inline _impl::History* SharedGroup::get_history() { using gf = _impl::GroupFriend; if (Replication* repl = gf::get_replication(m_group)) return repl->get_history(); return 0; } inline int SharedGroup::get_file_format_version() const noexcept { using gf = _impl::GroupFriend; return gf::get_file_format_version(m_group); } // The purpose of this class is to give internal access to some, but // not all of the non-public parts of the SharedGroup class. class _impl::SharedGroupFriend { public: static Group& get_group(SharedGroup& sg) noexcept { return sg.m_group; } template <class O> static void advance_read(SharedGroup& sg, O* obs, SharedGroup::VersionID ver) { sg.advance_read(obs, ver); // Throws } template <class O> static void promote_to_write(SharedGroup& sg, O* obs) { sg.promote_to_write(obs); // Throws } static SharedGroup::version_type commit_and_continue_as_read(SharedGroup& sg) { return sg.commit_and_continue_as_read(); // Throws } template <class O> static void rollback_and_continue_as_read(SharedGroup& sg, O* obs) { sg.rollback_and_continue_as_read(obs); // Throws } static void async_daemon_open(SharedGroup& sg, const std::string& file) { bool no_create = true; bool is_backend = true; SharedGroupOptions options; options.durability = SharedGroupOptions::Durability::Async; options.encryption_key = nullptr; options.allow_file_format_upgrade = false; sg.do_open(file, no_create, is_backend, options); // Throws } static int get_file_format_version(const SharedGroup& sg) noexcept { return sg.get_file_format_version(); } static SharedGroup::version_type get_version_of_latest_snapshot(SharedGroup& sg) { return sg.get_version_of_latest_snapshot(); } static SharedGroup::version_type get_version_of_bound_snapshot(const SharedGroup& sg) noexcept { return sg.get_version_of_bound_snapshot(); } }; inline const Group& ReadTransaction::get_group() const noexcept { using sgf = _impl::SharedGroupFriend; return sgf::get_group(m_shared_group); } inline SharedGroup::version_type ReadTransaction::get_version() const noexcept { using sgf = _impl::SharedGroupFriend; return sgf::get_version_of_bound_snapshot(m_shared_group); } inline Group& WriteTransaction::get_group() const noexcept { REALM_ASSERT(m_shared_group); using sgf = _impl::SharedGroupFriend; return sgf::get_group(*m_shared_group); } inline SharedGroup::version_type WriteTransaction::get_version() const noexcept { using sgf = _impl::SharedGroupFriend; return sgf::get_version_of_bound_snapshot(*m_shared_group); } } // namespace realm #endif // REALM_GROUP_SHARED_HPP
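// Usage sketch (editorial addition, not part of the upstream Realm header).
// It exercises only the SharedGroup / ReadTransaction / WriteTransaction API
// declared above, which is assumed to be in scope. The file name "example.realm"
// and the table name "people" are illustrative assumptions, and the SharedGroup
// constructor is assumed to provide its usual defaults for the no_create/options
// parameters.
inline void shared_group_usage_sketch()
{
    realm::SharedGroup sg("example.realm"); // attach to (or create) the Realm file

    {
        // RAII write transaction: ~WriteTransaction() rolls back unless
        // commit() was called first.
        realm::WriteTransaction wt(sg);
        realm::TableRef table = wt.get_or_add_table("people");
        static_cast<void>(table); // row/column edits would go through the Table API
        wt.commit();              // durably commits the snapshot and ends the transaction
    }

    {
        // RAII read transaction: bound to the latest snapshot at creation time,
        // ended by ~ReadTransaction().
        realm::ReadTransaction rt(sg);
        if (rt.has_table("people")) {
            realm::ConstTableRef table = rt.get_table("people");
            static_cast<void>(table);
        }
    }
}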
<reponame>zakharchenkoAndrii/expo /* * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ #pragma once #include <ABI43_0_0React/ABI43_0_0renderer/components/view/ViewEventEmitter.h> namespace ABI43_0_0facebook { namespace ABI43_0_0React { class PickerEventEmitter : public ViewEventEmitter { public: using ViewEventEmitter::ViewEventEmitter; struct PickerIOSChangeEvent { std::string newValue; int newIndex; }; void onChange(PickerIOSChangeEvent event) const; }; } // namespace ABI43_0_0React } // namespace ABI43_0_0facebook
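// Usage sketch (editorial addition, not part of the upstream header): the native
// view / ShadowNode glue for the picker component would typically forward a
// selection change through the emitter roughly like this. The helper name and the
// way the emitter instance is obtained are assumptions; only the API declared
// above is used.
namespace ABI43_0_0facebook {
namespace ABI43_0_0React {

inline void emitPickerChange(const PickerEventEmitter &emitter,
                             const std::string &newValue,
                             int newIndex) {
  PickerEventEmitter::PickerIOSChangeEvent event;
  event.newValue = newValue;
  event.newIndex = newIndex;
  emitter.onChange(event); // dispatches the change event towards JS
}

} // namespace ABI43_0_0React
} // namespace ABI43_0_0facebook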
<gh_stars>1000+ { "name": "SwiftJokes", "version": "1.0.0", "summary": "An easy-to-use and hilarious joke provider.", "description": "A simple Cocoa Touch framework to asynchronously provide jokes from various sources.", "homepage": "https://github.com/Davidde94/SwiftJokes", "license": "MIT", "authors": { "<NAME>": "<EMAIL>" }, "platforms": { "ios": "12.0" }, "swift_versions": "5", "source": { "git": "https://github.com/Davidde94/SwiftJokes.git", "tag": "1.0.0" }, "source_files": "SwiftJokes/**/*.{swift}", "swift_version": "5" }
<filename>ide/xml.tax/lib/src/org/netbeans/tax/TreeObject.java<gh_stars>1000+ /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.netbeans.tax; import java.io.PrintStream; import java.beans.PropertyChangeListener; import org.netbeans.tax.event.TreeEventManager; import org.netbeans.tax.event.TreeEventModel; import org.netbeans.tax.event.TreeEvent; import org.netbeans.tax.event.TreeEventChangeSupport; /** * Tree objects base class with support for firing <b>events</b> and <b>merging</b>. * <p> * It also prescribes that each subclass MUST have <b>copy constuctor</b> * calling its superclass copy constructor. The copy constructor MUST be then called * during <b>cloning</b>. * <p> * All TreeObject subclasses should not have public contructors and therefore * should be created just by factory methods. * <p> * Pending: validation on request, invalidation * * @author <NAME> * @author <NAME> * @version 0.1 */ public abstract class TreeObject implements TreeEventModel { /** */ public static final String PROP_READ_ONLY = "readOnly"; // NOI18N /** */ private boolean readOnly; /** */ transient private TreeEventChangeSupport eventChangeSupport; // // init // /** Creates new TreeObject. */ protected TreeObject () { this.readOnly = false; this.eventChangeSupport = null; } /** * Creates new TreeObject - copy constructor. * (it does not copy eventChangeSupport) */ protected TreeObject (TreeObject object) { this.readOnly = object.readOnly; this.eventChangeSupport = null; } // // clone // /** * Cloning must use copy constructors! */ public abstract Object clone (); // // util // /** */ protected final boolean isInstance (Object object) { return ( this.getClass ().isInstance (object) ); } // // context // /** */ abstract public boolean isInContext (); /** */ abstract public void removeFromContext () throws ReadOnlyException; // // equals // /** */ public /*final*/ boolean equals (Object object) { return super.equals (object); // when TreeObjectList will compare inserted object by 'instance' instead of 'equals' we should final this method and use this impl: // return equals (object, true); } /** */ public boolean equals (Object object, boolean deep) { if (!!! isInstance (object)) return false; TreeObject peer = (TreeObject) object; return (this.readOnly == peer.readOnly); } // // merge // /** * <p>Update algorithm pattern that <b>reuses original tree instances</b>: * <pre> * // 1. optimalization * if (this == treeObject) return; * * // 2. can merge just my instances (so no cross implemetation merge allowed) * if (getClass().isAssignablFrom(treeObject.getClass())) throw CannotMergeException; * * // 3. let superclass do its merge * super.merge(treeObject); * * // 4. cast to myself (see step 2) * {getClass()} peer = ({getClass()}) treeObject; * * // 5. 
merge all fields at THIS CLASS HIEARCHY LEVEL but * // fields that references object "parents" * // use setters that just fires property changes, i.e. such that never fails * // due to read-only or other constrains checks * * foreach field in suitableClassFields * if field is simple * set{field}Impl( peer.get{field}() ) * if field is collection or TreeObject * {field}.merge(peer.{field}) * next field * * </pre> * @param treeobject merge peer * @throws CannotMergeException if can not merge with given node (invalid class) */ public void merge (TreeObject treeObject) throws CannotMergeException { if (treeObject == this) return; checkMergeObject (treeObject); TreeObject peer = treeObject; setReadOnly (peer.isReadOnly ()); } /** */ protected final void checkMergeObject (TreeObject treeObject) throws CannotMergeException { if ( Util.THIS.isLoggable() ) /* then */ Util.THIS.debug ("TreeObject::checkMergeObject: this = " + this); // NOI18N if ( Util.THIS.isLoggable() ) /* then */ Util.THIS.debug (" ::checkMergeObject: treeObject = " + treeObject); // NOI18N if ( Util.THIS.isLoggable() ) /* then */ Util.THIS.debug (" checkMergeObject: isSameClass ? " + isInstance (treeObject)); // NOI18N if ( (treeObject == null) || (!!! isInstance (treeObject)) ) { throw new CannotMergeException (treeObject); } } // // read only // /** */ public final boolean isReadOnly () { return readOnly; } /** */ protected void setReadOnly (boolean newReadOnly) { if (readOnly == newReadOnly) return; boolean oldReadOnly = this.readOnly; this.readOnly = newReadOnly; firePropertyChange (getEventChangeSupport ().createEvent (PROP_READ_ONLY, oldReadOnly ? Boolean.TRUE : Boolean.FALSE, newReadOnly ? Boolean.TRUE : Boolean.FALSE)); } /** */ protected final void checkReadOnly () throws ReadOnlyException { if (readOnly == true) { throw new ReadOnlyException (this); } } // // event model // /** * @return support that delegates to TreeEventManager */ protected final TreeEventChangeSupport getEventChangeSupport () { if (eventChangeSupport == null) { eventChangeSupport = new TreeEventChangeSupport (this); } return eventChangeSupport; } /** * Get assigned event manager. * Whole document should have only one and same EventManager. When there is not * available manager, it returns null. * * @return assigned event manager (may be null). */ public abstract TreeEventManager getEventManager (); /** */ // protected final void addEventManagerChangeListener (PropertyChangeListener listener) { // getEventChangeSupport().addPropertyChangeListener (PROP_EVENT_MANAGER, listener); // } /** */ // protected final void removeEventManagerChangeListener (PropertyChangeListener listener) { // getEventChangeSupport().removePropertyChangeListener (PROP_EVENT_MANAGER, listener); // } /** */ public final void addReadonlyChangeListener (PropertyChangeListener listener) { getEventChangeSupport ().addPropertyChangeListener (PROP_READ_ONLY, listener); } /** */ public final void removeReadonlyChangeListener (PropertyChangeListener listener) { getEventChangeSupport ().removePropertyChangeListener (PROP_READ_ONLY, listener); } /** * Add a PropertyChangeListener to the listener list. * @param listener The listener to add. */ public final void addPropertyChangeListener (PropertyChangeListener listener) { if ( Util.THIS.isLoggable() ) /* then */ Util.THIS.debug ("Tree " + this + "attached listener" + listener); // NOI18N getEventChangeSupport ().addPropertyChangeListener (listener); } /** * Removes a PropertyChangeListener from the listener list. 
* @param listener The listener to remove. */ public final void removePropertyChangeListener (PropertyChangeListener listener) { getEventChangeSupport ().removePropertyChangeListener (listener); } /** * Fire an existing TreeEvent to any registered listeners. * No event is fired if the given event's old and new values are * equal and non-null. * @param evt The TreeEvent object. */ protected final void firePropertyChange (TreeEvent evt) { if ( Util.THIS.isLoggable() ) /* then */ Util.THIS.debug ("TreeObject firing " + evt); // NOI18N getEventChangeSupport ().firePropertyChange (evt); bubblePropertyChange (evt); } /** Add a PropertyChangeListener for a specific property to the listener list. * @param propertyname Name of the property to listen on. * @param listener The listener to add. */ public final void addPropertyChangeListener (String propertyName, PropertyChangeListener listener) { getEventChangeSupport ().addPropertyChangeListener (propertyName, listener); } /** Removes a PropertyChangeListener for a specific property from the listener list. * @param propertyname Name of the property that was listened on. * @param listener The listener to remove. */ public final void removePropertyChangeListener (String propertyName, PropertyChangeListener listener) { getEventChangeSupport ().removePropertyChangeListener (propertyName, listener); } /** * Check if there are any listeners for a specific property. * * @param propertyName the property name. * @return true if there are ore or more listeners for the given property */ public final boolean hasPropertyChangeListeners (String propertyName) { return getEventChangeSupport ().hasPropertyChangeListeners (propertyName); } /** * Report a bound property update to any registered listeners. * No event is fired if old and new are equal and non-null. * * @param propertyName The programmatic name of the property that was changed. * @param oldValue The old value of the property. * @param newValue The new value of the property. */ protected final void firePropertyChange (String propertyName, Object oldValue, Object newValue) { firePropertyChange (getEventChangeSupport ().createEvent (propertyName, oldValue, newValue)); } /** * Propagate event to parents' listeners. */ protected final void bubblePropertyChange (TreeEvent origEvt) { if ( Util.THIS.isLoggable() ) /* then */ Util.THIS.debug ("\nTreeObject [ " + this + " ]::bubblePropertyChange: origEvt = " + origEvt.getPropertyName ()); // NOI18N TreeObject source = (TreeObject)origEvt.getSource (); if ( source instanceof TreeAttribute ) { TreeAttribute attr = (TreeAttribute)source; TreeElement ownElem = attr.getOwnerElement (); if ( ownElem != null ) { ownElem.firePropertyChange (TreeElement.PROP_ATTRIBUTES, attr, null); } } else if ( source instanceof TreeChild ) { while ( source != null ) { TreeChild child = (TreeChild)source; TreeParentNode parent = child.getParentNode (); if ( Util.THIS.isLoggable() ) /* then */ Util.THIS.debug (" ::bubblePropertyChange::parentNode = " + parent); // NOI18N if ( parent != null ) { parent.getEventChangeSupport ().firePropertyChange (origEvt.createBubbling (parent)); } source = parent; } } } // // debug // /** * For debugging purposes. */ public final String listListeners () { return getEventChangeSupport ().listListeners (); } }
<gh_stars>1000+ package com.wix.invoke.types; import org.apache.commons.lang3.reflect.MethodUtils; import java.lang.reflect.InvocationTargetException; /** * Created by rotemm on 20/10/2016. */ public class ObjectInstanceTarget extends Target { public ObjectInstanceTarget(Object value) { super(value); } @Override public Object execute(Invocation invocation) throws NoSuchMethodException, IllegalAccessException, InvocationTargetException { return MethodUtils.invokeExactMethod(this.value, invocation.getMethod(), invocation.getArgs()); } }
<filename>tensorflow/compiler/xla/pjrt/distributed/client.h /* Copyright 2020 Google LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #ifndef TENSORFLOW_COMPILER_XLA_PJRT_DISTRIBUTED_CLIENT_H_ #define TENSORFLOW_COMPILER_XLA_PJRT_DISTRIBUTED_CLIENT_H_ #include <memory> #include "absl/synchronization/mutex.h" #include "absl/synchronization/notification.h" #include "absl/time/time.h" #include "grpcpp/channel.h" #include "tensorflow/compiler/xla/pjrt/distributed/protocol.grpc.pb.h" #include "tensorflow/compiler/xla/statusor.h" #include "tensorflow/compiler/xla/types.h" #include "tensorflow/core/platform/env.h" namespace xla { class DistributedRuntimeClient { public: struct Options { // This node's global ID. Required. int32 node_id = -1; // Environment used for starting threads. tensorflow::Env* env = tensorflow::Env::Default(); // RPC timeout used for RPC that don't have their own timeouts. absl::Duration rpc_timeout = absl::Seconds(120); // Time period for which Connect() should be retried. The client will keep // trying to open the initial connection for this period, even if any // individual Connect() RPC fails. May be zero, in which case Connect() will // only be attempted once. absl::Duration init_timeout = absl::ZeroDuration(); // How long to wait for all nodes to call Shutdown(). If the timeout // expires, then shutdown() reports an error and returns control. absl::Duration shutdown_timeout = absl::Seconds(60); // Interval at which the client should send heartbeat RPCs to the // coordinator. absl::Duration heartbeat_interval = absl::Seconds(10); // How many failed heartbeat RPCs may fail due to a possibly-ephemeral // reason before we decide the coordinator has vanished and that we should // shut down. int max_missing_heartbeats = 10; // Callback invoked by the client when notification of a missing heartbeat // is reported by the coordinator, or we have not heard from the coordinator // recently. `coordinator_reported_failure` is true in the former case. // Exposed so tests can override this behavior to something non-fatal. std::function<void(xla::Status, bool coordinator_reported_failure)> missed_heartbeat_callback = [](xla::Status status, bool coordinator_reported_failure) { if (coordinator_reported_failure) { LOG(QFATAL) << "Terminating process because the coordinator detected " "missing heartbeats. This most likely indicates that " "another task died; see the other task logs for more " "details. Status: " << status; } else { LOG(QFATAL) << "Terminating process because of missing heartbeat " "response from the coordinator. This most likely " "indicates that the coordinator task died; see the " "coordinator's task logs for more details. Status: " << status; } }; // For testing. Should the client explicitly Shutdown() on destruction? 
bool shutdown_on_destruction = true; }; DistributedRuntimeClient(std::shared_ptr<::grpc::Channel> channel, const Options& options); explicit DistributedRuntimeClient(std::shared_ptr<::grpc::Channel> channel) : DistributedRuntimeClient(channel, Options()) {} ~DistributedRuntimeClient(); // Connects to the master, and blocks until all clients have successfully // connected. // Not thread-safe, i.e., calls to Connect()/Shutdown()/EnumerateDevices() // must be serialized by some other means. xla::Status Connect(); // Reports to the master that the client is ready to shutdown, and blocks // until all clients are ready to shutdown or the shutdown timeout expires. // Not thread-safe. xla::Status Shutdown(); // Blocking enumeration of global devices. Used by the GPU platform. // Not thread-safe. xla::Status EnumerateDevices(const LocalTopologyProto& local_topology, GlobalTopologyProto* global_topology); // The following APIs are thread-safe. xla::StatusOr<std::string> BlockingKeyValueGet(std::string key, absl::Duration timeout); xla::Status KeyValueSet(std::string key, std::string value); private: // Entry point for the heartbeat thread. void HeartbeatLoop(); const std::unique_ptr<grpc::DistributedRuntimeService::Stub> stub_; const Options options_; // Possible states of the client. // The only legal transitions are downwards in the order below. i.e., there is // no way to reopen a closed client. enum class State { // The client has not yet connected to the server, i.e., had a Connect() // RPC succeed. kNotConnected, // The client is connected to the server and as far as we are aware the // connection is healthy. kConnected, // The client is in the process of shutting down, i.e., Shutdown() has been // called. kShuttingDown, // The client has shut down its server connection, either due to an error // or due to an explicit shutdown. kClosed, }; static absl::string_view StateToString(State state); // state_ is protected by a mutex because the heartbeat thread needs to look // at it. absl::Mutex mu_; State state_ ABSL_GUARDED_BY(mu_) = State::kNotConnected; // A unique session ID, assigned by the server during Connect(). uint64 session_id_; // Notification that tells the heartbeat thread to stop running. absl::Notification stop_heartbeats_; // Thread responsible for performing heartbeats. std::unique_ptr<tensorflow::Thread> heartbeat_thread_; }; } // namespace xla #endif // TENSORFLOW_COMPILER_XLA_PJRT_DISTRIBUTED_CLIENT_H_
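// Usage sketch (editorial addition, not part of the upstream header). It uses only
// the DistributedRuntimeClient API declared above plus standard gRPC channel
// creation; the coordinator address "dns:///localhost:1234", the node id, and the
// key names are illustrative assumptions.
#include <string>

#include "grpcpp/create_channel.h"
#include "grpcpp/security/credentials.h"

inline xla::Status distributed_client_sketch() {
  auto channel = ::grpc::CreateChannel("dns:///localhost:1234",
                                       ::grpc::InsecureChannelCredentials());
  xla::DistributedRuntimeClient::Options options;
  options.node_id = 0;  // this task's global ID
  xla::DistributedRuntimeClient client(channel, options);

  xla::Status status = client.Connect();  // blocks until all nodes have connected
  if (!status.ok()) return status;

  // Simple key/value exchange between tasks (illustrative keys/values).
  status = client.KeyValueSet("task/0/address", "10.0.0.1:7777");
  if (!status.ok()) return status;
  xla::StatusOr<std::string> peer =
      client.BlockingKeyValueGet("task/1/address", absl::Seconds(30));
  if (!peer.ok()) return peer.status();

  return client.Shutdown();  // blocks until all nodes are ready to shut down
}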
<gh_stars>1000+ from pathlib import Path from parso.tree import search_ancestor from jedi.inference.cache import inference_state_method_cache from jedi.inference.imports import load_module_from_path from jedi.inference.filters import ParserTreeFilter from jedi.inference.base_value import NO_VALUES, ValueSet from jedi.inference.helpers import infer_call_of_leaf _PYTEST_FIXTURE_MODULES = [ ('_pytest', 'monkeypatch'), ('_pytest', 'capture'), ('_pytest', 'logging'), ('_pytest', 'tmpdir'), ('_pytest', 'pytester'), ] def execute(callback): def wrapper(value, arguments): # This might not be necessary anymore in pytest 4/5, definitely needed # for pytest 3. if value.py__name__() == 'fixture' \ and value.parent_context.py__name__() == '_pytest.fixtures': return NO_VALUES return callback(value, arguments) return wrapper def infer_anonymous_param(func): def get_returns(value): if value.tree_node.annotation is not None: result = value.execute_with_values() if any(v.name.get_qualified_names(include_module_names=True) == ('typing', 'Generator') for v in result): return ValueSet.from_sets( v.py__getattribute__('__next__').execute_annotation() for v in result ) return result # In pytest we need to differentiate between generators and normal # returns. # Parameters still need to be anonymous, .as_context() ensures that. function_context = value.as_context() if function_context.is_generator(): return function_context.merge_yield_values() else: return function_context.get_return_values() def wrapper(param_name): # parameters with an annotation do not need special handling if param_name.annotation_node: return func(param_name) is_pytest_param, param_name_is_function_name = \ _is_a_pytest_param_and_inherited(param_name) if is_pytest_param: module = param_name.get_root_context() fixtures = _goto_pytest_fixture( module, param_name.string_name, # This skips the current module, because we are basically # inheriting a fixture from somewhere else. skip_own_module=param_name_is_function_name, ) if fixtures: return ValueSet.from_sets( get_returns(value) for fixture in fixtures for value in fixture.infer() ) return func(param_name) return wrapper def goto_anonymous_param(func): def wrapper(param_name): is_pytest_param, param_name_is_function_name = \ _is_a_pytest_param_and_inherited(param_name) if is_pytest_param: names = _goto_pytest_fixture( param_name.get_root_context(), param_name.string_name, skip_own_module=param_name_is_function_name, ) if names: return names return func(param_name) return wrapper def complete_param_names(func): def wrapper(context, func_name, decorator_nodes): module_context = context.get_root_context() if _is_pytest_func(func_name, decorator_nodes): names = [] for module_context in _iter_pytest_modules(module_context): names += FixtureFilter(module_context).values() if names: return names return func(context, func_name, decorator_nodes) return wrapper def _goto_pytest_fixture(module_context, name, skip_own_module): for module_context in _iter_pytest_modules(module_context, skip_own_module=skip_own_module): names = FixtureFilter(module_context).get(name) if names: return names def _is_a_pytest_param_and_inherited(param_name): """ Pytest params are either in a `test_*` function or have a pytest fixture with the decorator @pytest.fixture. This is a heuristic and will work in most cases. 
""" funcdef = search_ancestor(param_name.tree_name, 'funcdef') if funcdef is None: # A lambda return False, False decorators = funcdef.get_decorators() return _is_pytest_func(funcdef.name.value, decorators), \ funcdef.name.value == param_name.string_name def _is_pytest_func(func_name, decorator_nodes): return func_name.startswith('test') \ or any('fixture' in n.get_code() for n in decorator_nodes) @inference_state_method_cache() def _iter_pytest_modules(module_context, skip_own_module=False): if not skip_own_module: yield module_context file_io = module_context.get_value().file_io if file_io is not None: folder = file_io.get_parent_folder() sys_path = module_context.inference_state.get_sys_path() # prevent an infinite loop when reaching the root of the current drive last_folder = None while any(folder.path.startswith(p) for p in sys_path): file_io = folder.get_file_io('conftest.py') if Path(file_io.path) != module_context.py__file__(): try: m = load_module_from_path(module_context.inference_state, file_io) yield m.as_context() except FileNotFoundError: pass folder = folder.get_parent_folder() # prevent an infinite for loop if the same parent folder is return twice if last_folder is not None and folder.path == last_folder.path: break last_folder = folder # keep track of the last found parent name for names in _PYTEST_FIXTURE_MODULES: for module_value in module_context.inference_state.import_module(names): yield module_value.as_context() class FixtureFilter(ParserTreeFilter): def _filter(self, names): for name in super()._filter(names): funcdef = name.parent # Class fixtures are not supported if funcdef.type == 'funcdef': decorated = funcdef.parent if decorated.type == 'decorated' and self._is_fixture(decorated): yield name def _is_fixture(self, decorated): decorators = decorated.children[0] if decorators.type == 'decorators': decorators = decorators.children else: decorators = [decorators] for decorator in decorators: dotted_name = decorator.children[1] # A heuristic, this makes it faster. if 'fixture' in dotted_name.get_code(): if dotted_name.type == 'atom_expr': # Since Python3.9 a decorator does not have dotted names # anymore. last_trailer = dotted_name.children[-1] last_leaf = last_trailer.get_last_leaf() if last_leaf == ')': values = infer_call_of_leaf( self.parent_context, last_leaf, cut_own_trailer=True) else: values = self.parent_context.infer_node(dotted_name) else: values = self.parent_context.infer_node(dotted_name) for value in values: if value.name.get_qualified_names(include_module_names=True) \ == ('_pytest', 'fixtures', 'fixture'): return True return False
// SPDX-License-Identifier: BSD-3-Clause // Copyright (c) 2019-2020 <NAME> // All rights reserved. #include <djvScene3D/IPrimitive.h> #include <djvCore/UID.h> using namespace djv::Core; namespace djv { namespace Scene3D { std::vector<std::shared_ptr<Geom::TriangleMesh> > IPrimitive::_meshesDummy; std::vector<std::shared_ptr<Geom::PointList> > IPrimitive::_polyLinesDummy; std::shared_ptr<Geom::PointList> IPrimitive::_pointListDummy; IPrimitive::IPrimitive() : _uid(Core::createUID()) {} IPrimitive::~IPrimitive() {} void IPrimitive::addChild(const std::shared_ptr<IPrimitive>& value) { if (auto prevParent = value->getParent().lock()) { prevParent->removeChild(value); } value->_parent = std::dynamic_pointer_cast<IPrimitive>(shared_from_this()); _children.push_back(value); } void IPrimitive::removeChild(const std::shared_ptr<IPrimitive>& value) { const auto i = std::find(_children.begin(), _children.end(), value); if (i != _children.end()) { (*i)->_parent.reset(); _children.erase(i); } } void IPrimitive::clearChildren() { for (auto& i : _children) { i->_parent.reset(); } _children.clear(); } } // namespace Scene3D } // namespace djv
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dubbo.rpc.model; import org.apache.dubbo.common.config.Environment; import org.apache.dubbo.common.context.ApplicationExt; import org.apache.dubbo.common.deploy.ApplicationDeployer; import org.apache.dubbo.common.extension.ExtensionLoader; import org.apache.dubbo.common.extension.ExtensionScope; import org.apache.dubbo.common.logger.Logger; import org.apache.dubbo.common.logger.LoggerFactory; import org.apache.dubbo.common.threadpool.manager.ExecutorRepository; import org.apache.dubbo.common.utils.Assert; import org.apache.dubbo.config.ApplicationConfig; import org.apache.dubbo.config.context.ConfigManager; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Optional; import java.util.Set; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.atomic.AtomicInteger; /** * {@link ExtensionLoader}, {@code DubboBootstrap} and this class are at present designed to be * singleton or static (by itself totally static or uses some static fields). So the instances * returned from them are of process scope. If you want to support multiple dubbo servers in one * single process, you may need to refactor those three classes. * <p> * Represent a application which is using Dubbo and store basic metadata info for using * during the processing of RPC invoking. * <p> * ApplicationModel includes many ProviderModel which is about published services * and many Consumer Model which is about subscribed services. * <p> */ public class ApplicationModel extends ScopeModel { protected static final Logger LOGGER = LoggerFactory.getLogger(ApplicationModel.class); public static final String NAME = "ApplicationModel"; private final List<ModuleModel> moduleModels = new CopyOnWriteArrayList<>(); private final List<ModuleModel> pubModuleModels = new CopyOnWriteArrayList<>(); private Environment environment; private ConfigManager configManager; private ServiceRepository serviceRepository; private ApplicationDeployer deployer; private final FrameworkModel frameworkModel; private ModuleModel internalModule; private volatile ModuleModel defaultModule; // internal module index is 0, default module index is 1 private AtomicInteger moduleIndex = new AtomicInteger(0); private Object moduleLock = new Object(); // --------- static methods ----------// public static ApplicationModel ofNullable(ApplicationModel applicationModel) { if (applicationModel != null) { return applicationModel; } else { return defaultModel(); } } /** * During destroying the default FrameworkModel, the FrameworkModel.defaultModel() or ApplicationModel.defaultModel() * will return a broken model, maybe cause unpredictable problem. 
* Recommendation: Avoid using the default model as much as possible. * @return the global default ApplicationModel */ public static ApplicationModel defaultModel() { // should get from default FrameworkModel, avoid out of sync return FrameworkModel.defaultModel().defaultApplication(); } /** * @deprecated use {@link ServiceRepository#allConsumerModels()} */ @Deprecated public static Collection<ConsumerModel> allConsumerModels() { return defaultModel().getApplicationServiceRepository().allConsumerModels(); } /** * @deprecated use {@link ServiceRepository#allProviderModels()} */ @Deprecated public static Collection<ProviderModel> allProviderModels() { return defaultModel().getApplicationServiceRepository().allProviderModels(); } /** * @deprecated use {@link FrameworkServiceRepository#lookupExportedService(String)} */ @Deprecated public static ProviderModel getProviderModel(String serviceKey) { return defaultModel().getDefaultModule().getServiceRepository().lookupExportedService(serviceKey); } /** * @deprecated ConsumerModel should fetch from context */ @Deprecated public static ConsumerModel getConsumerModel(String serviceKey) { return defaultModel().getDefaultModule().getServiceRepository().lookupReferredService(serviceKey); } /** * @deprecated Replace to {@link ScopeModel#getModelEnvironment()} */ @Deprecated public static Environment getEnvironment() { return defaultModel().getModelEnvironment(); } /** * @deprecated Replace to {@link ApplicationModel#getApplicationConfigManager()} */ @Deprecated public static ConfigManager getConfigManager() { return defaultModel().getApplicationConfigManager(); } /** * @deprecated Replace to {@link ApplicationModel#getApplicationServiceRepository()} */ @Deprecated public static ServiceRepository getServiceRepository() { return defaultModel().getApplicationServiceRepository(); } /** * @deprecated Replace to {@link ApplicationModel#getApplicationExecutorRepository()} */ @Deprecated public static ExecutorRepository getExecutorRepository() { return defaultModel().getApplicationExecutorRepository(); } /** * @deprecated Replace to {@link ApplicationModel#getCurrentConfig()} */ @Deprecated public static ApplicationConfig getApplicationConfig() { return defaultModel().getCurrentConfig(); } /** * @deprecated Replace to {@link ApplicationModel#getApplicationName()} */ @Deprecated public static String getName() { return defaultModel().getCurrentConfig().getName(); } /** * @deprecated Replace to {@link ApplicationModel#getApplicationName()} */ @Deprecated public static String getApplication() { return getName(); } // only for unit test @Deprecated public static void reset() { if (FrameworkModel.defaultModel().getDefaultAppModel() != null) { FrameworkModel.defaultModel().getDefaultAppModel().destroy(); } } // ------------- instance methods ---------------// public ApplicationModel(FrameworkModel frameworkModel) { this(frameworkModel, false); } public ApplicationModel(FrameworkModel frameworkModel, boolean isInternal) { super(frameworkModel, ExtensionScope.APPLICATION, isInternal); Assert.notNull(frameworkModel, "FrameworkModel can not be null"); this.frameworkModel = frameworkModel; frameworkModel.addApplication(this); if (LOGGER.isInfoEnabled()) { LOGGER.info(getDesc() + " is created"); } initialize(); } @Override protected void initialize() { super.initialize(); internalModule = new ModuleModel(this, true); this.serviceRepository = new ServiceRepository(this); ExtensionLoader<ApplicationInitListener> extensionLoader = 
this.getExtensionLoader(ApplicationInitListener.class); Set<String> listenerNames = extensionLoader.getSupportedExtensions(); for (String listenerName : listenerNames) { extensionLoader.getExtension(listenerName).init(); } initApplicationExts(); ExtensionLoader<ScopeModelInitializer> initializerExtensionLoader = this.getExtensionLoader(ScopeModelInitializer.class); Set<ScopeModelInitializer> initializers = initializerExtensionLoader.getSupportedExtensionInstances(); for (ScopeModelInitializer initializer : initializers) { initializer.initializeApplicationModel(this); } } private void initApplicationExts() { Set<ApplicationExt> exts = this.getExtensionLoader(ApplicationExt.class).getSupportedExtensionInstances(); for (ApplicationExt ext : exts) { ext.initialize(); } } @Override protected void onDestroy() { // 1. remove from frameworkModel frameworkModel.removeApplication(this); // 2. pre-destroy, set stopping if (deployer != null) { // destroy registries and unregister services from registries first to notify consumers to stop consuming this instance. deployer.preDestroy(); } // 3. Try to destroy protocols to stop this instance from receiving new requests from connections frameworkModel.tryDestroyProtocols(); // 4. destroy application resources for (ModuleModel moduleModel : new ArrayList<>(moduleModels)) { if (moduleModel != internalModule) { moduleModel.destroy(); } } // 5. destroy internal module later internalModule.destroy(); // 6. post-destroy, release registry resources if (deployer != null) { deployer.postDestroy(); } // 7. destroy other resources (e.g. ZookeeperTransporter ) notifyDestroy(); if (environment != null) { environment.destroy(); environment = null; } if (configManager != null) { configManager.destroy(); configManager = null; } if (serviceRepository != null) { serviceRepository.destroy(); serviceRepository = null; } // 8. destroy framework if none application frameworkModel.tryDestroy(); } public FrameworkModel getFrameworkModel() { return frameworkModel; } public ModuleModel newModule() { return new ModuleModel(this); } @Override public Environment getModelEnvironment() { if (environment == null) { environment = (Environment) this.getExtensionLoader(ApplicationExt.class) .getExtension(Environment.NAME); } return environment; } public ConfigManager getApplicationConfigManager() { if (configManager == null) { configManager = (ConfigManager) this.getExtensionLoader(ApplicationExt.class) .getExtension(ConfigManager.NAME); } return configManager; } public ServiceRepository getApplicationServiceRepository() { return serviceRepository; } public ExecutorRepository getApplicationExecutorRepository() { return this.getExtensionLoader(ExecutorRepository.class).getDefaultExtension(); } public ApplicationConfig getCurrentConfig() { return getApplicationConfigManager().getApplicationOrElseThrow(); } public String getApplicationName() { return getCurrentConfig().getName(); } public String tryGetApplicationName() { Optional<ApplicationConfig> appCfgOptional = getApplicationConfigManager().getApplication(); return appCfgOptional.isPresent() ? 
appCfgOptional.get().getName() : null; } void addModule(ModuleModel moduleModel, boolean isInternal) { synchronized (moduleLock) { if (!this.moduleModels.contains(moduleModel)) { checkDestroyed(); this.moduleModels.add(moduleModel); moduleModel.setInternalId(buildInternalId(getInternalId(), moduleIndex.getAndIncrement())); if (!isInternal) { pubModuleModels.add(moduleModel); } } } } public void removeModule(ModuleModel moduleModel) { synchronized (moduleLock) { this.moduleModels.remove(moduleModel); this.pubModuleModels.remove(moduleModel); if (moduleModel == defaultModule) { defaultModule = findDefaultModule(); } } } void tryDestroy() { if (this.moduleModels.isEmpty() || (this.moduleModels.size() == 1 && this.moduleModels.get(0) == internalModule)) { destroy(); } } private void checkDestroyed() { if (isDestroyed()) { throw new IllegalStateException("ApplicationModel is destroyed"); } } public List<ModuleModel> getModuleModels() { return Collections.unmodifiableList(moduleModels); } public List<ModuleModel> getPubModuleModels() { return Collections.unmodifiableList(pubModuleModels); } public ModuleModel getDefaultModule() { if (defaultModule == null) { synchronized (moduleLock) { if (defaultModule == null) { defaultModule = findDefaultModule(); if (defaultModule == null) { defaultModule = this.newModule(); } } } } return defaultModule; } private ModuleModel findDefaultModule() { for (ModuleModel moduleModel : moduleModels) { if (moduleModel != internalModule) { return moduleModel; } } return null; } public ModuleModel getInternalModule() { return internalModule; } /** * @deprecated only for ut */ @Deprecated public void setEnvironment(Environment environment) { this.environment = environment; } /** * @deprecated only for ut */ @Deprecated public void setConfigManager(ConfigManager configManager) { this.configManager = configManager; } /** * @deprecated only for ut */ @Deprecated public void setServiceRepository(ServiceRepository serviceRepository) { this.serviceRepository = serviceRepository; } @Override public void addClassLoader(ClassLoader classLoader) { super.addClassLoader(classLoader); if (environment != null) { environment.refreshClassLoaders(); } } @Override public void removeClassLoader(ClassLoader classLoader) { super.removeClassLoader(classLoader); if (environment != null) { environment.refreshClassLoaders(); } } @Override protected boolean checkIfClassLoaderCanRemoved(ClassLoader classLoader) { return super.checkIfClassLoaderCanRemoved(classLoader) && !containsClassLoader(classLoader); } protected boolean containsClassLoader(ClassLoader classLoader) { return moduleModels.stream().anyMatch(moduleModel -> moduleModel.getClassLoaders().contains(classLoader)); } public ApplicationDeployer getDeployer() { return deployer; } public void setDeployer(ApplicationDeployer deployer) { this.deployer = deployer; } }
5,397
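The ApplicationModel class above deprecates its static accessors (getConfigManager(), getServiceRepository(), getEnvironment(), and so on) in favor of lookups on an explicit model instance. A minimal migration sketch, assuming the standard org.apache.dubbo.rpc.model package and a hypothetical DemoBootstrap class; only methods that appear in the source above are used.

import org.apache.dubbo.rpc.model.ApplicationModel;
import org.apache.dubbo.rpc.model.FrameworkModel;
import org.apache.dubbo.rpc.model.ModuleModel;

// Hypothetical bootstrap class, not part of the Dubbo sources above.
public class DemoBootstrap {
    public static void main(String[] args) {
        // Old style (deprecated): ApplicationModel.getConfigManager(), ApplicationModel.getServiceRepository().
        // New style: resolve everything from an explicit model instance.
        FrameworkModel frameworkModel = FrameworkModel.defaultModel();
        ApplicationModel applicationModel = frameworkModel.defaultApplication();

        // Per-application services hang off the instance rather than static state.
        System.out.println(applicationModel.getApplicationServiceRepository());
        System.out.println(applicationModel.getApplicationConfigManager());

        // Modules are created from the application they belong to.
        ModuleModel module = applicationModel.newModule();
        System.out.println(module.getServiceRepository());
    }
}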
5,607
package io.micronaut.docs.http.server.bind.type; import io.micronaut.http.HttpResponse; import io.micronaut.http.annotation.Controller; import io.micronaut.http.annotation.Get; import java.util.HashMap; import java.util.Map; @Controller("/customBinding") public class ShoppingCartController { // tag::method[] @Get("/typed") public HttpResponse<?> loadCart(ShoppingCart shoppingCart) { //<1> Map<String, Object> responseMap = new HashMap<>(); responseMap.put("sessionId", shoppingCart.getSessionId()); responseMap.put("total", shoppingCart.getTotal()); return HttpResponse.ok(responseMap); } // end::method[] }
242
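The Micronaut controller above binds a custom ShoppingCart type straight from the request, but the snippet only shows the controller side, so the shape of the bound type is implied by the two getters it calls. A minimal sketch of a matching POJO; the field names and types are chosen for illustration only and the real class in the Micronaut guide may differ.

// Hypothetical POJO matching what loadCart() reads (getSessionId(), getTotal()).
public class ShoppingCart {
    private String sessionId;
    private Integer total;

    public String getSessionId() { return sessionId; }
    public void setSessionId(String sessionId) { this.sessionId = sessionId; }

    public Integer getTotal() { return total; }
    public void setTotal(Integer total) { this.total = total; }
}

Whatever custom argument binder produces this object only has to populate these two properties for the /customBinding/typed response map to be filled in.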
305
<reponame>AgesX/llvm // RUN: %clang_cc1 -fsyntax-only -verify -triple x86_64-linux-pc %s // RUN: %clang_cc1 -fsyntax-only -verify -triple spir-unknown-unknown %s -DHAVE // RUN: %clang_cc1 -fsyntax-only -verify -triple armv7a-linux-gnu %s -DHAVE // RUN: %clang_cc1 -fsyntax-only -verify -triple aarch64-linux-gnu %s -DHAVE #ifndef HAVE // expected-error@+2{{_Float16 is not supported on this target}} #endif // HAVE _Float16 f; #ifdef HAVE // FIXME: Should this be valid? _Complex _Float16 a; // expected-error {{'_Complex _Float16' is invalid}} void builtin_complex() { _Float16 a = 0; (void)__builtin_complex(a, a); // expected-error {{'_Complex _Float16' is invalid}} } #endif
279
6,989
# -*- coding: utf-8 -*- """ pygments.lexers._csound_builtins ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ # Opcodes in Csound 6.13.0 using: # python3 -c " # import re # from subprocess import Popen, PIPE # output = Popen(['csound', '--list-opcodes0'], stderr=PIPE, text=True).communicate()[1] # opcodes = output[re.search(r'^\$', output, re.M).end() : re.search(r'^\d+ opcodes\$', output, re.M).start()].split() # output = Popen(['csound', '--list-opcodes2'], stderr=PIPE, text=True).communicate()[1] # all_opcodes = output[re.search(r'^\$', output, re.M).end() : re.search(r'^\d+ opcodes\$', output, re.M).start()].split() # deprecated_opcodes = [opcode for opcode in all_opcodes if opcode not in opcodes] # # Remove opcodes that csound.py treats as keywords. # keyword_opcodes = [ # 'cggoto', # https://csound.com/docs/manual/cggoto.html # 'cigoto', # https://csound.com/docs/manual/cigoto.html # 'cingoto', # (undocumented) # 'ckgoto', # https://csound.com/docs/manual/ckgoto.html # 'cngoto', # https://csound.com/docs/manual/cngoto.html # 'cnkgoto', # (undocumented) # 'endin', # https://csound.com/docs/manual/endin.html # 'endop', # https://csound.com/docs/manual/endop.html # 'goto', # https://csound.com/docs/manual/goto.html # 'igoto', # https://csound.com/docs/manual/igoto.html # 'instr', # https://csound.com/docs/manual/instr.html # 'kgoto', # https://csound.com/docs/manual/kgoto.html # 'loop_ge', # https://csound.com/docs/manual/loop_ge.html # 'loop_gt', # https://csound.com/docs/manual/loop_gt.html # 'loop_le', # https://csound.com/docs/manual/loop_le.html # 'loop_lt', # https://csound.com/docs/manual/loop_lt.html # 'opcode', # https://csound.com/docs/manual/opcode.html # 'reinit', # https://csound.com/docs/manual/reinit.html # 'return', # https://csound.com/docs/manual/return.html # 'rireturn', # https://csound.com/docs/manual/rireturn.html # 'rigoto', # https://csound.com/docs/manual/rigoto.html # 'tigoto', # https://csound.com/docs/manual/tigoto.html # 'timout' # https://csound.com/docs/manual/timout.html # ] # opcodes = [opcode for opcode in opcodes if opcode not in keyword_opcodes] # newline = '\n' # print(f'''OPCODES = set(\''' # {newline.join(opcodes)} # \'''.split()) # # DEPRECATED_OPCODES = set(\''' # {newline.join(deprecated_opcodes)} # \'''.split()) # ''') # " OPCODES = set(''' ATSadd ATSaddnz ATSbufread ATScross ATSinfo ATSinterpread ATSpartialtap ATSread ATSreadnz ATSsinnoi FLbox FLbutBank FLbutton FLcloseButton FLcolor FLcolor2 FLcount FLexecButton FLgetsnap FLgroup FLgroupEnd FLgroup_end FLhide FLhvsBox FLhvsBoxSetValue FLjoy FLkeyIn FLknob FLlabel FLloadsnap FLmouse FLpack FLpackEnd FLpack_end FLpanel FLpanelEnd FLpanel_end FLprintk FLprintk2 FLroller FLrun FLsavesnap FLscroll FLscrollEnd FLscroll_end FLsetAlign FLsetBox FLsetColor FLsetColor2 FLsetFont FLsetPosition FLsetSize FLsetSnapGroup FLsetText FLsetTextColor FLsetTextSize FLsetTextType FLsetVal FLsetVal_i FLsetVali FLsetsnap FLshow FLslidBnk FLslidBnk2 FLslidBnk2Set FLslidBnk2Setk FLslidBnkGetHandle FLslidBnkSet FLslidBnkSetk FLslider FLtabs FLtabsEnd FLtabs_end FLtext FLupdate FLvalue FLvkeybd FLvslidBnk FLvslidBnk2 FLxyin JackoAudioIn JackoAudioInConnect JackoAudioOut JackoAudioOutConnect JackoFreewheel JackoInfo JackoInit JackoMidiInConnect JackoMidiOut JackoMidiOutConnect JackoNoteOut JackoOn JackoTransport K35_hpf K35_lpf MixerClear MixerGetLevel MixerReceive MixerSend MixerSetLevel MixerSetLevel_i OSCbundle OSCcount 
OSCinit OSCinitM OSClisten OSCraw OSCsend OSCsend_lo S STKBandedWG STKBeeThree STKBlowBotl STKBlowHole STKBowed STKBrass STKClarinet STKDrummer STKFMVoices STKFlute STKHevyMetl STKMandolin STKModalBar STKMoog STKPercFlut STKPlucked STKResonate STKRhodey STKSaxofony STKShakers STKSimple STKSitar STKStifKarp STKTubeBell STKVoicForm STKWhistle STKWurley a abs active adsr adsyn adsynt adsynt2 aftouch alpass alwayson ampdb ampdbfs ampmidi ampmidicurve ampmidid areson aresonk atone atonek atonex babo balance balance2 bamboo barmodel bbcutm bbcuts beadsynt beosc betarand bexprnd bformdec1 bformenc1 binit biquad biquada birnd bpf bpfcos bqrez butbp butbr buthp butlp butterbp butterbr butterhp butterlp button buzz c2r cabasa cauchy cauchyi cbrt ceil cell cent centroid ceps cepsinv chanctrl changed2 chani chano chebyshevpoly checkbox chn_S chn_a chn_k chnclear chnexport chnget chngetks chnmix chnparams chnset chnsetks chuap clear clfilt clip clockoff clockon cmp cmplxprod comb combinv compilecsd compileorc compilestr compress compress2 connect control convle convolve copya2ftab copyf2array cos cosh cosinv cosseg cossegb cossegr cps2pch cpsmidi cpsmidib cpsmidinn cpsoct cpspch cpstmid cpstun cpstuni cpsxpch cpumeter cpuprc cross2 crossfm crossfmi crossfmpm crossfmpmi crosspm crosspmi crunch ctlchn ctrl14 ctrl21 ctrl7 ctrlinit cuserrnd dam date dates db dbamp dbfsamp dcblock dcblock2 dconv dct dctinv deinterleave delay delay1 delayk delayr delayw deltap deltap3 deltapi deltapn deltapx deltapxw denorm diff diode_ladder directory diskgrain diskin diskin2 dispfft display distort distort1 divz doppler dot downsamp dripwater dssiactivate dssiaudio dssictls dssiinit dssilist dumpk dumpk2 dumpk3 dumpk4 duserrnd dust dust2 envlpx envlpxr ephasor eqfil evalstr event event_i exciter exitnow exp expcurve expon exprand exprandi expseg expsega expsegb expsegba expsegr fareylen fareyleni faustaudio faustcompile faustctl faustdsp faustgen faustplay fft fftinv ficlose filebit filelen filenchnls filepeak filescal filesr filevalid fillarray filter2 fin fini fink fiopen flanger flashtxt flooper flooper2 floor fluidAllOut fluidCCi fluidCCk fluidControl fluidEngine fluidInfo fluidLoad fluidNote fluidOut fluidProgramSelect fluidSetInterpMethod fmanal fmax fmb3 fmbell fmin fmmetal fmod fmpercfl fmrhode fmvoice fmwurlie fof fof2 fofilter fog fold follow follow2 foscil foscili fout fouti foutir foutk fprintks fprints frac fractalnoise framebuffer freeverb ftaudio ftchnls ftconv ftcps ftfree ftgen ftgenonce ftgentmp ftlen ftload ftloadk ftlptim ftmorf ftom ftprint ftresize ftresizei ftsamplebank ftsave ftsavek ftslice ftsr gain gainslider gauss gaussi gausstrig gbuzz genarray genarray_i gendy gendyc gendyx getcfg getcol getftargs getrow getrowlin getseed gogobel grain grain2 grain3 granule gtf guiro harmon harmon2 harmon3 harmon4 hdf5read hdf5write hilbert hilbert2 hrtfearly hrtfmove hrtfmove2 hrtfreverb hrtfstat hsboscil hvs1 hvs2 hvs3 hypot i ihold imagecreate imagefree imagegetpixel imageload imagesave imagesetpixel imagesize in in32 inch inh init initc14 initc21 initc7 inleta inletf inletk inletkid inletv ino inq inrg ins insglobal insremot int integ interleave interp invalue inx inz jacktransport jitter jitter2 joystick jspline k la_i_add_mc la_i_add_mr la_i_add_vc la_i_add_vr la_i_assign_mc la_i_assign_mr la_i_assign_t la_i_assign_vc la_i_assign_vr la_i_conjugate_mc la_i_conjugate_mr la_i_conjugate_vc la_i_conjugate_vr la_i_distance_vc la_i_distance_vr la_i_divide_mc la_i_divide_mr la_i_divide_vc la_i_divide_vr 
la_i_dot_mc la_i_dot_mc_vc la_i_dot_mr la_i_dot_mr_vr la_i_dot_vc la_i_dot_vr la_i_get_mc la_i_get_mr la_i_get_vc la_i_get_vr la_i_invert_mc la_i_invert_mr la_i_lower_solve_mc la_i_lower_solve_mr la_i_lu_det_mc la_i_lu_det_mr la_i_lu_factor_mc la_i_lu_factor_mr la_i_lu_solve_mc la_i_lu_solve_mr la_i_mc_create la_i_mc_set la_i_mr_create la_i_mr_set la_i_multiply_mc la_i_multiply_mr la_i_multiply_vc la_i_multiply_vr la_i_norm1_mc la_i_norm1_mr la_i_norm1_vc la_i_norm1_vr la_i_norm_euclid_mc la_i_norm_euclid_mr la_i_norm_euclid_vc la_i_norm_euclid_vr la_i_norm_inf_mc la_i_norm_inf_mr la_i_norm_inf_vc la_i_norm_inf_vr la_i_norm_max_mc la_i_norm_max_mr la_i_print_mc la_i_print_mr la_i_print_vc la_i_print_vr la_i_qr_eigen_mc la_i_qr_eigen_mr la_i_qr_factor_mc la_i_qr_factor_mr la_i_qr_sym_eigen_mc la_i_qr_sym_eigen_mr la_i_random_mc la_i_random_mr la_i_random_vc la_i_random_vr la_i_size_mc la_i_size_mr la_i_size_vc la_i_size_vr la_i_subtract_mc la_i_subtract_mr la_i_subtract_vc la_i_subtract_vr la_i_t_assign la_i_trace_mc la_i_trace_mr la_i_transpose_mc la_i_transpose_mr la_i_upper_solve_mc la_i_upper_solve_mr la_i_vc_create la_i_vc_set la_i_vr_create la_i_vr_set la_k_a_assign la_k_add_mc la_k_add_mr la_k_add_vc la_k_add_vr la_k_assign_a la_k_assign_f la_k_assign_mc la_k_assign_mr la_k_assign_t la_k_assign_vc la_k_assign_vr la_k_conjugate_mc la_k_conjugate_mr la_k_conjugate_vc la_k_conjugate_vr la_k_current_f la_k_current_vr la_k_distance_vc la_k_distance_vr la_k_divide_mc la_k_divide_mr la_k_divide_vc la_k_divide_vr la_k_dot_mc la_k_dot_mc_vc la_k_dot_mr la_k_dot_mr_vr la_k_dot_vc la_k_dot_vr la_k_f_assign la_k_get_mc la_k_get_mr la_k_get_vc la_k_get_vr la_k_invert_mc la_k_invert_mr la_k_lower_solve_mc la_k_lower_solve_mr la_k_lu_det_mc la_k_lu_det_mr la_k_lu_factor_mc la_k_lu_factor_mr la_k_lu_solve_mc la_k_lu_solve_mr la_k_mc_set la_k_mr_set la_k_multiply_mc la_k_multiply_mr la_k_multiply_vc la_k_multiply_vr la_k_norm1_mc la_k_norm1_mr la_k_norm1_vc la_k_norm1_vr la_k_norm_euclid_mc la_k_norm_euclid_mr la_k_norm_euclid_vc la_k_norm_euclid_vr la_k_norm_inf_mc la_k_norm_inf_mr la_k_norm_inf_vc la_k_norm_inf_vr la_k_norm_max_mc la_k_norm_max_mr la_k_qr_eigen_mc la_k_qr_eigen_mr la_k_qr_factor_mc la_k_qr_factor_mr la_k_qr_sym_eigen_mc la_k_qr_sym_eigen_mr la_k_random_mc la_k_random_mr la_k_random_vc la_k_random_vr la_k_subtract_mc la_k_subtract_mr la_k_subtract_vc la_k_subtract_vr la_k_t_assign la_k_trace_mc la_k_trace_mr la_k_upper_solve_mc la_k_upper_solve_mr la_k_vc_set la_k_vr_set lenarray lfo limit limit1 lincos line linen linenr lineto link_beat_force link_beat_get link_beat_request link_create link_enable link_is_enabled link_metro link_peers link_tempo_get link_tempo_set linlin linrand linseg linsegb linsegr liveconv locsend locsig log log10 log2 logbtwo logcurve loopseg loopsegp looptseg loopxseg lorenz loscil loscil3 loscil3phs loscilphs loscilx lowpass2 lowres lowresx lpf18 lpform lpfreson lphasor lpinterp lposcil lposcil3 lposcila lposcilsa lposcilsa2 lpread lpreson lpshold lpsholdp lpslot lua_exec lua_iaopcall lua_iaopcall_off lua_ikopcall lua_ikopcall_off lua_iopcall lua_iopcall_off lua_opdef mac maca madsr mags mandel mandol maparray maparray_i marimba massign max max_k maxabs maxabsaccum maxaccum maxalloc maxarray mclock mdelay median mediank metro mfb midglobal midiarp midic14 midic21 midic7 midichannelaftertouch midichn midicontrolchange midictrl mididefault midifilestatus midiin midinoteoff midinoteoncps midinoteonkey midinoteonoct midinoteonpch midion midion2 midiout midiout_i 
midipgm midipitchbend midipolyaftertouch midiprogramchange miditempo midremot min minabs minabsaccum minaccum minarray mincer mirror mode modmatrix monitor moog moogladder moogladder2 moogvcf moogvcf2 moscil mp3bitrate mp3in mp3len mp3nchnls mp3scal mp3sr mpulse mrtmsg mtof mton multitap mute mvchpf mvclpf1 mvclpf2 mvclpf3 mvclpf4 mxadsr nchnls_hw nestedap nlalp nlfilt nlfilt2 noise noteoff noteon noteondur noteondur2 notnum nreverb nrpn nsamp nstance nstrnum nstrstr ntof ntom ntrpol nxtpow2 octave octcps octmidi octmidib octmidinn octpch olabuffer oscbnk oscil oscil1 oscil1i oscil3 oscili oscilikt osciliktp oscilikts osciln oscils oscilx out out32 outc outch outh outiat outic outic14 outipat outipb outipc outkat outkc outkc14 outkpat outkpb outkpc outleta outletf outletk outletkid outletv outo outq outq1 outq2 outq3 outq4 outrg outs outs1 outs2 outvalue outx outz p p5gconnect p5gdata pan pan2 pareq part2txt partials partikkel partikkelget partikkelset partikkelsync passign paulstretch pcauchy pchbend pchmidi pchmidib pchmidinn pchoct pchtom pconvolve pcount pdclip pdhalf pdhalfy peak pgmassign pgmchn phaser1 phaser2 phasor phasorbnk phs pindex pinker pinkish pitch pitchac pitchamdf planet platerev plltrack pluck poisson pol2rect polyaft polynomial port portk poscil poscil3 pow powershape powoftwo pows prealloc prepiano print print_type printarray printf printf_i printk printk2 printks printks2 prints product pset ptable ptable3 ptablei ptablew ptrack puts pvadd pvbufread pvcross pvinterp pvoc pvread pvs2array pvs2tab pvsadsyn pvsanal pvsarp pvsbandp pvsbandr pvsbin pvsblur pvsbuffer pvsbufread pvsbufread2 pvscale pvscent pvsceps pvscross pvsdemix pvsdiskin pvsdisp pvsenvftw pvsfilter pvsfread pvsfreeze pvsfromarray pvsftr pvsftw pvsfwrite pvsgain pvshift pvsifd pvsin pvsinfo pvsinit pvslock pvsmaska pvsmix pvsmooth pvsmorph pvsosc pvsout pvspitch pvstanal pvstencil pvstrace pvsvoc pvswarp pvsynth pwd pyassign pyassigni pyassignt pycall pycall1 pycall1i pycall1t pycall2 pycall2i pycall2t pycall3 pycall3i pycall3t pycall4 pycall4i pycall4t pycall5 pycall5i pycall5t pycall6 pycall6i pycall6t pycall7 pycall7i pycall7t pycall8 pycall8i pycall8t pycalli pycalln pycallni pycallt pyeval pyevali pyevalt pyexec pyexeci pyexect pyinit pylassign pylassigni pylassignt pylcall pylcall1 pylcall1i pylcall1t pylcall2 pylcall2i pylcall2t pylcall3 pylcall3i pylcall3t pylcall4 pylcall4i pylcall4t pylcall5 pylcall5i pylcall5t pylcall6 pylcall6i pylcall6t pylcall7 pylcall7i pylcall7t pylcall8 pylcall8i pylcall8t pylcalli pylcalln pylcallni pylcallt pyleval pylevali pylevalt pylexec pylexeci pylexect pylrun pylruni pylrunt pyrun pyruni pyrunt qinf qnan r2c rand randh randi random randomh randomi rbjeq readclock readf readfi readk readk2 readk3 readk4 readks readscore readscratch rect2pol release remoteport remove repluck reshapearray reson resonk resonr resonx resonxk resony resonz resyn reverb reverb2 reverbsc rewindscore rezzy rfft rifft rms rnd rnd31 round rspline rtclock s16b14 s32b14 samphold sandpaper sc_lag sc_lagud sc_phasor sc_trig scale scalearray scanhammer scans scantable scanu schedkwhen schedkwhennamed schedule schedwhen scoreline scoreline_i seed sekere select semitone sense sensekey seqtime seqtime2 serialBegin serialEnd serialFlush serialPrint serialRead serialWrite serialWrite_i setcol setctrl setksmps setrow setscorepos sfilist sfinstr sfinstr3 sfinstr3m sfinstrm sfload sflooper sfpassign sfplay sfplay3 sfplay3m sfplaym sfplist sfpreset shaker shiftin shiftout signum sin sinh sininv sinsyn 
sleighbells slicearray slicearray_i slider16 slider16f slider16table slider16tablef slider32 slider32f slider32table slider32tablef slider64 slider64f slider64table slider64tablef slider8 slider8f slider8table slider8tablef sliderKawai sndloop sndwarp sndwarpst sockrecv sockrecvs socksend socksends sorta sortd soundin space spat3d spat3di spat3dt spdist splitrig sprintf sprintfk spsend sqrt squinewave statevar stix strcat strcatk strchar strchark strcmp strcmpk strcpy strcpyk strecv streson strfromurl strget strindex strindexk string2array strlen strlenk strlower strlowerk strrindex strrindexk strset strsub strsubk strtod strtodk strtol strtolk strupper strupperk stsend subinstr subinstrinit sum sumarray svfilter syncgrain syncloop syncphasor system system_i tab tab2array tab2pvs tab_i tabifd table table3 table3kt tablecopy tablefilter tablefilteri tablegpw tablei tableicopy tableigpw tableikt tableimix tablekt tablemix tableng tablera tableseg tableshuffle tableshufflei tablew tablewa tablewkt tablexkt tablexseg tabmorph tabmorpha tabmorphak tabmorphi tabplay tabrec tabrowlin tabsum tabw tabw_i tambourine tan tanh taninv taninv2 tbvcf tempest tempo temposcal tempoval timedseq timeinstk timeinsts timek times tival tlineto tone tonek tonex tradsyn trandom transeg transegb transegr trcross trfilter trhighest trigger trigseq trim trim_i trirand trlowest trmix trscale trshift trsplit turnoff turnoff2 turnon tvconv unirand unwrap upsamp urandom urd vactrol vadd vadd_i vaddv vaddv_i vaget valpass vaset vbap vbapg vbapgmove vbaplsinit vbapmove vbapz vbapzmove vcella vco vco2 vco2ft vco2ift vco2init vcomb vcopy vcopy_i vdel_k vdelay vdelay3 vdelayk vdelayx vdelayxq vdelayxs vdelayxw vdelayxwq vdelayxws vdivv vdivv_i vecdelay veloc vexp vexp_i vexpseg vexpv vexpv_i vibes vibr vibrato vincr vlimit vlinseg vlowres vmap vmirror vmult vmult_i vmultv vmultv_i voice vosim vphaseseg vport vpow vpow_i vpowv vpowv_i vpvoc vrandh vrandi vsubv vsubv_i vtaba vtabi vtabk vtable1k vtablea vtablei vtablek vtablewa vtablewi vtablewk vtabwa vtabwi vtabwk vwrap waveset websocket weibull wgbow wgbowedbar wgbrass wgclar wgflute wgpluck wgpluck2 wguide1 wguide2 wiiconnect wiidata wiirange wiisend window wrap writescratch wterrain xadsr xin xout xscanmap xscans xscansmap xscanu xtratim xyscale zacl zakinit zamod zar zarg zaw zawm zdf_1pole zdf_1pole_mode zdf_2pole zdf_2pole_mode zdf_ladder zfilter2 zir ziw ziwm zkcl zkmod zkr zkw zkwm '''.split()) DEPRECATED_OPCODES = set(''' array bformdec bformenc changed copy2ftab copy2ttab hrtfer ktableseg lentab maxtab mintab pop pop_f ptableiw push push_f scalet sndload soundout soundouts specaddm specdiff specdisp specfilt spechist specptrk specscal specsum spectrum stack sumtab tabgen tableiw tabmap tabmap_i tabslice tb0 tb0_init tb1 tb10 tb10_init tb11 tb11_init tb12 tb12_init tb13 tb13_init tb14 tb14_init tb15 tb15_init tb1_init tb2 tb2_init tb3 tb3_init tb4 tb4_init tb5 tb5_init tb6 tb6_init tb7 tb7_init tb8 tb8_init tb9 tb9_init vbap16 vbap4 vbap4move vbap8 vbap8move xyin '''.split())
8,724
764
{ "website": "https://bloom.co/", "published_on": "2017-11-30", "links": { "reddit": "https://www.reddit.com/r/BloomToken/", "github": "https://github.com/hellobloom", "slack": "https://slack.hellobloom.io/", "telegram": "https://t.me/joinchat/FFWDdQ1hxqIg3jLe7zBVFQ", "twitter": "https://twitter.com/Bloom", "facebook": "https://www.facebook.com/bloomtoken/" }, "overview": { "en": "Decentralized credit scoring powered by Ethereum and IPFS.", "zh": "基于以太坊和 IPFS 的去中心化信用评分系统。" }, "symbol": "BLT", "initial_price": { "ETH": "0.00125 ETH", "USD": "0.87 USD", "BTC": "0.0000689 BTC" }, "whitepaper": "https://bloom.co/whitepaper.pdf", "address": "0x107c4504cd79C5d2696Ea0030a8dD4e92601B82e", "email": "<EMAIL>" }
457
984
<reponame>om-sharma/java-driver /* * Copyright DataStax, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.datastax.oss.driver.internal.core.type.codec; import static org.assertj.core.api.Assertions.assertThat; import com.datastax.oss.driver.api.core.type.codec.TypeCodecs; import com.datastax.oss.driver.api.core.type.reflect.GenericType; import java.util.UUID; import org.junit.Test; public class TimeUuidCodecTest extends CodecTestBase<UUID> { private static final UUID TIME_BASED = new UUID(6342305776366260711L, -5736720392086604862L); private static final UUID NOT_TIME_BASED = new UUID(2, 1); public TimeUuidCodecTest() { this.codec = TypeCodecs.TIMEUUID; assertThat(TIME_BASED.version()).isEqualTo(1); assertThat(NOT_TIME_BASED.version()).isNotEqualTo(1); } @Test public void should_encode_time_uuid() { assertThat(encode(TIME_BASED)).isEqualTo("0x58046580293811e7b0631332a5f033c2"); } @Test(expected = IllegalArgumentException.class) public void should_not_encode_non_time_uuid() { assertThat(codec.accepts(NOT_TIME_BASED)).isFalse(); encode(NOT_TIME_BASED); } @Test public void should_format_time_uuid() { assertThat(format(TIME_BASED)).isEqualTo("58046580-2938-11e7-b063-1332a5f033c2"); } @Test(expected = IllegalArgumentException.class) public void should_not_format_non_time_uuid() { format(NOT_TIME_BASED); } @Test public void should_accept_generic_type() { assertThat(codec.accepts(GenericType.of(UUID.class))).isTrue(); assertThat(codec.accepts(GenericType.of(Integer.class))).isFalse(); } @Test public void should_accept_raw_type() { assertThat(codec.accepts(UUID.class)).isTrue(); assertThat(codec.accepts(Integer.class)).isFalse(); } @Test public void should_accept_object() { assertThat(codec.accepts(TIME_BASED)).isTrue(); assertThat(codec.accepts(Integer.MIN_VALUE)).isFalse(); } }
879
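The codec test above shows that TypeCodecs.TIMEUUID accepts only version-1 (time-based) UUIDs and rejects everything else when encoding or formatting. A minimal usage sketch, assuming the 4.x driver's Uuids.timeBased() helper and ProtocolVersion.DEFAULT; those two names come from the driver API rather than from the test file itself.

import com.datastax.oss.driver.api.core.ProtocolVersion;
import com.datastax.oss.driver.api.core.type.codec.TypeCodecs;
import com.datastax.oss.driver.api.core.uuid.Uuids;

import java.nio.ByteBuffer;
import java.util.UUID;

public class TimeUuidCodecSketch {
    public static void main(String[] args) {
        // Uuids.timeBased() yields a version-1 UUID, the only kind TIMEUUID accepts.
        UUID id = Uuids.timeBased();

        // Wire format, as it would be bound to a statement parameter.
        ByteBuffer bytes = TypeCodecs.TIMEUUID.encode(id, ProtocolVersion.DEFAULT);

        // CQL literal form, e.g. for logging or building a statement string.
        String literal = TypeCodecs.TIMEUUID.format(id);
        System.out.println(bytes.remaining() + " bytes, literal " + literal);

        // A random (version-4) UUID is rejected, exactly as the test asserts.
        UUID random = UUID.randomUUID();
        System.out.println(TypeCodecs.TIMEUUID.accepts(random)); // prints false
    }
}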
345
/*++ Copyright (c) Didi Research America. All rights reserved. Module Name: trace.h Author: <NAME>, 08-Feb-2017 Revision History: --*/ #ifndef __TRACE_DRIVER_H__ #define __TRACE_DRIVER_H__ // // Driver name and version // #define DRIVER_NAME "Kemon" enum driver_version { ALPHA_VERSION_08_FEB_2017 = 0x01000001, ALPHA_VERSION_12_DEC_2018 = 0x01000010, ALPHA_VERSION_05_JUN_2019 = 0x01000011, ALPHA_VERSION_01_OCT_2019 = 0x01000012, RELEASE_VERSION }; #define CURRENT_VERSION RELEASE_VERSION - 1 // // Kemon framework troubleshooting // #define FRAMEWORK_TROUBLESHOOTING TRUE // // Kernel authorization troubleshooting // #define KAUTH_TROUBLESHOOTING FALSE // // Socket filter troubleshooting // #define SFLT_TROUBLESHOOTING FALSE #define SFLT_TRAFFIC_TROUBLESHOOTING FALSE // // MAC policy troubleshooting // #define MAC_TROUBLESHOOTING TRUE // // Breakpoint instruction // #define BreakPoint() __asm__ volatile ("int3"); // // For macOS 10.14 Mojave // #define SNPRINTF_LENGTH_LIMIT 0xF0 // // Hex printf // #define HEX_PRINTF_B 0x01 #define HEX_PRINTF_W 0x02 #define HEX_PRINTF_D 0x04 #define HEX_PRINTF_Q 0x08 // // Declaration // extern void hex_printf( void *buffer, unsigned long length, unsigned long flag ); #endif
623
777
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef ASH_COMMON_SHELF_SHELF_ITEM_TYPES_H_ #define ASH_COMMON_SHELF_SHELF_ITEM_TYPES_H_ #include <string> #include <vector> #include "ash/ash_export.h" #include "ash/common/shelf/shelf_constants.h" #include "base/strings/string16.h" #include "ui/gfx/image/image_skia.h" namespace ash { typedef int ShelfID; // The type of a shelf item. enum ShelfItemType { // Represents a running app panel. TYPE_APP_PANEL, // Represents a pinned shortcut to an app. TYPE_APP_SHORTCUT, // Toggles visibility of the app list. TYPE_APP_LIST, // The browser shortcut button. TYPE_BROWSER_SHORTCUT, // Represents an app: Extension "V1" (legacy packaged and hosted) apps, // Extension "V2" (platform) apps, // Arc (App Runtime for Chrome - Android Play Store) apps. TYPE_APP, // Represents a dialog. TYPE_DIALOG, // Default value. TYPE_UNDEFINED, }; // Represents the status of applications in the shelf. enum ShelfItemStatus { // A closed shelf item, i.e. has no live instance. STATUS_CLOSED, // A shelf item that has a live instance. STATUS_RUNNING, // An active shelf item that has focus. STATUS_ACTIVE, // A shelf item that needs the user's attention. STATUS_ATTENTION, }; struct ASH_EXPORT ShelfItem { ShelfItem(); ShelfItem(const ShelfItem& shelf_item); ~ShelfItem(); ShelfItemType type = TYPE_UNDEFINED; // Image to display in the shelf. gfx::ImageSkia image; // Assigned by the model when the item is added. ShelfID id = kInvalidShelfID; // Running status. ShelfItemStatus status = STATUS_CLOSED; // The application id for this shelf item; only populated for some items. std::string app_id; // The title to display for tooltips, etc. base::string16 title; // Whether the tooltip should be shown on hover; generally true. bool shows_tooltip = true; // Whether the item is pinned by a policy preference (i.e. the user cannot un-pin it). bool pinned_by_policy = false; }; typedef std::vector<ShelfItem> ShelfItems; } // namespace ash #endif // ASH_COMMON_SHELF_SHELF_ITEM_TYPES_H_
799
494
/** * Copyright 2013 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.astyanax.cql.test; import java.util.UUID; import junit.framework.Assert; import org.apache.log4j.Logger; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; import com.netflix.astyanax.MutationBatch; import com.netflix.astyanax.connectionpool.OperationResult; import com.netflix.astyanax.connectionpool.exceptions.ConnectionException; import com.netflix.astyanax.cql.reads.model.CqlRangeBuilder; import com.netflix.astyanax.model.Column; import com.netflix.astyanax.model.ColumnFamily; import com.netflix.astyanax.model.ColumnList; import com.netflix.astyanax.query.RowQuery; import com.netflix.astyanax.serializers.StringSerializer; import com.netflix.astyanax.serializers.TimeUUIDSerializer; import com.netflix.astyanax.util.RangeBuilder; import com.netflix.astyanax.util.TimeUUIDUtils; public class TimeUUIDTests extends KeyspaceTests { private static final Logger LOG = Logger.getLogger(TimeUUIDTests.class); public static ColumnFamily<String, UUID> CF_TIME_UUID = ColumnFamily .newColumnFamily( "TimeUUID1", StringSerializer.get(), TimeUUIDSerializer.get()); @BeforeClass public static void init() throws Exception { initContext(); keyspace.createColumnFamily(CF_TIME_UUID, null); CF_TIME_UUID.describe(keyspace); } @AfterClass public static void tearDown() throws Exception { keyspace.dropColumnFamily(CF_TIME_UUID); } @Test public void testTimeUUID() throws Exception { MutationBatch m = keyspace.prepareMutationBatch(); UUID columnName = TimeUUIDUtils.getUniqueTimeUUIDinMillis(); long columnTime = TimeUUIDUtils.getTimeFromUUID(columnName); String rowKey = "Key1"; m.withRow(CF_TIME_UUID, rowKey).delete(); m.execute(); m.discardMutations(); int startTime = 100; int endTime = 200; m.withRow(CF_TIME_UUID, rowKey).putColumn(columnName, 42, null); for (int i = startTime; i < endTime; i++) { // UUID c = TimeUUIDUtils.getTimeUUID(i); LOG.info(TimeUUIDUtils.getTimeUUID(columnTime + i).toString()); m.withRow(CF_TIME_UUID, rowKey).putColumn( TimeUUIDUtils.getTimeUUID(columnTime + i), i, null); } m.execute(); OperationResult<Column<UUID>> result = keyspace .prepareQuery(CF_TIME_UUID).getKey(rowKey) .getColumn(columnName).execute(); Assert.assertEquals(columnName, result.getResult().getName()); Assert.assertTrue(result.getResult().getIntegerValue() == 42); OperationResult<ColumnList<UUID>> result2 = keyspace.prepareQuery(CF_TIME_UUID).getKey(rowKey).execute(); Assert.assertTrue(result2.getResult().size() >= (endTime - startTime)); result2 = keyspace .prepareQuery(CF_TIME_UUID) .getKey(rowKey) .withColumnRange( new RangeBuilder() .setLimit(10) .setStart(TimeUUIDUtils.getTimeUUID(0)) .setEnd(TimeUUIDUtils .getTimeUUID(Long.MAX_VALUE >> 8)) .build()).execute(); Assert.assertEquals(10, result2.getResult().size()); // Test timeUUID pagination RowQuery<String, UUID> query = keyspace .prepareQuery(CF_TIME_UUID) .getKey(rowKey) .withColumnRange( new CqlRangeBuilder<UUID>() .setFetchSize(10) .setStart( 
TimeUUIDUtils.getTimeUUID(columnTime + startTime)) .setEnd(TimeUUIDUtils.getTimeUUID(columnTime + endTime)).build()).autoPaginate(true); OperationResult<ColumnList<UUID>> result3; int pageCount = 0; int rowCount = 0; try { LOG.info("starting pagination"); while (!(result3 = query.execute()).getResult().isEmpty()) { pageCount++; Assert.assertTrue(result3.getResult().size() <= 10); rowCount += result3.getResult().size(); LOG.info("==== Block ===="); for (Column<UUID> column : result3.getResult()) { LOG.info("Column is " + column.getName()); } } Assert.assertTrue("pagination complete: " + pageCount, pageCount >= 10); Assert.assertTrue("pagination complete ", rowCount <= 100); } catch (ConnectionException e) { Assert.fail(); LOG.info(e.getMessage()); e.printStackTrace(); } } @Test public void testTimeUUID2() throws Exception { CF_TIME_UUID.describe(keyspace); MutationBatch m = keyspace.prepareMutationBatch(); String rowKey = "Key2"; m.withRow(CF_TIME_UUID, rowKey).delete(); m.execute(); m.discardMutations(); long now = System.currentTimeMillis(); long msecPerDay = 86400000; for (int i = 0; i < 100; i++) { m.withRow(CF_TIME_UUID, rowKey).putColumn( TimeUUIDUtils.getTimeUUID(now - i * msecPerDay), i, null); } m.execute(); OperationResult<ColumnList<UUID>> result = keyspace .prepareQuery(CF_TIME_UUID) .getKey(rowKey) .withColumnRange( new RangeBuilder() .setLimit(100) .setStart( TimeUUIDUtils.getTimeUUID(now - 20 * msecPerDay)).build()) .execute(); Assert.assertTrue(result.getResult().size() >= 20); } }
2,963
1,165
/******************************************************************************* * Copyright 2018 T Mobile, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy * of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. ******************************************************************************/ package com.tmobile.pacman.api.asset.domain; import io.swagger.annotations.ApiModelProperty; /** * The Class SearchFilterAttribute. */ public class SearchFilterAttribute extends SearchFilterItem { private String type = "searchFilterAttribute"; @ApiModelProperty(hidden = true) long count; boolean applied; SearchFilterAttributeGroup groupBy; /** * Gets the group by. * * @return the group by */ public SearchFilterAttributeGroup getGroupBy() { return groupBy; } /** * Sets the group by. * * @param groupBy the new group by */ public void setGroupBy(SearchFilterAttributeGroup groupBy) { this.groupBy = groupBy; } /** * Gets the count. * * @return the count */ public long getCount() { return count; } /** * Sets the count. * * @param count the new count */ public void setCount(long count) { this.count = count; } /** * Checks if is applied. * * @return true, if is applied */ public boolean isApplied() { return applied; } /** * Sets the applied. * * @param applied the new applied */ public void setApplied(boolean applied) { this.applied = applied; } /** * Gets the type. * * @return the type */ public String getType() { return type; } /** * Sets the type. * * @param type the new type */ public void setType(String type) { this.type = type; } }
857
848
<filename>tools/Vitis-AI-Library/pointpillars_nuscenes/src/voxelize.hpp<gh_stars>100-1000 /* * Copyright 2019 Xilinx Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <vector> #include <utility> #include <memory> using namespace std; namespace vitis { namespace ai { namespace pointpillars_nus{ //constexpr uint32_t MAX_POINTS_NUM = 64; //constexpr uint32_t MAX_VOXELS_NUM = 40000; class Voxelization { public: static std::unique_ptr<Voxelization> create(const std::vector<float> &input_means, const std::vector<float> &input_scales, int max_points_num, int max_voxels_num); explicit Voxelization(const std::vector<float> &input_means, const std::vector<float> &input_scales, int max_points_num, int max_voxels_num); std::vector<int> voxelize(const vector<float> &points, int dim, int8_t *input_tensor_ptr, size_t input_tensor_size); private: std::vector<int> voxelize_input_internal(const vector<float> &points, int dim, int8_t * input_tensor_ptr, size_t input_tensor_size); int voxelize_input(const std::vector<float> &points, int dim, std::vector<int> &coors, int8_t *input_tensor_ptr); private: std::vector<float> input_means_; std::vector<float> input_scales_; int max_points_num_; int max_voxels_num_; std::vector<float> voxels_size_; std::vector<float> coors_range_; int coors_dim_; // 4 = 3 + padding }; //std::vector<int> preprocess(const vector<float> &points, int dim, // const std::vector<float> &input_mean, const std::vector<float> &input_scale, // int8_t *input_tensor_ptr); } // end of pointpillars_nus }}
1,000
1,325
<reponame>NearlyTRex/BetterEnums<filename>example/8-representation.cc // This file was generated automatically. // Representation // // Let's go over some of the low-level properties of a Better Enum. This time, // we will declare a more unusual enum than the ones we have seen. #include <cassert> #include <iostream> #include <enum.h> BETTER_ENUM(ContentType, short, CompressedVideo = 5, PCM = 8, Subtitles = 17, Comment = 44) // This is for a hypothetical multimedia container file format. Perhaps the // files have sections, and each one has a header: struct Header { ContentType type; short flags; int offset; }; // Here is what we have. int main() { assert(sizeof(ContentType) == 2); // ContentType behaves just like a short; in fact, it simply wraps one. This // makes it possible to lay out structures in a predictable fashion: Header header = {ContentType::PCM, 0, 0}; assert(sizeof(header) == 8); assert((size_t)&header.flags - (size_t)&header.type == 2); // short is called ContentType's underlying or representation type. If you // want to know the representation type of any enum you have declared, it is // available as the member type ::_integral: ContentType::_integral untrusted_value = 44; // Use this if you want a sized field to receive untrusted data, but aren't // willing to call it ContentType yet because you have not validated it. Your // validator will likely call ::_from_integral_nothrow, perform any other // validation your application requires, and then return ContentType. ContentType type = ContentType::_from_integral(untrusted_value); std::cout << type._to_string() << std::endl; // You have probably noticed the initializers on each of the constants in // ContentType. This allows you to declare sparse enums for compatibility with // external protocols or previous versions of your software. The initializers // don't need to be literal integers - they can be anything that the compiler // would accept in a normal enum declaration. If there was a macro called // BIG_FAT_MACRO declared above, we could have written Subtitles = // BIG_FAT_MACRO. We could also have written Subtitles = CompressedVideo. // The in-memory representation of an enum value is simply the number it has // been assigned by the compiler. You should be safe passing enums to functions // like fread and fwrite, and casting memory blocks known to be safe to struct // types containing enums. The enums will behave as expected. return 0; }
758
335
<gh_stars>100-1000 { "word": "Malpractitioner", "definitions": [ "A person who is guilty of malpractice." ], "parts-of-speech": "Noun" }
72
3,531
# # Creating a transition # props = [lv.STYLE.BG_COLOR, lv.STYLE.BORDER_COLOR, lv.STYLE.BORDER_WIDTH, 0] # A default transition # Make it fast (100ms) and start with some delay (200 ms) trans_def = lv.style_transition_dsc_t() trans_def.init(props, lv.anim_t.path_linear, 100, 200, None) # A special transition when going to pressed state # Make it slow (500 ms) but start without delay trans_pr = lv.style_transition_dsc_t() trans_pr.init(props, lv.anim_t.path_linear, 500, 0, None) style_def = lv.style_t() style_def.init() style_def.set_transition(trans_def) style_pr = lv.style_t() style_pr.init() style_pr.set_bg_color(lv.palette_main(lv.PALETTE.RED)) style_pr.set_border_width(6) style_pr.set_border_color(lv.palette_darken(lv.PALETTE.RED, 3)) style_pr.set_transition(trans_pr) # Create an object with the new style_pr obj = lv.obj(lv.scr_act()) obj.add_style(style_def, 0) obj.add_style(style_pr, lv.STATE.PRESSED) obj.center()
399
451
// File Automatically generated by eLiSe #include "StdAfx.h" #include "cEqAppui_GL__TerFix_M2CFour7x2.h" cEqAppui_GL__TerFix_M2CFour7x2::cEqAppui_GL__TerFix_M2CFour7x2(): cElCompiledFonc(2) { AddIntRef (cIncIntervale("Intr",0,14)); AddIntRef (cIncIntervale("Orient",14,20)); Close(false); } void cEqAppui_GL__TerFix_M2CFour7x2::ComputeVal() { double tmp0_ = mCompCoord[14]; double tmp1_ = mCompCoord[15]; double tmp2_ = cos(tmp1_); double tmp3_ = cos(tmp0_); double tmp4_ = tmp3_*tmp2_; double tmp5_ = sin(tmp0_); double tmp6_ = tmp5_*tmp2_; double tmp7_ = sin(tmp1_); double tmp8_ = mCompCoord[16]; double tmp9_ = sin(tmp8_); double tmp10_ = -(tmp9_); double tmp11_ = -(tmp7_); double tmp12_ = cos(tmp8_); double tmp13_ = mCompCoord[17]; double tmp14_ = mLocXTer-tmp13_; double tmp15_ = -(tmp5_); double tmp16_ = tmp15_*tmp10_; double tmp17_ = tmp3_*tmp11_; double tmp18_ = tmp17_*tmp12_; double tmp19_ = tmp16_+tmp18_; double tmp20_ = tmp3_*tmp10_; double tmp21_ = tmp5_*tmp11_; double tmp22_ = tmp21_*tmp12_; double tmp23_ = tmp20_+tmp22_; double tmp24_ = tmp2_*tmp12_; double tmp25_ = mCompCoord[18]; double tmp26_ = mLocYTer-tmp25_; double tmp27_ = mCompCoord[19]; double tmp28_ = mLocZTer-tmp27_; double tmp29_ = tmp15_*tmp12_; double tmp30_ = tmp17_*tmp9_; double tmp31_ = tmp29_+tmp30_; double tmp32_ = tmp3_*tmp12_; double tmp33_ = tmp21_*tmp9_; double tmp34_ = tmp32_+tmp33_; double tmp35_ = tmp2_*tmp9_; double tmp36_ = mCompCoord[0]; double tmp37_ = (tmp19_)*mLocGL_0_0; double tmp38_ = (tmp23_)*mLocGL_1_0; double tmp39_ = tmp37_+tmp38_; double tmp40_ = tmp24_*mLocGL_2_0; double tmp41_ = tmp39_+tmp40_; double tmp42_ = (tmp41_)*(tmp14_); double tmp43_ = (tmp19_)*mLocGL_0_1; double tmp44_ = (tmp23_)*mLocGL_1_1; double tmp45_ = tmp43_+tmp44_; double tmp46_ = tmp24_*mLocGL_2_1; double tmp47_ = tmp45_+tmp46_; double tmp48_ = (tmp47_)*(tmp26_); double tmp49_ = tmp42_+tmp48_; double tmp50_ = (tmp19_)*mLocGL_0_2; double tmp51_ = (tmp23_)*mLocGL_1_2; double tmp52_ = tmp50_+tmp51_; double tmp53_ = tmp24_*mLocGL_2_2; double tmp54_ = tmp52_+tmp53_; double tmp55_ = (tmp54_)*(tmp28_); double tmp56_ = tmp49_+tmp55_; double tmp57_ = tmp36_/(tmp56_); double tmp58_ = tmp4_*mLocGL_0_0; double tmp59_ = tmp6_*mLocGL_1_0; double tmp60_ = tmp58_+tmp59_; double tmp61_ = tmp7_*mLocGL_2_0; double tmp62_ = tmp60_+tmp61_; double tmp63_ = (tmp62_)*(tmp14_); double tmp64_ = tmp4_*mLocGL_0_1; double tmp65_ = tmp6_*mLocGL_1_1; double tmp66_ = tmp64_+tmp65_; double tmp67_ = tmp7_*mLocGL_2_1; double tmp68_ = tmp66_+tmp67_; double tmp69_ = (tmp68_)*(tmp26_); double tmp70_ = tmp63_+tmp69_; double tmp71_ = tmp4_*mLocGL_0_2; double tmp72_ = tmp6_*mLocGL_1_2; double tmp73_ = tmp71_+tmp72_; double tmp74_ = tmp7_*mLocGL_2_2; double tmp75_ = tmp73_+tmp74_; double tmp76_ = (tmp75_)*(tmp28_); double tmp77_ = tmp70_+tmp76_; double tmp78_ = (tmp77_)*(tmp57_); double tmp79_ = mCompCoord[1]; double tmp80_ = tmp78_+tmp79_; double tmp81_ = (tmp80_)-mLocFour7x2_State_1_0; double tmp82_ = (tmp81_)/mLocFour7x2_State_0_0; double tmp83_ = (tmp31_)*mLocGL_0_0; double tmp84_ = (tmp34_)*mLocGL_1_0; double tmp85_ = tmp83_+tmp84_; double tmp86_ = tmp35_*mLocGL_2_0; double tmp87_ = tmp85_+tmp86_; double tmp88_ = (tmp87_)*(tmp14_); double tmp89_ = (tmp31_)*mLocGL_0_1; double tmp90_ = (tmp34_)*mLocGL_1_1; double tmp91_ = tmp89_+tmp90_; double tmp92_ = tmp35_*mLocGL_2_1; double tmp93_ = tmp91_+tmp92_; double tmp94_ = (tmp93_)*(tmp26_); double tmp95_ = tmp88_+tmp94_; double tmp96_ = (tmp31_)*mLocGL_0_2; double tmp97_ = (tmp34_)*mLocGL_1_2; double tmp98_ = 
tmp96_+tmp97_; double tmp99_ = tmp35_*mLocGL_2_2; double tmp100_ = tmp98_+tmp99_; double tmp101_ = (tmp100_)*(tmp28_); double tmp102_ = tmp95_+tmp101_; double tmp103_ = (tmp102_)*(tmp57_); double tmp104_ = mCompCoord[2]; double tmp105_ = tmp103_+tmp104_; double tmp106_ = (tmp105_)-mLocFour7x2_State_2_0; double tmp107_ = (tmp106_)/mLocFour7x2_State_0_0; double tmp108_ = mCompCoord[9]; double tmp109_ = tmp82_-tmp108_; double tmp110_ = mCompCoord[10]; double tmp111_ = tmp107_-tmp110_; double tmp112_ = (tmp109_)*(tmp109_); double tmp113_ = (tmp111_)*(tmp111_); double tmp114_ = tmp112_+tmp113_; double tmp115_ = (tmp114_)*(tmp114_); double tmp116_ = mCompCoord[3]; double tmp117_ = mCompCoord[4]; double tmp118_ = mCompCoord[5]; double tmp119_ = (tmp82_)*(tmp107_); double tmp120_ = mCompCoord[6]; double tmp121_ = (tmp107_)*(tmp107_); double tmp122_ = (tmp82_)*(tmp82_); double tmp123_ = mCompCoord[11]; double tmp124_ = tmp123_*(tmp114_); double tmp125_ = mCompCoord[12]; double tmp126_ = tmp125_*tmp115_; double tmp127_ = tmp124_+tmp126_; double tmp128_ = mCompCoord[13]; double tmp129_ = tmp115_*(tmp114_); double tmp130_ = tmp128_*tmp129_; double tmp131_ = tmp127_+tmp130_; mVal[0] = ((mLocFour7x2_State_1_0+(((1+tmp116_)*(tmp82_)+tmp117_*(tmp107_))-tmp118_*2*tmp122_+tmp120_*tmp119_+mCompCoord[7]*tmp121_+(tmp109_)*(tmp131_))*mLocFour7x2_State_0_0)-mLocXIm)*mLocScNorm; mVal[1] = ((mLocFour7x2_State_2_0+(((1-tmp116_)*(tmp107_)+tmp117_*(tmp82_)+tmp118_*tmp119_)-tmp120_*2*tmp121_+mCompCoord[8]*tmp122_+(tmp111_)*(tmp131_))*mLocFour7x2_State_0_0)-mLocYIm)*mLocScNorm; } void cEqAppui_GL__TerFix_M2CFour7x2::ComputeValDeriv() { double tmp0_ = mCompCoord[14]; double tmp1_ = mCompCoord[15]; double tmp2_ = cos(tmp1_); double tmp3_ = cos(tmp0_); double tmp4_ = tmp3_*tmp2_; double tmp5_ = sin(tmp0_); double tmp6_ = tmp5_*tmp2_; double tmp7_ = sin(tmp1_); double tmp8_ = mCompCoord[16]; double tmp9_ = sin(tmp8_); double tmp10_ = -(tmp9_); double tmp11_ = -(tmp7_); double tmp12_ = cos(tmp8_); double tmp13_ = mCompCoord[17]; double tmp14_ = mLocXTer-tmp13_; double tmp15_ = -(tmp5_); double tmp16_ = tmp15_*tmp10_; double tmp17_ = tmp3_*tmp11_; double tmp18_ = tmp17_*tmp12_; double tmp19_ = tmp16_+tmp18_; double tmp20_ = tmp3_*tmp10_; double tmp21_ = tmp5_*tmp11_; double tmp22_ = tmp21_*tmp12_; double tmp23_ = tmp20_+tmp22_; double tmp24_ = tmp2_*tmp12_; double tmp25_ = mCompCoord[18]; double tmp26_ = mLocYTer-tmp25_; double tmp27_ = mCompCoord[19]; double tmp28_ = mLocZTer-tmp27_; double tmp29_ = tmp15_*tmp12_; double tmp30_ = tmp17_*tmp9_; double tmp31_ = tmp29_+tmp30_; double tmp32_ = tmp3_*tmp12_; double tmp33_ = tmp21_*tmp9_; double tmp34_ = tmp32_+tmp33_; double tmp35_ = tmp2_*tmp9_; double tmp36_ = mCompCoord[0]; double tmp37_ = (tmp19_)*mLocGL_0_0; double tmp38_ = (tmp23_)*mLocGL_1_0; double tmp39_ = tmp37_+tmp38_; double tmp40_ = tmp24_*mLocGL_2_0; double tmp41_ = tmp39_+tmp40_; double tmp42_ = (tmp41_)*(tmp14_); double tmp43_ = (tmp19_)*mLocGL_0_1; double tmp44_ = (tmp23_)*mLocGL_1_1; double tmp45_ = tmp43_+tmp44_; double tmp46_ = tmp24_*mLocGL_2_1; double tmp47_ = tmp45_+tmp46_; double tmp48_ = (tmp47_)*(tmp26_); double tmp49_ = tmp42_+tmp48_; double tmp50_ = (tmp19_)*mLocGL_0_2; double tmp51_ = (tmp23_)*mLocGL_1_2; double tmp52_ = tmp50_+tmp51_; double tmp53_ = tmp24_*mLocGL_2_2; double tmp54_ = tmp52_+tmp53_; double tmp55_ = (tmp54_)*(tmp28_); double tmp56_ = tmp49_+tmp55_; double tmp57_ = tmp36_/(tmp56_); double tmp58_ = tmp4_*mLocGL_0_0; double tmp59_ = tmp6_*mLocGL_1_0; double tmp60_ = tmp58_+tmp59_; 
double tmp61_ = tmp7_*mLocGL_2_0; double tmp62_ = tmp60_+tmp61_; double tmp63_ = (tmp62_)*(tmp14_); double tmp64_ = tmp4_*mLocGL_0_1; double tmp65_ = tmp6_*mLocGL_1_1; double tmp66_ = tmp64_+tmp65_; double tmp67_ = tmp7_*mLocGL_2_1; double tmp68_ = tmp66_+tmp67_; double tmp69_ = (tmp68_)*(tmp26_); double tmp70_ = tmp63_+tmp69_; double tmp71_ = tmp4_*mLocGL_0_2; double tmp72_ = tmp6_*mLocGL_1_2; double tmp73_ = tmp71_+tmp72_; double tmp74_ = tmp7_*mLocGL_2_2; double tmp75_ = tmp73_+tmp74_; double tmp76_ = (tmp75_)*(tmp28_); double tmp77_ = tmp70_+tmp76_; double tmp78_ = (tmp77_)*(tmp57_); double tmp79_ = mCompCoord[1]; double tmp80_ = tmp78_+tmp79_; double tmp81_ = (tmp80_)-mLocFour7x2_State_1_0; double tmp82_ = (tmp81_)/mLocFour7x2_State_0_0; double tmp83_ = (tmp31_)*mLocGL_0_0; double tmp84_ = (tmp34_)*mLocGL_1_0; double tmp85_ = tmp83_+tmp84_; double tmp86_ = tmp35_*mLocGL_2_0; double tmp87_ = tmp85_+tmp86_; double tmp88_ = (tmp87_)*(tmp14_); double tmp89_ = (tmp31_)*mLocGL_0_1; double tmp90_ = (tmp34_)*mLocGL_1_1; double tmp91_ = tmp89_+tmp90_; double tmp92_ = tmp35_*mLocGL_2_1; double tmp93_ = tmp91_+tmp92_; double tmp94_ = (tmp93_)*(tmp26_); double tmp95_ = tmp88_+tmp94_; double tmp96_ = (tmp31_)*mLocGL_0_2; double tmp97_ = (tmp34_)*mLocGL_1_2; double tmp98_ = tmp96_+tmp97_; double tmp99_ = tmp35_*mLocGL_2_2; double tmp100_ = tmp98_+tmp99_; double tmp101_ = (tmp100_)*(tmp28_); double tmp102_ = tmp95_+tmp101_; double tmp103_ = (tmp102_)*(tmp57_); double tmp104_ = mCompCoord[2]; double tmp105_ = tmp103_+tmp104_; double tmp106_ = (tmp105_)-mLocFour7x2_State_2_0; double tmp107_ = (tmp106_)/mLocFour7x2_State_0_0; double tmp108_ = mCompCoord[9]; double tmp109_ = tmp82_-tmp108_; double tmp110_ = mCompCoord[10]; double tmp111_ = tmp107_-tmp110_; double tmp112_ = (tmp109_)*(tmp109_); double tmp113_ = (tmp111_)*(tmp111_); double tmp114_ = tmp112_+tmp113_; double tmp115_ = (tmp114_)*(tmp114_); double tmp116_ = mCompCoord[3]; double tmp117_ = 1+tmp116_; double tmp118_ = ElSquare(tmp56_); double tmp119_ = (tmp56_)/tmp118_; double tmp120_ = ElSquare(mLocFour7x2_State_0_0); double tmp121_ = mCompCoord[4]; double tmp122_ = (tmp119_)*(tmp77_); double tmp123_ = tmp122_*mLocFour7x2_State_0_0; double tmp124_ = (tmp123_)/tmp120_; double tmp125_ = (tmp124_)*(tmp82_); double tmp126_ = mCompCoord[5]; double tmp127_ = tmp126_*2; double tmp128_ = (tmp119_)*(tmp102_); double tmp129_ = tmp128_*mLocFour7x2_State_0_0; double tmp130_ = (tmp129_)/tmp120_; double tmp131_ = mCompCoord[6]; double tmp132_ = (tmp130_)*(tmp107_); double tmp133_ = mCompCoord[7]; double tmp134_ = mCompCoord[11]; double tmp135_ = tmp134_*(tmp114_); double tmp136_ = mCompCoord[12]; double tmp137_ = tmp136_*tmp115_; double tmp138_ = tmp135_+tmp137_; double tmp139_ = mCompCoord[13]; double tmp140_ = tmp115_*(tmp114_); double tmp141_ = tmp139_*tmp140_; double tmp142_ = tmp138_+tmp141_; double tmp143_ = (tmp124_)*(tmp109_); double tmp144_ = (tmp130_)*(tmp111_); double tmp145_ = tmp143_+tmp143_; double tmp146_ = tmp144_+tmp144_; double tmp147_ = tmp145_+tmp146_; double tmp148_ = (tmp147_)*(tmp114_); double tmp149_ = tmp148_+tmp148_; double tmp150_ = mLocFour7x2_State_0_0/tmp120_; double tmp151_ = (tmp150_)*(tmp82_); double tmp152_ = (tmp150_)*(tmp109_); double tmp153_ = tmp152_+tmp152_; double tmp154_ = (tmp153_)*(tmp114_); double tmp155_ = tmp154_+tmp154_; double tmp156_ = (tmp150_)*(tmp107_); double tmp157_ = (tmp150_)*(tmp111_); double tmp158_ = tmp157_+tmp157_; double tmp159_ = (tmp158_)*(tmp114_); double tmp160_ = tmp159_+tmp159_; double 
tmp161_ = (tmp82_)*(tmp82_); double tmp162_ = (tmp82_)*(tmp107_); double tmp163_ = (tmp107_)*(tmp107_); double tmp164_ = -(1); double tmp165_ = tmp164_*(tmp109_); double tmp166_ = tmp165_+tmp165_; double tmp167_ = (tmp166_)*(tmp114_); double tmp168_ = tmp167_+tmp167_; double tmp169_ = tmp164_*(tmp111_); double tmp170_ = tmp169_+tmp169_; double tmp171_ = (tmp170_)*(tmp114_); double tmp172_ = tmp171_+tmp171_; double tmp173_ = tmp164_*tmp5_; double tmp174_ = tmp173_*tmp2_; double tmp175_ = -(tmp3_); double tmp176_ = tmp175_*tmp10_; double tmp177_ = tmp173_*tmp11_; double tmp178_ = tmp177_*tmp12_; double tmp179_ = tmp176_+tmp178_; double tmp180_ = tmp173_*tmp10_; double tmp181_ = tmp180_+tmp18_; double tmp182_ = tmp175_*tmp12_; double tmp183_ = tmp177_*tmp9_; double tmp184_ = tmp182_+tmp183_; double tmp185_ = tmp173_*tmp12_; double tmp186_ = tmp185_+tmp30_; double tmp187_ = (tmp179_)*mLocGL_0_0; double tmp188_ = (tmp181_)*mLocGL_1_0; double tmp189_ = tmp187_+tmp188_; double tmp190_ = (tmp189_)*(tmp14_); double tmp191_ = (tmp179_)*mLocGL_0_1; double tmp192_ = (tmp181_)*mLocGL_1_1; double tmp193_ = tmp191_+tmp192_; double tmp194_ = (tmp193_)*(tmp26_); double tmp195_ = tmp190_+tmp194_; double tmp196_ = (tmp179_)*mLocGL_0_2; double tmp197_ = (tmp181_)*mLocGL_1_2; double tmp198_ = tmp196_+tmp197_; double tmp199_ = (tmp198_)*(tmp28_); double tmp200_ = tmp195_+tmp199_; double tmp201_ = tmp36_*(tmp200_); double tmp202_ = -(tmp201_); double tmp203_ = tmp202_/tmp118_; double tmp204_ = tmp174_*mLocGL_0_0; double tmp205_ = tmp4_*mLocGL_1_0; double tmp206_ = tmp204_+tmp205_; double tmp207_ = (tmp206_)*(tmp14_); double tmp208_ = tmp174_*mLocGL_0_1; double tmp209_ = tmp4_*mLocGL_1_1; double tmp210_ = tmp208_+tmp209_; double tmp211_ = (tmp210_)*(tmp26_); double tmp212_ = tmp207_+tmp211_; double tmp213_ = tmp174_*mLocGL_0_2; double tmp214_ = tmp4_*mLocGL_1_2; double tmp215_ = tmp213_+tmp214_; double tmp216_ = (tmp215_)*(tmp28_); double tmp217_ = tmp212_+tmp216_; double tmp218_ = (tmp217_)*(tmp57_); double tmp219_ = (tmp203_)*(tmp77_); double tmp220_ = tmp218_+tmp219_; double tmp221_ = (tmp220_)*mLocFour7x2_State_0_0; double tmp222_ = (tmp221_)/tmp120_; double tmp223_ = (tmp222_)*(tmp82_); double tmp224_ = (tmp184_)*mLocGL_0_0; double tmp225_ = (tmp186_)*mLocGL_1_0; double tmp226_ = tmp224_+tmp225_; double tmp227_ = (tmp226_)*(tmp14_); double tmp228_ = (tmp184_)*mLocGL_0_1; double tmp229_ = (tmp186_)*mLocGL_1_1; double tmp230_ = tmp228_+tmp229_; double tmp231_ = (tmp230_)*(tmp26_); double tmp232_ = tmp227_+tmp231_; double tmp233_ = (tmp184_)*mLocGL_0_2; double tmp234_ = (tmp186_)*mLocGL_1_2; double tmp235_ = tmp233_+tmp234_; double tmp236_ = (tmp235_)*(tmp28_); double tmp237_ = tmp232_+tmp236_; double tmp238_ = (tmp237_)*(tmp57_); double tmp239_ = (tmp203_)*(tmp102_); double tmp240_ = tmp238_+tmp239_; double tmp241_ = (tmp240_)*mLocFour7x2_State_0_0; double tmp242_ = (tmp241_)/tmp120_; double tmp243_ = (tmp242_)*(tmp107_); double tmp244_ = (tmp222_)*(tmp109_); double tmp245_ = (tmp242_)*(tmp111_); double tmp246_ = tmp244_+tmp244_; double tmp247_ = tmp245_+tmp245_; double tmp248_ = tmp246_+tmp247_; double tmp249_ = (tmp248_)*(tmp114_); double tmp250_ = tmp249_+tmp249_; double tmp251_ = tmp164_*tmp7_; double tmp252_ = tmp251_*tmp3_; double tmp253_ = tmp251_*tmp5_; double tmp254_ = -(tmp2_); double tmp255_ = tmp254_*tmp3_; double tmp256_ = tmp255_*tmp12_; double tmp257_ = tmp254_*tmp5_; double tmp258_ = tmp257_*tmp12_; double tmp259_ = tmp251_*tmp12_; double tmp260_ = tmp255_*tmp9_; double tmp261_ = tmp257_*tmp9_; 
double tmp262_ = tmp251_*tmp9_; double tmp263_ = tmp256_*mLocGL_0_0; double tmp264_ = tmp258_*mLocGL_1_0; double tmp265_ = tmp263_+tmp264_; double tmp266_ = tmp259_*mLocGL_2_0; double tmp267_ = tmp265_+tmp266_; double tmp268_ = (tmp267_)*(tmp14_); double tmp269_ = tmp256_*mLocGL_0_1; double tmp270_ = tmp258_*mLocGL_1_1; double tmp271_ = tmp269_+tmp270_; double tmp272_ = tmp259_*mLocGL_2_1; double tmp273_ = tmp271_+tmp272_; double tmp274_ = (tmp273_)*(tmp26_); double tmp275_ = tmp268_+tmp274_; double tmp276_ = tmp256_*mLocGL_0_2; double tmp277_ = tmp258_*mLocGL_1_2; double tmp278_ = tmp276_+tmp277_; double tmp279_ = tmp259_*mLocGL_2_2; double tmp280_ = tmp278_+tmp279_; double tmp281_ = (tmp280_)*(tmp28_); double tmp282_ = tmp275_+tmp281_; double tmp283_ = tmp36_*(tmp282_); double tmp284_ = -(tmp283_); double tmp285_ = tmp284_/tmp118_; double tmp286_ = tmp252_*mLocGL_0_0; double tmp287_ = tmp253_*mLocGL_1_0; double tmp288_ = tmp286_+tmp287_; double tmp289_ = tmp2_*mLocGL_2_0; double tmp290_ = tmp288_+tmp289_; double tmp291_ = (tmp290_)*(tmp14_); double tmp292_ = tmp252_*mLocGL_0_1; double tmp293_ = tmp253_*mLocGL_1_1; double tmp294_ = tmp292_+tmp293_; double tmp295_ = tmp2_*mLocGL_2_1; double tmp296_ = tmp294_+tmp295_; double tmp297_ = (tmp296_)*(tmp26_); double tmp298_ = tmp291_+tmp297_; double tmp299_ = tmp252_*mLocGL_0_2; double tmp300_ = tmp253_*mLocGL_1_2; double tmp301_ = tmp299_+tmp300_; double tmp302_ = tmp2_*mLocGL_2_2; double tmp303_ = tmp301_+tmp302_; double tmp304_ = (tmp303_)*(tmp28_); double tmp305_ = tmp298_+tmp304_; double tmp306_ = (tmp305_)*(tmp57_); double tmp307_ = (tmp285_)*(tmp77_); double tmp308_ = tmp306_+tmp307_; double tmp309_ = (tmp308_)*mLocFour7x2_State_0_0; double tmp310_ = (tmp309_)/tmp120_; double tmp311_ = (tmp310_)*(tmp82_); double tmp312_ = tmp260_*mLocGL_0_0; double tmp313_ = tmp261_*mLocGL_1_0; double tmp314_ = tmp312_+tmp313_; double tmp315_ = tmp262_*mLocGL_2_0; double tmp316_ = tmp314_+tmp315_; double tmp317_ = (tmp316_)*(tmp14_); double tmp318_ = tmp260_*mLocGL_0_1; double tmp319_ = tmp261_*mLocGL_1_1; double tmp320_ = tmp318_+tmp319_; double tmp321_ = tmp262_*mLocGL_2_1; double tmp322_ = tmp320_+tmp321_; double tmp323_ = (tmp322_)*(tmp26_); double tmp324_ = tmp317_+tmp323_; double tmp325_ = tmp260_*mLocGL_0_2; double tmp326_ = tmp261_*mLocGL_1_2; double tmp327_ = tmp325_+tmp326_; double tmp328_ = tmp262_*mLocGL_2_2; double tmp329_ = tmp327_+tmp328_; double tmp330_ = (tmp329_)*(tmp28_); double tmp331_ = tmp324_+tmp330_; double tmp332_ = (tmp331_)*(tmp57_); double tmp333_ = (tmp285_)*(tmp102_); double tmp334_ = tmp332_+tmp333_; double tmp335_ = (tmp334_)*mLocFour7x2_State_0_0; double tmp336_ = (tmp335_)/tmp120_; double tmp337_ = (tmp336_)*(tmp107_); double tmp338_ = (tmp310_)*(tmp109_); double tmp339_ = (tmp336_)*(tmp111_); double tmp340_ = tmp338_+tmp338_; double tmp341_ = tmp339_+tmp339_; double tmp342_ = tmp340_+tmp341_; double tmp343_ = (tmp342_)*(tmp114_); double tmp344_ = tmp343_+tmp343_; double tmp345_ = -(tmp12_); double tmp346_ = tmp164_*tmp9_; double tmp347_ = tmp345_*tmp15_; double tmp348_ = tmp346_*tmp17_; double tmp349_ = tmp347_+tmp348_; double tmp350_ = tmp345_*tmp3_; double tmp351_ = tmp346_*tmp21_; double tmp352_ = tmp350_+tmp351_; double tmp353_ = tmp346_*tmp2_; double tmp354_ = tmp346_*tmp15_; double tmp355_ = tmp12_*tmp17_; double tmp356_ = tmp354_+tmp355_; double tmp357_ = tmp346_*tmp3_; double tmp358_ = tmp12_*tmp21_; double tmp359_ = tmp357_+tmp358_; double tmp360_ = tmp12_*tmp2_; double tmp361_ = (tmp349_)*mLocGL_0_0; double 
tmp362_ = (tmp352_)*mLocGL_1_0; double tmp363_ = tmp361_+tmp362_; double tmp364_ = tmp353_*mLocGL_2_0; double tmp365_ = tmp363_+tmp364_; double tmp366_ = (tmp365_)*(tmp14_); double tmp367_ = (tmp349_)*mLocGL_0_1; double tmp368_ = (tmp352_)*mLocGL_1_1; double tmp369_ = tmp367_+tmp368_; double tmp370_ = tmp353_*mLocGL_2_1; double tmp371_ = tmp369_+tmp370_; double tmp372_ = (tmp371_)*(tmp26_); double tmp373_ = tmp366_+tmp372_; double tmp374_ = (tmp349_)*mLocGL_0_2; double tmp375_ = (tmp352_)*mLocGL_1_2; double tmp376_ = tmp374_+tmp375_; double tmp377_ = tmp353_*mLocGL_2_2; double tmp378_ = tmp376_+tmp377_; double tmp379_ = (tmp378_)*(tmp28_); double tmp380_ = tmp373_+tmp379_; double tmp381_ = tmp36_*(tmp380_); double tmp382_ = -(tmp381_); double tmp383_ = tmp382_/tmp118_; double tmp384_ = (tmp383_)*(tmp77_); double tmp385_ = tmp384_*mLocFour7x2_State_0_0; double tmp386_ = (tmp385_)/tmp120_; double tmp387_ = (tmp386_)*(tmp82_); double tmp388_ = (tmp356_)*mLocGL_0_0; double tmp389_ = (tmp359_)*mLocGL_1_0; double tmp390_ = tmp388_+tmp389_; double tmp391_ = tmp360_*mLocGL_2_0; double tmp392_ = tmp390_+tmp391_; double tmp393_ = (tmp392_)*(tmp14_); double tmp394_ = (tmp356_)*mLocGL_0_1; double tmp395_ = (tmp359_)*mLocGL_1_1; double tmp396_ = tmp394_+tmp395_; double tmp397_ = tmp360_*mLocGL_2_1; double tmp398_ = tmp396_+tmp397_; double tmp399_ = (tmp398_)*(tmp26_); double tmp400_ = tmp393_+tmp399_; double tmp401_ = (tmp356_)*mLocGL_0_2; double tmp402_ = (tmp359_)*mLocGL_1_2; double tmp403_ = tmp401_+tmp402_; double tmp404_ = tmp360_*mLocGL_2_2; double tmp405_ = tmp403_+tmp404_; double tmp406_ = (tmp405_)*(tmp28_); double tmp407_ = tmp400_+tmp406_; double tmp408_ = (tmp407_)*(tmp57_); double tmp409_ = (tmp383_)*(tmp102_); double tmp410_ = tmp408_+tmp409_; double tmp411_ = (tmp410_)*mLocFour7x2_State_0_0; double tmp412_ = (tmp411_)/tmp120_; double tmp413_ = (tmp412_)*(tmp107_); double tmp414_ = (tmp386_)*(tmp109_); double tmp415_ = (tmp412_)*(tmp111_); double tmp416_ = tmp414_+tmp414_; double tmp417_ = tmp415_+tmp415_; double tmp418_ = tmp416_+tmp417_; double tmp419_ = (tmp418_)*(tmp114_); double tmp420_ = tmp419_+tmp419_; double tmp421_ = tmp164_*(tmp41_); double tmp422_ = tmp36_*tmp421_; double tmp423_ = -(tmp422_); double tmp424_ = tmp423_/tmp118_; double tmp425_ = tmp164_*(tmp62_); double tmp426_ = tmp425_*(tmp57_); double tmp427_ = (tmp424_)*(tmp77_); double tmp428_ = tmp426_+tmp427_; double tmp429_ = (tmp428_)*mLocFour7x2_State_0_0; double tmp430_ = (tmp429_)/tmp120_; double tmp431_ = (tmp430_)*(tmp82_); double tmp432_ = tmp164_*(tmp87_); double tmp433_ = tmp432_*(tmp57_); double tmp434_ = (tmp424_)*(tmp102_); double tmp435_ = tmp433_+tmp434_; double tmp436_ = (tmp435_)*mLocFour7x2_State_0_0; double tmp437_ = (tmp436_)/tmp120_; double tmp438_ = (tmp437_)*(tmp107_); double tmp439_ = (tmp430_)*(tmp109_); double tmp440_ = (tmp437_)*(tmp111_); double tmp441_ = tmp439_+tmp439_; double tmp442_ = tmp440_+tmp440_; double tmp443_ = tmp441_+tmp442_; double tmp444_ = (tmp443_)*(tmp114_); double tmp445_ = tmp444_+tmp444_; double tmp446_ = tmp164_*(tmp47_); double tmp447_ = tmp36_*tmp446_; double tmp448_ = -(tmp447_); double tmp449_ = tmp448_/tmp118_; double tmp450_ = tmp164_*(tmp68_); double tmp451_ = tmp450_*(tmp57_); double tmp452_ = (tmp449_)*(tmp77_); double tmp453_ = tmp451_+tmp452_; double tmp454_ = (tmp453_)*mLocFour7x2_State_0_0; double tmp455_ = (tmp454_)/tmp120_; double tmp456_ = (tmp455_)*(tmp82_); double tmp457_ = tmp164_*(tmp93_); double tmp458_ = tmp457_*(tmp57_); double tmp459_ = 
(tmp449_)*(tmp102_); double tmp460_ = tmp458_+tmp459_; double tmp461_ = (tmp460_)*mLocFour7x2_State_0_0; double tmp462_ = (tmp461_)/tmp120_; double tmp463_ = (tmp462_)*(tmp107_); double tmp464_ = (tmp455_)*(tmp109_); double tmp465_ = (tmp462_)*(tmp111_); double tmp466_ = tmp464_+tmp464_; double tmp467_ = tmp465_+tmp465_; double tmp468_ = tmp466_+tmp467_; double tmp469_ = (tmp468_)*(tmp114_); double tmp470_ = tmp469_+tmp469_; double tmp471_ = tmp164_*(tmp54_); double tmp472_ = tmp36_*tmp471_; double tmp473_ = -(tmp472_); double tmp474_ = tmp473_/tmp118_; double tmp475_ = tmp164_*(tmp75_); double tmp476_ = tmp475_*(tmp57_); double tmp477_ = (tmp474_)*(tmp77_); double tmp478_ = tmp476_+tmp477_; double tmp479_ = (tmp478_)*mLocFour7x2_State_0_0; double tmp480_ = (tmp479_)/tmp120_; double tmp481_ = (tmp480_)*(tmp82_); double tmp482_ = tmp164_*(tmp100_); double tmp483_ = tmp482_*(tmp57_); double tmp484_ = (tmp474_)*(tmp102_); double tmp485_ = tmp483_+tmp484_; double tmp486_ = (tmp485_)*mLocFour7x2_State_0_0; double tmp487_ = (tmp486_)/tmp120_; double tmp488_ = (tmp487_)*(tmp107_); double tmp489_ = (tmp480_)*(tmp109_); double tmp490_ = (tmp487_)*(tmp111_); double tmp491_ = tmp489_+tmp489_; double tmp492_ = tmp490_+tmp490_; double tmp493_ = tmp491_+tmp492_; double tmp494_ = (tmp493_)*(tmp114_); double tmp495_ = tmp494_+tmp494_; double tmp496_ = 1-tmp116_; double tmp497_ = (tmp124_)*(tmp107_); double tmp498_ = (tmp130_)*(tmp82_); double tmp499_ = tmp497_+tmp498_; double tmp500_ = tmp132_+tmp132_; double tmp501_ = tmp131_*2; double tmp502_ = tmp125_+tmp125_; double tmp503_ = mCompCoord[8]; double tmp504_ = (tmp147_)*tmp134_; double tmp505_ = (tmp149_)*tmp136_; double tmp506_ = tmp504_+tmp505_; double tmp507_ = (tmp149_)*(tmp114_); double tmp508_ = (tmp147_)*tmp115_; double tmp509_ = tmp507_+tmp508_; double tmp510_ = (tmp509_)*tmp139_; double tmp511_ = tmp506_+tmp510_; double tmp512_ = (tmp150_)*tmp121_; double tmp513_ = tmp151_+tmp151_; double tmp514_ = (tmp153_)*tmp134_; double tmp515_ = (tmp155_)*tmp136_; double tmp516_ = tmp514_+tmp515_; double tmp517_ = (tmp155_)*(tmp114_); double tmp518_ = (tmp153_)*tmp115_; double tmp519_ = tmp517_+tmp518_; double tmp520_ = (tmp519_)*tmp139_; double tmp521_ = tmp516_+tmp520_; double tmp522_ = tmp156_+tmp156_; double tmp523_ = (tmp150_)*(tmp142_); double tmp524_ = (tmp158_)*tmp134_; double tmp525_ = (tmp160_)*tmp136_; double tmp526_ = tmp524_+tmp525_; double tmp527_ = (tmp160_)*(tmp114_); double tmp528_ = (tmp158_)*tmp115_; double tmp529_ = tmp527_+tmp528_; double tmp530_ = (tmp529_)*tmp139_; double tmp531_ = tmp526_+tmp530_; double tmp532_ = (tmp82_)*mLocFour7x2_State_0_0; double tmp533_ = tmp532_*mLocScNorm; double tmp534_ = tmp162_*mLocFour7x2_State_0_0; double tmp535_ = tmp534_*mLocScNorm; double tmp536_ = (tmp166_)*tmp134_; double tmp537_ = (tmp168_)*tmp136_; double tmp538_ = tmp536_+tmp537_; double tmp539_ = (tmp168_)*(tmp114_); double tmp540_ = (tmp166_)*tmp115_; double tmp541_ = tmp539_+tmp540_; double tmp542_ = (tmp541_)*tmp139_; double tmp543_ = tmp538_+tmp542_; double tmp544_ = tmp164_*(tmp142_); double tmp545_ = (tmp170_)*tmp134_; double tmp546_ = (tmp172_)*tmp136_; double tmp547_ = tmp545_+tmp546_; double tmp548_ = (tmp172_)*(tmp114_); double tmp549_ = (tmp170_)*tmp115_; double tmp550_ = tmp548_+tmp549_; double tmp551_ = (tmp550_)*tmp139_; double tmp552_ = tmp547_+tmp551_; double tmp553_ = (tmp222_)*(tmp107_); double tmp554_ = (tmp242_)*(tmp82_); double tmp555_ = tmp553_+tmp554_; double tmp556_ = tmp243_+tmp243_; double tmp557_ = tmp223_+tmp223_; 
double tmp558_ = (tmp248_)*tmp134_; double tmp559_ = (tmp250_)*tmp136_; double tmp560_ = tmp558_+tmp559_; double tmp561_ = (tmp250_)*(tmp114_); double tmp562_ = (tmp248_)*tmp115_; double tmp563_ = tmp561_+tmp562_; double tmp564_ = (tmp563_)*tmp139_; double tmp565_ = tmp560_+tmp564_; double tmp566_ = (tmp310_)*(tmp107_); double tmp567_ = (tmp336_)*(tmp82_); double tmp568_ = tmp566_+tmp567_; double tmp569_ = tmp337_+tmp337_; double tmp570_ = tmp311_+tmp311_; double tmp571_ = (tmp342_)*tmp134_; double tmp572_ = (tmp344_)*tmp136_; double tmp573_ = tmp571_+tmp572_; double tmp574_ = (tmp344_)*(tmp114_); double tmp575_ = (tmp342_)*tmp115_; double tmp576_ = tmp574_+tmp575_; double tmp577_ = (tmp576_)*tmp139_; double tmp578_ = tmp573_+tmp577_; double tmp579_ = (tmp386_)*(tmp107_); double tmp580_ = (tmp412_)*(tmp82_); double tmp581_ = tmp579_+tmp580_; double tmp582_ = tmp413_+tmp413_; double tmp583_ = tmp387_+tmp387_; double tmp584_ = (tmp418_)*tmp134_; double tmp585_ = (tmp420_)*tmp136_; double tmp586_ = tmp584_+tmp585_; double tmp587_ = (tmp420_)*(tmp114_); double tmp588_ = (tmp418_)*tmp115_; double tmp589_ = tmp587_+tmp588_; double tmp590_ = (tmp589_)*tmp139_; double tmp591_ = tmp586_+tmp590_; double tmp592_ = (tmp430_)*(tmp107_); double tmp593_ = (tmp437_)*(tmp82_); double tmp594_ = tmp592_+tmp593_; double tmp595_ = tmp438_+tmp438_; double tmp596_ = tmp431_+tmp431_; double tmp597_ = (tmp443_)*tmp134_; double tmp598_ = (tmp445_)*tmp136_; double tmp599_ = tmp597_+tmp598_; double tmp600_ = (tmp445_)*(tmp114_); double tmp601_ = (tmp443_)*tmp115_; double tmp602_ = tmp600_+tmp601_; double tmp603_ = (tmp602_)*tmp139_; double tmp604_ = tmp599_+tmp603_; double tmp605_ = (tmp455_)*(tmp107_); double tmp606_ = (tmp462_)*(tmp82_); double tmp607_ = tmp605_+tmp606_; double tmp608_ = tmp463_+tmp463_; double tmp609_ = tmp456_+tmp456_; double tmp610_ = (tmp468_)*tmp134_; double tmp611_ = (tmp470_)*tmp136_; double tmp612_ = tmp610_+tmp611_; double tmp613_ = (tmp470_)*(tmp114_); double tmp614_ = (tmp468_)*tmp115_; double tmp615_ = tmp613_+tmp614_; double tmp616_ = (tmp615_)*tmp139_; double tmp617_ = tmp612_+tmp616_; double tmp618_ = (tmp480_)*(tmp107_); double tmp619_ = (tmp487_)*(tmp82_); double tmp620_ = tmp618_+tmp619_; double tmp621_ = tmp488_+tmp488_; double tmp622_ = tmp481_+tmp481_; double tmp623_ = (tmp493_)*tmp134_; double tmp624_ = (tmp495_)*tmp136_; double tmp625_ = tmp623_+tmp624_; double tmp626_ = (tmp495_)*(tmp114_); double tmp627_ = (tmp493_)*tmp115_; double tmp628_ = tmp626_+tmp627_; double tmp629_ = (tmp628_)*tmp139_; double tmp630_ = tmp625_+tmp629_; mVal[0] = ((mLocFour7x2_State_1_0+(((tmp117_)*(tmp82_)+tmp121_*(tmp107_))-tmp127_*tmp161_+tmp131_*tmp162_+tmp133_*tmp163_+(tmp109_)*(tmp142_))*mLocFour7x2_State_0_0)-mLocXIm)*mLocScNorm; mCompDer[0][0] = (((tmp124_)*(tmp117_)+(tmp130_)*tmp121_)-(tmp502_)*tmp127_+(tmp499_)*tmp131_+(tmp500_)*tmp133_+(tmp124_)*(tmp142_)+(tmp511_)*(tmp109_))*mLocFour7x2_State_0_0*mLocScNorm; mCompDer[0][1] = ((tmp150_)*(tmp117_)-(tmp513_)*tmp127_+tmp156_*tmp131_+tmp523_+(tmp521_)*(tmp109_))*mLocFour7x2_State_0_0*mLocScNorm; mCompDer[0][2] = (tmp512_+tmp151_*tmp131_+(tmp522_)*tmp133_+(tmp531_)*(tmp109_))*mLocFour7x2_State_0_0*mLocScNorm; mCompDer[0][3] = tmp533_; mCompDer[0][4] = (tmp107_)*mLocFour7x2_State_0_0*mLocScNorm; mCompDer[0][5] = -(2*tmp161_)*mLocFour7x2_State_0_0*mLocScNorm; mCompDer[0][6] = tmp535_; mCompDer[0][7] = tmp163_*mLocFour7x2_State_0_0*mLocScNorm; mCompDer[0][8] = 0; mCompDer[0][9] = (tmp544_+(tmp543_)*(tmp109_))*mLocFour7x2_State_0_0*mLocScNorm; 
mCompDer[0][10] = (tmp552_)*(tmp109_)*mLocFour7x2_State_0_0*mLocScNorm; mCompDer[0][11] = (tmp114_)*(tmp109_)*mLocFour7x2_State_0_0*mLocScNorm; mCompDer[0][12] = tmp115_*(tmp109_)*mLocFour7x2_State_0_0*mLocScNorm; mCompDer[0][13] = tmp140_*(tmp109_)*mLocFour7x2_State_0_0*mLocScNorm; mCompDer[0][14] = (((tmp222_)*(tmp117_)+(tmp242_)*tmp121_)-(tmp557_)*tmp127_+(tmp555_)*tmp131_+(tmp556_)*tmp133_+(tmp222_)*(tmp142_)+(tmp565_)*(tmp109_))*mLocFour7x2_State_0_0*mLocScNorm; mCompDer[0][15] = (((tmp310_)*(tmp117_)+(tmp336_)*tmp121_)-(tmp570_)*tmp127_+(tmp568_)*tmp131_+(tmp569_)*tmp133_+(tmp310_)*(tmp142_)+(tmp578_)*(tmp109_))*mLocFour7x2_State_0_0*mLocScNorm; mCompDer[0][16] = (((tmp386_)*(tmp117_)+(tmp412_)*tmp121_)-(tmp583_)*tmp127_+(tmp581_)*tmp131_+(tmp582_)*tmp133_+(tmp386_)*(tmp142_)+(tmp591_)*(tmp109_))*mLocFour7x2_State_0_0*mLocScNorm; mCompDer[0][17] = (((tmp430_)*(tmp117_)+(tmp437_)*tmp121_)-(tmp596_)*tmp127_+(tmp594_)*tmp131_+(tmp595_)*tmp133_+(tmp430_)*(tmp142_)+(tmp604_)*(tmp109_))*mLocFour7x2_State_0_0*mLocScNorm; mCompDer[0][18] = (((tmp455_)*(tmp117_)+(tmp462_)*tmp121_)-(tmp609_)*tmp127_+(tmp607_)*tmp131_+(tmp608_)*tmp133_+(tmp455_)*(tmp142_)+(tmp617_)*(tmp109_))*mLocFour7x2_State_0_0*mLocScNorm; mCompDer[0][19] = (((tmp480_)*(tmp117_)+(tmp487_)*tmp121_)-(tmp622_)*tmp127_+(tmp620_)*tmp131_+(tmp621_)*tmp133_+(tmp480_)*(tmp142_)+(tmp630_)*(tmp109_))*mLocFour7x2_State_0_0*mLocScNorm; mVal[1] = ((mLocFour7x2_State_2_0+(((tmp496_)*(tmp107_)+tmp121_*(tmp82_)+tmp126_*tmp162_)-tmp501_*tmp163_+tmp503_*tmp161_+(tmp111_)*(tmp142_))*mLocFour7x2_State_0_0)-mLocYIm)*mLocScNorm; mCompDer[1][0] = (((tmp130_)*(tmp496_)+(tmp124_)*tmp121_+(tmp499_)*tmp126_)-(tmp500_)*tmp501_+(tmp502_)*tmp503_+(tmp130_)*(tmp142_)+(tmp511_)*(tmp111_))*mLocFour7x2_State_0_0*mLocScNorm; mCompDer[1][1] = (tmp512_+tmp156_*tmp126_+(tmp513_)*tmp503_+(tmp521_)*(tmp111_))*mLocFour7x2_State_0_0*mLocScNorm; mCompDer[1][2] = (((tmp150_)*(tmp496_)+tmp151_*tmp126_)-(tmp522_)*tmp501_+tmp523_+(tmp531_)*(tmp111_))*mLocFour7x2_State_0_0*mLocScNorm; mCompDer[1][3] = tmp164_*(tmp107_)*mLocFour7x2_State_0_0*mLocScNorm; mCompDer[1][4] = tmp533_; mCompDer[1][5] = tmp535_; mCompDer[1][6] = -(2*tmp163_)*mLocFour7x2_State_0_0*mLocScNorm; mCompDer[1][7] = 0; mCompDer[1][8] = tmp161_*mLocFour7x2_State_0_0*mLocScNorm; mCompDer[1][9] = (tmp543_)*(tmp111_)*mLocFour7x2_State_0_0*mLocScNorm; mCompDer[1][10] = (tmp544_+(tmp552_)*(tmp111_))*mLocFour7x2_State_0_0*mLocScNorm; mCompDer[1][11] = (tmp114_)*(tmp111_)*mLocFour7x2_State_0_0*mLocScNorm; mCompDer[1][12] = tmp115_*(tmp111_)*mLocFour7x2_State_0_0*mLocScNorm; mCompDer[1][13] = tmp140_*(tmp111_)*mLocFour7x2_State_0_0*mLocScNorm; mCompDer[1][14] = (((tmp242_)*(tmp496_)+(tmp222_)*tmp121_+(tmp555_)*tmp126_)-(tmp556_)*tmp501_+(tmp557_)*tmp503_+(tmp242_)*(tmp142_)+(tmp565_)*(tmp111_))*mLocFour7x2_State_0_0*mLocScNorm; mCompDer[1][15] = (((tmp336_)*(tmp496_)+(tmp310_)*tmp121_+(tmp568_)*tmp126_)-(tmp569_)*tmp501_+(tmp570_)*tmp503_+(tmp336_)*(tmp142_)+(tmp578_)*(tmp111_))*mLocFour7x2_State_0_0*mLocScNorm; mCompDer[1][16] = (((tmp412_)*(tmp496_)+(tmp386_)*tmp121_+(tmp581_)*tmp126_)-(tmp582_)*tmp501_+(tmp583_)*tmp503_+(tmp412_)*(tmp142_)+(tmp591_)*(tmp111_))*mLocFour7x2_State_0_0*mLocScNorm; mCompDer[1][17] = (((tmp437_)*(tmp496_)+(tmp430_)*tmp121_+(tmp594_)*tmp126_)-(tmp595_)*tmp501_+(tmp596_)*tmp503_+(tmp437_)*(tmp142_)+(tmp604_)*(tmp111_))*mLocFour7x2_State_0_0*mLocScNorm; mCompDer[1][18] = 
(((tmp462_)*(tmp496_)+(tmp455_)*tmp121_+(tmp607_)*tmp126_)-(tmp608_)*tmp501_+(tmp609_)*tmp503_+(tmp462_)*(tmp142_)+(tmp617_)*(tmp111_))*mLocFour7x2_State_0_0*mLocScNorm; mCompDer[1][19] = (((tmp487_)*(tmp496_)+(tmp480_)*tmp121_+(tmp620_)*tmp126_)-(tmp621_)*tmp501_+(tmp622_)*tmp503_+(tmp487_)*(tmp142_)+(tmp630_)*(tmp111_))*mLocFour7x2_State_0_0*mLocScNorm; } void cEqAppui_GL__TerFix_M2CFour7x2::ComputeValDerivHessian() { ELISE_ASSERT(false,"Foncteur cEqAppui_GL__TerFix_M2CFour7x2 Has no Der Sec"); } void cEqAppui_GL__TerFix_M2CFour7x2::SetFour7x2_State_0_0(double aVal){ mLocFour7x2_State_0_0 = aVal;} void cEqAppui_GL__TerFix_M2CFour7x2::SetFour7x2_State_1_0(double aVal){ mLocFour7x2_State_1_0 = aVal;} void cEqAppui_GL__TerFix_M2CFour7x2::SetFour7x2_State_2_0(double aVal){ mLocFour7x2_State_2_0 = aVal;} void cEqAppui_GL__TerFix_M2CFour7x2::SetGL_0_0(double aVal){ mLocGL_0_0 = aVal;} void cEqAppui_GL__TerFix_M2CFour7x2::SetGL_0_1(double aVal){ mLocGL_0_1 = aVal;} void cEqAppui_GL__TerFix_M2CFour7x2::SetGL_0_2(double aVal){ mLocGL_0_2 = aVal;} void cEqAppui_GL__TerFix_M2CFour7x2::SetGL_1_0(double aVal){ mLocGL_1_0 = aVal;} void cEqAppui_GL__TerFix_M2CFour7x2::SetGL_1_1(double aVal){ mLocGL_1_1 = aVal;} void cEqAppui_GL__TerFix_M2CFour7x2::SetGL_1_2(double aVal){ mLocGL_1_2 = aVal;} void cEqAppui_GL__TerFix_M2CFour7x2::SetGL_2_0(double aVal){ mLocGL_2_0 = aVal;} void cEqAppui_GL__TerFix_M2CFour7x2::SetGL_2_1(double aVal){ mLocGL_2_1 = aVal;} void cEqAppui_GL__TerFix_M2CFour7x2::SetGL_2_2(double aVal){ mLocGL_2_2 = aVal;} void cEqAppui_GL__TerFix_M2CFour7x2::SetScNorm(double aVal){ mLocScNorm = aVal;} void cEqAppui_GL__TerFix_M2CFour7x2::SetXIm(double aVal){ mLocXIm = aVal;} void cEqAppui_GL__TerFix_M2CFour7x2::SetXTer(double aVal){ mLocXTer = aVal;} void cEqAppui_GL__TerFix_M2CFour7x2::SetYIm(double aVal){ mLocYIm = aVal;} void cEqAppui_GL__TerFix_M2CFour7x2::SetYTer(double aVal){ mLocYTer = aVal;} void cEqAppui_GL__TerFix_M2CFour7x2::SetZTer(double aVal){ mLocZTer = aVal;} double * cEqAppui_GL__TerFix_M2CFour7x2::AdrVarLocFromString(const std::string & aName) { if (aName == "Four7x2_State_0_0") return & mLocFour7x2_State_0_0; if (aName == "Four7x2_State_1_0") return & mLocFour7x2_State_1_0; if (aName == "Four7x2_State_2_0") return & mLocFour7x2_State_2_0; if (aName == "GL_0_0") return & mLocGL_0_0; if (aName == "GL_0_1") return & mLocGL_0_1; if (aName == "GL_0_2") return & mLocGL_0_2; if (aName == "GL_1_0") return & mLocGL_1_0; if (aName == "GL_1_1") return & mLocGL_1_1; if (aName == "GL_1_2") return & mLocGL_1_2; if (aName == "GL_2_0") return & mLocGL_2_0; if (aName == "GL_2_1") return & mLocGL_2_1; if (aName == "GL_2_2") return & mLocGL_2_2; if (aName == "ScNorm") return & mLocScNorm; if (aName == "XIm") return & mLocXIm; if (aName == "XTer") return & mLocXTer; if (aName == "YIm") return & mLocYIm; if (aName == "YTer") return & mLocYTer; if (aName == "ZTer") return & mLocZTer; return 0; } cElCompiledFonc::cAutoAddEntry cEqAppui_GL__TerFix_M2CFour7x2::mTheAuto("cEqAppui_GL__TerFix_M2CFour7x2",cEqAppui_GL__TerFix_M2CFour7x2::Alloc); cElCompiledFonc * cEqAppui_GL__TerFix_M2CFour7x2::Alloc() { return new cEqAppui_GL__TerFix_M2CFour7x2(); }
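// --- Usage sketch (added for illustration, not part of the generated file) ---
// A minimal, hedged example of configuring the generated functor above. Only
// members declared in this file are used (Alloc, the Set* accessors,
// AdrVarLocFromString); the downcast and the sample values are assumptions,
// and evaluation itself comes from the cElCompiledFonc base class, which is
// not shown here.
void ConfigureGeneratedFunctor()
{
    cElCompiledFonc * aBase = cEqAppui_GL__TerFix_M2CFour7x2::Alloc();
    cEqAppui_GL__TerFix_M2CFour7x2 * aFctr =
        static_cast<cEqAppui_GL__TerFix_M2CFour7x2 *>(aBase);

    // Observations can be pushed in through the typed accessors...
    aFctr->SetXIm(512.0);   // hypothetical image measurement
    aFctr->SetYIm(384.0);
    aFctr->SetScNorm(1.0);

    // ...or generically by name, the way a solver would wire unknowns in.
    double * aZTer = aFctr->AdrVarLocFromString("ZTer");
    if (aZTer) *aZTer = 100.0;

    // Note: ComputeValDerivHessian() deliberately asserts, so only value /
    // first-derivative evaluation is expected for this functor.
}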
17,667
324
<filename>apis/ec2/src/main/java/org/jclouds/ec2/xml/DescribeAddressesResponseHandler.java /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.jclouds.ec2.xml; import static org.jclouds.util.SaxUtils.equalsOrSuffix; import java.util.Map; import java.util.Set; import javax.annotation.Resource; import javax.inject.Inject; import org.jclouds.aws.util.AWSUtils; import org.jclouds.ec2.domain.PublicIpInstanceIdPair; import org.jclouds.http.functions.ParseSax.HandlerForGeneratedRequestWithResult; import org.jclouds.location.Region; import org.jclouds.logging.Logger; import org.xml.sax.Attributes; import com.google.common.base.Supplier; import com.google.common.collect.Sets; public class DescribeAddressesResponseHandler extends HandlerForGeneratedRequestWithResult<Set<PublicIpInstanceIdPair>> { @Resource protected Logger logger = Logger.NULL; private Set<PublicIpInstanceIdPair> pairs = Sets.newLinkedHashSet(); private String ipAddress; private StringBuilder currentText = new StringBuilder(); @Inject @Region Supplier<String> defaultRegion; private String instanceId; private final TagSetHandler tagSetHandler; private boolean inTagSet; private Map<String, String> tagResults; @Inject DescribeAddressesResponseHandler(final TagSetHandler tagSetHandler) { this.tagSetHandler = tagSetHandler; } @Override public void startElement(final String uri, final String name, final String qName, final Attributes attrs) { if (equalsOrSuffix(qName, "tagSet")) { inTagSet = true; } if (inTagSet) { tagSetHandler.startElement(uri, name, qName, attrs); } } protected String currentOrNull() { String returnVal = currentText.toString().trim(); return returnVal.equals("") ? null : returnVal; } @Override public void endElement(final String uri, final String name, final String qName) { if (equalsOrSuffix(qName, "tagSet")) { inTagSet = false; tagResults = tagSetHandler.getResult(); } else if (inTagSet) { tagSetHandler.endElement(uri, name, qName); } else if (qName.equals("publicIp")) { ipAddress = currentOrNull(); } else if (qName.equals("instanceId")) { instanceId = currentOrNull(); } else if (qName.equals("item")) { String region = AWSUtils.findRegionInArgsOrNull(getRequest()); if (region == null) region = defaultRegion.get(); pairs.add(new PublicIpInstanceIdPair(region, ipAddress, instanceId, tagResults)); ipAddress = null; instanceId = null; tagResults = null; } currentText.setLength(0); } @Override public void characters(final char[] ch, final int start, final int length) { if (inTagSet) { tagSetHandler.characters(ch, start, length); } else { currentText.append(ch, start, length); } } @Override public Set<PublicIpInstanceIdPair> getResult() { return pairs; } }
1,331
621
from lazydata.cli.commands.BaseCommand import BaseCommand
from lazydata.config.config import Config
from lazydata.storage.local import LocalStorage
from lazydata.storage.remote import RemoteStorage


class PushCommand(BaseCommand):
    def handle(self, args):
        config = Config()
        if "remote" in config.config:
            remote = RemoteStorage.get_from_config(config)
            local = LocalStorage()
            remote.upload(local, config)
        else:
            print("ERROR: Remote not specified for this lazydata project. Use `lazydata add-remote` to add it.")
212
1,738
<filename>dev/Gems/LmbrCentral/Code/Source/Rendering/FogVolumeComponent.cpp /* * All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or * its licensors. * * For complete copyright and license terms please see the LICENSE at the root of this * distribution (the "License"). All use of this software is governed by the License, * or, if provided, by the license below or the license accompanying this file. Do not * remove or modify any license notices. This file is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * */ #include "LmbrCentral_precompiled.h" #include <AzCore/Serialization/SerializeContext.h> #include <AzCore/RTTI/BehaviorContext.h> #include <IEntityRenderState.h> #include "FogVolumeComponent.h" namespace LmbrCentral { const float FogVolumeComponent::s_renderNodeRequestBusOrder = 500.f; void FogVolumeComponent::Activate() { const auto entityId = GetEntityId(); m_configuration.SetEntityId(entityId); m_configuration.UpdateSizeFromEntityShape(); m_fogVolume.SetEntityId(entityId); m_fogVolume.CreateFogVolumeRenderNode(m_configuration); RefreshFog(); RenderNodeRequestBus::Handler::BusConnect(entityId); FogVolumeComponentRequestBus::Handler::BusConnect(entityId); ShapeComponentNotificationsBus::Handler::BusConnect(entityId); AZ::TransformNotificationBus::Handler::BusConnect(entityId); } void FogVolumeComponent::Deactivate() { m_fogVolume.DestroyRenderNode(); m_fogVolume.SetEntityId(AZ::EntityId()); m_configuration.SetEntityId(AZ::EntityId()); RenderNodeRequestBus::Handler::BusDisconnect(); FogVolumeComponentRequestBus::Handler::BusDisconnect(); ShapeComponentNotificationsBus::Handler::BusDisconnect(); AZ::TransformNotificationBus::Handler::BusDisconnect(); } void FogVolumeComponent::Reflect(AZ::ReflectContext* context) { FogVolumeConfiguration::Reflect(context); if (AZ::SerializeContext* serializeContext = azrtti_cast<AZ::SerializeContext*>(context)) { serializeContext->Class<FogVolumeComponent, AZ::Component>() ->Version(1) ->Field("FogVolumeConfiguration", &FogVolumeComponent::m_configuration); } if (AZ::BehaviorContext* behaviorContext = azrtti_cast<AZ::BehaviorContext*>(context)) { behaviorContext->Class<FogVolumeComponent>()->RequestBus("FogVolumeComponentRequestBus"); FogVolumeComponent::ExposeRequestsBusInBehaviorContext(behaviorContext, "FogVolumeComponentRequestBus"); } } IRenderNode* FogVolumeComponent::GetRenderNode() { return m_fogVolume.GetRenderNode(); } float FogVolumeComponent::GetRenderNodeRequestBusOrder() const { return s_renderNodeRequestBusOrder; } void FogVolumeComponent::RefreshFog() { m_fogVolume.UpdateFogVolumeProperties(m_configuration); m_fogVolume.UpdateRenderingFlags(m_configuration); m_fogVolume.UpdateFogVolumeTransform(); } void FogVolumeComponent::OnTransformChanged(const AZ::Transform & local, const AZ::Transform & world) { RefreshFog(); } void FogVolumeComponent::OnShapeChanged(ShapeComponentNotifications::ShapeChangeReasons changeReason) { if (changeReason == ShapeComponentNotifications::ShapeChangeReasons::ShapeChanged) { m_configuration.UpdateSizeFromEntityShape(); RefreshFog(); } } void FogVolumeComponent::GetRequiredServices(AZ::ComponentDescriptor::DependencyArrayType & required) { required.push_back(AZ_CRC("BoxShapeService", 0x946a0032)); } void FogVolumeComponent::ExposeRequestsBusInBehaviorContext(AZ::BehaviorContext* behaviorContext, const char* name) { behaviorContext->EBus<FogVolumeComponentRequestBus>(name) ->Attribute(AZ::Script::Attributes::ExcludeFrom, 
AZ::Script::Attributes::All) ->Event("RefreshFog", &FogVolumeComponentRequestBus::Events::RefreshFog) ->Event("GetVolumeType", &FogVolumeComponentRequestBus::Events::GetVolumeType) ->Event("SetVolumeType", &FogVolumeComponentRequestBus::Events::SetVolumeType) ->VirtualProperty("VolumeType", "GetVolumeType", "SetVolumeType") ->Event("GetColor", &FogVolumeComponentRequestBus::Events::GetColor) ->Event("SetColor", &FogVolumeComponentRequestBus::Events::SetColor) ->VirtualProperty("Color", "GetColor", "SetColor") ->Event("GetHdrDynamic", &FogVolumeComponentRequestBus::Events::GetHdrDynamic) ->Event("SetHdrDynamic", &FogVolumeComponentRequestBus::Events::SetHdrDynamic) ->VirtualProperty("HdrDynamic", "GetHdrDynamic", "SetHdrDynamic") ->Event("GetUseGlobalFogColor", &FogVolumeComponentRequestBus::Events::GetUseGlobalFogColor) ->Event("SetUseGlobalFogColor", &FogVolumeComponentRequestBus::Events::SetUseGlobalFogColor) ->VirtualProperty("UseGlobalFogColor", "GetUseGlobalFogColor", "SetUseGlobalFogColor") ->Event("GetGlobalDensity", &FogVolumeComponentRequestBus::Events::GetGlobalDensity) ->Event("SetGlobalDensity", &FogVolumeComponentRequestBus::Events::SetGlobalDensity) ->VirtualProperty("GlobalDensity", "GetGlobalDensity", "SetGlobalDensity") ->Event("GetDensityOffset", &FogVolumeComponentRequestBus::Events::GetDensityOffset) ->Event("SetDensityOffset", &FogVolumeComponentRequestBus::Events::SetDensityOffset) ->VirtualProperty("DensityOffset", "GetDensityOffset", "SetDensityOffset") ->Event("GetNearCutoff", &FogVolumeComponentRequestBus::Events::GetNearCutoff) ->Event("SetNearCutoff", &FogVolumeComponentRequestBus::Events::SetNearCutoff) ->VirtualProperty("NearCutoff", "GetNearCutoff", "SetNearCutoff") ->Event("GetFallOffDirLong", &FogVolumeComponentRequestBus::Events::GetFallOffDirLong) ->Event("SetFallOffDirLong", &FogVolumeComponentRequestBus::Events::SetFallOffDirLong) ->VirtualProperty("FallOffDirLong", "GetFallOffDirLong", "SetFallOffDirLong") ->Event("GetFallOffDirLatitude", &FogVolumeComponentRequestBus::Events::GetFallOffDirLatitude) ->Event("SetFallOffDirLatitude", &FogVolumeComponentRequestBus::Events::SetFallOffDirLatitude) ->VirtualProperty("FallOffDirLatitude", "GetFallOffDirLatitude", "SetFallOffDirLatitude") ->Event("GetFallOffShift", &FogVolumeComponentRequestBus::Events::GetFallOffShift) ->Event("SetFallOffShift", &FogVolumeComponentRequestBus::Events::SetFallOffShift) ->VirtualProperty("FallOffShift", "GetFallOffShift", "SetFallOffShift") ->Event("GetFallOffScale", &FogVolumeComponentRequestBus::Events::GetFallOffScale) ->Event("SetFallOffScale", &FogVolumeComponentRequestBus::Events::SetFallOffScale) ->VirtualProperty("FallOffScale", "GetFallOffScale", "SetFallOffScale") ->Event("GetSoftEdges", &FogVolumeComponentRequestBus::Events::GetSoftEdges) ->Event("SetSoftEdges", &FogVolumeComponentRequestBus::Events::SetSoftEdges) ->VirtualProperty("SoftEdges", "GetSoftEdges", "SetSoftEdges") ->Event("GetRampStart", &FogVolumeComponentRequestBus::Events::GetRampStart) ->Event("SetRampStart", &FogVolumeComponentRequestBus::Events::SetRampStart) ->VirtualProperty("RampStart", "GetRampStart", "SetRampStart") ->Event("GetRampEnd", &FogVolumeComponentRequestBus::Events::GetRampEnd) ->Event("SetRampEnd", &FogVolumeComponentRequestBus::Events::SetRampEnd) ->VirtualProperty("RampEnd", "GetRampEnd", "SetRampEnd") ->Event("GetRampInfluence", &FogVolumeComponentRequestBus::Events::GetRampInfluence) ->Event("SetRampInfluence", &FogVolumeComponentRequestBus::Events::SetRampInfluence) 
->VirtualProperty("RampInfluence", "GetRampInfluence", "SetRampInfluence") ->Event("GetWindInfluence", &FogVolumeComponentRequestBus::Events::GetWindInfluence) ->Event("SetWindInfluence", &FogVolumeComponentRequestBus::Events::SetWindInfluence) ->VirtualProperty("WindInfluence", "GetWindInfluence", "SetWindInfluence") ->Event("GetDensityNoiseScale", &FogVolumeComponentRequestBus::Events::GetDensityNoiseScale) ->Event("SetDensityNoiseScale", &FogVolumeComponentRequestBus::Events::SetDensityNoiseScale) ->VirtualProperty("DensityNoiseScale", "GetDensityNoiseScale", "SetDensityNoiseScale") ->Event("GetDensityNoiseOffset", &FogVolumeComponentRequestBus::Events::GetDensityNoiseOffset) ->Event("SetDensityNoiseOffset", &FogVolumeComponentRequestBus::Events::SetDensityNoiseOffset) ->VirtualProperty("DensityNoiseOffset", "GetDensityNoiseOffset", "SetDensityNoiseOffset") ->Event("GetDensityNoiseTimeFrequency", &FogVolumeComponentRequestBus::Events::GetDensityNoiseTimeFrequency) ->Event("SetDensityNoiseTimeFrequency", &FogVolumeComponentRequestBus::Events::SetDensityNoiseTimeFrequency) ->VirtualProperty("DensityNoiseTimeFrequency", "GetDensityNoiseTimeFrequency", "SetDensityNoiseTimeFrequency") ->Event("GetDensityNoiseFrequency", &FogVolumeComponentRequestBus::Events::GetDensityNoiseFrequency) ->Event("SetDensityNoiseFrequency", &FogVolumeComponentRequestBus::Events::SetDensityNoiseFrequency) ->VirtualProperty("DensityNoiseFrequency", "GetDensityNoiseFrequency", "SetDensityNoiseFrequency") ->Event("GetIgnoresVisAreas", &FogVolumeComponentRequestBus::Events::GetIgnoresVisAreas) ->Event("SetIgnoresVisAreas", &FogVolumeComponentRequestBus::Events::SetIgnoresVisAreas) ->VirtualProperty("IgnoresVisAreas", "GetIgnoresVisAreas", "SetIgnoresVisAreas") ->Event("GetAffectsThisAreaOnly", &FogVolumeComponentRequestBus::Events::GetAffectsThisAreaOnly) ->Event("SetAffectsThisAreaOnly", &FogVolumeComponentRequestBus::Events::SetAffectsThisAreaOnly) ->VirtualProperty("AffectsThisAreaOnly", "GetAffectsThisAreaOnly", "SetAffectsThisAreaOnly") ; } }
3,908
9,516
<filename>src/graph/subgraph.cc<gh_stars>1000+ /*! * Copyright (c) 2020 by Contributors * \file graph/subgraph.cc * \brief Functions for extracting subgraphs. */ #include "./heterograph.h" using namespace dgl::runtime; namespace dgl { HeteroSubgraph InEdgeGraphRelabelNodes( const HeteroGraphPtr graph, const std::vector<IdArray>& vids) { CHECK_EQ(vids.size(), graph->NumVertexTypes()) << "Invalid input: the input list size must be the same as the number of vertex types."; std::vector<IdArray> eids(graph->NumEdgeTypes()); for (dgl_type_t etype = 0; etype < graph->NumEdgeTypes(); ++etype) { auto pair = graph->meta_graph()->FindEdge(etype); const dgl_type_t dst_vtype = pair.second; if (aten::IsNullArray(vids[dst_vtype])) { eids[etype] = IdArray::Empty({0}, graph->DataType(), graph->Context()); } else { const auto& earr = graph->InEdges(etype, {vids[dst_vtype]}); eids[etype] = earr.id; } } return graph->EdgeSubgraph(eids, false); } HeteroSubgraph InEdgeGraphNoRelabelNodes( const HeteroGraphPtr graph, const std::vector<IdArray>& vids) { // TODO(mufei): This should also use EdgeSubgraph once it is supported for CSR graphs CHECK_EQ(vids.size(), graph->NumVertexTypes()) << "Invalid input: the input list size must be the same as the number of vertex types."; std::vector<HeteroGraphPtr> subrels(graph->NumEdgeTypes()); std::vector<IdArray> induced_edges(graph->NumEdgeTypes()); for (dgl_type_t etype = 0; etype < graph->NumEdgeTypes(); ++etype) { auto pair = graph->meta_graph()->FindEdge(etype); const dgl_type_t src_vtype = pair.first; const dgl_type_t dst_vtype = pair.second; auto relgraph = graph->GetRelationGraph(etype); if (aten::IsNullArray(vids[dst_vtype])) { // create a placeholder graph subrels[etype] = UnitGraph::Empty( relgraph->NumVertexTypes(), graph->NumVertices(src_vtype), graph->NumVertices(dst_vtype), graph->DataType(), graph->Context()); induced_edges[etype] = IdArray::Empty({0}, graph->DataType(), graph->Context()); } else { const auto& earr = graph->InEdges(etype, {vids[dst_vtype]}); subrels[etype] = UnitGraph::CreateFromCOO( relgraph->NumVertexTypes(), graph->NumVertices(src_vtype), graph->NumVertices(dst_vtype), earr.src, earr.dst); induced_edges[etype] = earr.id; } } HeteroSubgraph ret; ret.graph = CreateHeteroGraph(graph->meta_graph(), subrels, graph->NumVerticesPerType()); ret.induced_edges = std::move(induced_edges); return ret; } HeteroSubgraph InEdgeGraph( const HeteroGraphPtr graph, const std::vector<IdArray>& vids, bool relabel_nodes) { if (relabel_nodes) { return InEdgeGraphRelabelNodes(graph, vids); } else { return InEdgeGraphNoRelabelNodes(graph, vids); } } HeteroSubgraph OutEdgeGraphRelabelNodes( const HeteroGraphPtr graph, const std::vector<IdArray>& vids) { CHECK_EQ(vids.size(), graph->NumVertexTypes()) << "Invalid input: the input list size must be the same as the number of vertex types."; std::vector<IdArray> eids(graph->NumEdgeTypes()); for (dgl_type_t etype = 0; etype < graph->NumEdgeTypes(); ++etype) { auto pair = graph->meta_graph()->FindEdge(etype); const dgl_type_t src_vtype = pair.first; if (aten::IsNullArray(vids[src_vtype])) { eids[etype] = IdArray::Empty({0}, graph->DataType(), graph->Context()); } else { const auto& earr = graph->OutEdges(etype, {vids[src_vtype]}); eids[etype] = earr.id; } } return graph->EdgeSubgraph(eids, false); } HeteroSubgraph OutEdgeGraphNoRelabelNodes( const HeteroGraphPtr graph, const std::vector<IdArray>& vids) { // TODO(mufei): This should also use EdgeSubgraph once it is supported for CSR graphs CHECK_EQ(vids.size(), 
graph->NumVertexTypes()) << "Invalid input: the input list size must be the same as the number of vertex types."; std::vector<HeteroGraphPtr> subrels(graph->NumEdgeTypes()); std::vector<IdArray> induced_edges(graph->NumEdgeTypes()); for (dgl_type_t etype = 0; etype < graph->NumEdgeTypes(); ++etype) { auto pair = graph->meta_graph()->FindEdge(etype); const dgl_type_t src_vtype = pair.first; const dgl_type_t dst_vtype = pair.second; auto relgraph = graph->GetRelationGraph(etype); if (aten::IsNullArray(vids[src_vtype])) { // create a placeholder graph subrels[etype] = UnitGraph::Empty( relgraph->NumVertexTypes(), graph->NumVertices(src_vtype), graph->NumVertices(dst_vtype), graph->DataType(), graph->Context()); induced_edges[etype] = IdArray::Empty({0}, graph->DataType(), graph->Context()); } else { const auto& earr = graph->OutEdges(etype, {vids[src_vtype]}); subrels[etype] = UnitGraph::CreateFromCOO( relgraph->NumVertexTypes(), graph->NumVertices(src_vtype), graph->NumVertices(dst_vtype), earr.src, earr.dst); induced_edges[etype] = earr.id; } } HeteroSubgraph ret; ret.graph = CreateHeteroGraph(graph->meta_graph(), subrels, graph->NumVerticesPerType()); ret.induced_edges = std::move(induced_edges); return ret; } HeteroSubgraph OutEdgeGraph( const HeteroGraphPtr graph, const std::vector<IdArray>& vids, bool relabel_nodes) { if (relabel_nodes) { return OutEdgeGraphRelabelNodes(graph, vids); } else { return OutEdgeGraphNoRelabelNodes(graph, vids); } } } // namespace dgl
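// --- Usage sketch (added for illustration, not part of the original file) ---
// Calling the in-edge extraction defined above. It assumes a HeteroGraphPtr
// and one seed IdArray per vertex type are already available, and that the
// dgl internal headers used by this file are on the include path; the wrapper
// name is made up, and only dgl::InEdgeGraph from this file is exercised.
#include <vector>

dgl::HeteroSubgraph ExtractInboundNeighborhood(
    const dgl::HeteroGraphPtr& hg,
    const std::vector<dgl::IdArray>& seeds_per_vtype) {
  // One IdArray per vertex type is required (empty arrays for types without
  // seeds). relabel_nodes=true goes through EdgeSubgraph and relabels nodes;
  // false keeps the original node space and builds placeholder relation
  // graphs for vertex types with no seeds.
  return dgl::InEdgeGraph(hg, seeds_per_vtype, /*relabel_nodes=*/true);
}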
2,210
1,666
//----------------------------------------------------------------------------//
//                                                                            //
// ozz-animation is hosted at http://github.com/guillaumeblanc/ozz-animation  //
// and distributed under the MIT License (MIT).                               //
//                                                                            //
// Copyright (c) <NAME>                                                       //
//                                                                            //
// Permission is hereby granted, free of charge, to any person obtaining a    //
// copy of this software and associated documentation files (the "Software"), //
// to deal in the Software without restriction, including without limitation  //
// the rights to use, copy, modify, merge, publish, distribute, sublicense,   //
// and/or sell copies of the Software, and to permit persons to whom the      //
// Software is furnished to do so, subject to the following conditions:       //
//                                                                            //
// The above copyright notice and this permission notice shall be included in //
// all copies or substantial portions of the Software.                        //
//                                                                            //
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR //
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,   //
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL    //
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER //
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING    //
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER        //
// DEALINGS IN THE SOFTWARE.                                                  //
//                                                                            //
//----------------------------------------------------------------------------//

#include "ozz/base/maths/box.h"

#include <limits>

#include "ozz/base/maths/math_ex.h"
#include "ozz/base/maths/simd_math.h"

namespace ozz {
namespace math {

Box::Box()
    : min(std::numeric_limits<float>::max()),
      max(-std::numeric_limits<float>::max()) {}

Box::Box(const Float3* _points, size_t _stride, size_t _count) {
  assert(_stride >= sizeof(Float3) &&
         "_stride must be greater or equal to sizeof(Float3)");

  Float3 local_min(std::numeric_limits<float>::max());
  Float3 local_max(-std::numeric_limits<float>::max());

  const Float3* end = PointerStride(_points, _stride * _count);
  for (; _points < end; _points = PointerStride(_points, _stride)) {
    local_min = Min(local_min, *_points);
    local_max = Max(local_max, *_points);
  }

  min = local_min;
  max = local_max;
}

Box TransformBox(const Float4x4& _matrix, const Box& _box) {
  const SimdFloat4 min = simd_float4::Load3PtrU(&_box.min.x);
  const SimdFloat4 max = simd_float4::Load3PtrU(&_box.max.x);

  // Transforms min and max.
  const SimdFloat4 ta = TransformPoint(_matrix, min);
  const SimdFloat4 tb = TransformPoint(_matrix, max);

  // Finds new min and max and store them in box.
  Box tbox;
  math::Store3PtrU(Min(ta, tb), &tbox.min.x);
  math::Store3PtrU(Max(ta, tb), &tbox.max.x);
  return tbox;
}
}  // namespace math
}  // namespace ozz
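// --- Usage sketch (added for illustration, not part of the original file) ---
// Builds a tight box from a packed point array and transforms it with the two
// functions above. The point values are arbitrary, and Float4x4::identity()
// is an assumption about ozz's SIMD math API; everything else comes from this
// file.
#include "ozz/base/maths/box.h"
#include "ozz/base/maths/simd_math.h"

int main() {
  const ozz::math::Float3 points[] = {ozz::math::Float3(-1.f, 0.f, 2.f),
                                      ozz::math::Float3(3.f, -2.f, 1.f),
                                      ozz::math::Float3(0.f, 4.f, -5.f)};

  // Points are tightly packed, so the stride is exactly sizeof(Float3), which
  // satisfies the assert in the constructor.
  const ozz::math::Box box(points, sizeof(ozz::math::Float3),
                           sizeof(points) / sizeof(points[0]));

  // TransformBox transforms both corners and re-sorts them per component.
  const ozz::math::Box moved =
      ozz::math::TransformBox(ozz::math::Float4x4::identity(), box);
  (void)moved;
  return 0;
}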
1,531
2,056
/* gdbm.h - The include file for dbm users. -*- c -*- */ /* This file is part of GDBM, the GNU data base manager, by <NAME>. Copyright (C) 1990, 1991, 1993, 2011 Free Software Foundation, Inc. GDBM is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GDBM is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GDBM. If not, see <http://www.gnu.org/licenses/>. You may contact the author by: e-mail: <EMAIL> us-mail: <NAME> Computer Science Department Western Washington University Bellingham, WA 98226 *************************************************************************/ /* Protection for multiple includes. */ #ifndef _GDBM_H_ # define _GDBM_H_ # include <stdio.h> /* GDBM C++ support */ # if defined(__cplusplus) || defined(c_plusplus) extern "C" { # endif /* Parameters to gdbm_open for READERS, WRITERS, and WRITERS who can create the database. */ # define GDBM_READER 0 /* A reader. */ # define GDBM_WRITER 1 /* A writer. */ # define GDBM_WRCREAT 2 /* A writer. Create the db if needed. */ # define GDBM_NEWDB 3 /* A writer. Always create a new db. */ # define GDBM_OPENMASK 7 /* Mask for the above. */ # define GDBM_FAST 0x010 /* Write fast! => No fsyncs. OBSOLETE. */ # define GDBM_SYNC 0x020 /* Sync operations to the disk. */ # define GDBM_NOLOCK 0x040 /* Don't do file locking operations. */ # define GDBM_NOMMAP 0x080 /* Don't use mmap(). */ # define GDBM_CLOEXEC 0x100 /* Close the underlying fd on exec(3) */ /* Parameters to gdbm_store for simple insertion or replacement in the case that the key is already in the database. */ # define GDBM_INSERT 0 /* Never replace old data with new. */ # define GDBM_REPLACE 1 /* Always replace old data with new. */ /* Parameters to gdbm_setopt, specifing the type of operation to perform. */ # define GDBM_SETCACHESIZE 1 /* Set the cache size. */ # define GDBM_FASTMODE 2 /* Toggle fast mode. OBSOLETE. */ # define GDBM_SETSYNCMODE 3 /* Turn on or off sync operations. */ # define GDBM_SETCENTFREE 4 /* Keep all free blocks in the header. */ # define GDBM_SETCOALESCEBLKS 5 /* Attempt to coalesce free blocks. */ # define GDBM_SETMAXMAPSIZE 6 /* Set maximum mapped memory size */ # define GDBM_SETMMAP 7 /* Toggle mmap mode */ /* Compatibility defines: */ # define GDBM_CACHESIZE GDBM_SETCACHESIZE # define GDBM_SYNCMODE GDBM_SETSYNCMODE # define GDBM_CENTFREE GDBM_SETCENTFREE # define GDBM_COALESCEBLKS GDBM_SETCOALESCEBLKS # define GDBM_GETFLAGS 8 /* Get gdbm_open flags */ # define GDBM_GETMMAP 9 /* Get mmap status */ # define GDBM_GETCACHESIZE 10 /* Get current cache side */ # define GDBM_GETSYNCMODE 11 /* Get synch mode */ # define GDBM_GETCENTFREE 12 /* Get "centfree" status */ # define GDBM_GETCOALESCEBLKS 13 /* Get free block coalesce status */ # define GDBM_GETMAXMAPSIZE 14 /* Get maximum mapped memory size */ # define GDBM_GETDBNAME 15 /* Return database file name */ typedef unsigned long long int gdbm_count_t; /* The data and key structure. */ typedef struct { char *dptr; int dsize; } datum; /* A pointer to the GDBM file. */ typedef struct gdbm_file_info *GDBM_FILE; /* External variable, the gdbm build release string. 
*/ extern const char *gdbm_version; # define GDBM_VERSION_MAJOR 1 # define GDBM_VERSION_MINOR 11 # define GDBM_VERSION_PATCH 0 extern int const gdbm_version_number[3]; /* GDBM external functions. */ extern GDBM_FILE gdbm_open (const char *, int, int, int, void (*)(const char *)); extern void gdbm_close (GDBM_FILE); extern int gdbm_store (GDBM_FILE, datum, datum, int); extern datum gdbm_fetch (GDBM_FILE, datum); extern int gdbm_delete (GDBM_FILE, datum); extern datum gdbm_firstkey (GDBM_FILE); extern datum gdbm_nextkey (GDBM_FILE, datum); extern int gdbm_reorganize (GDBM_FILE); extern void gdbm_sync (GDBM_FILE); extern int gdbm_exists (GDBM_FILE, datum); extern int gdbm_setopt (GDBM_FILE, int, void *, int); extern int gdbm_fdesc (GDBM_FILE); extern int gdbm_export (GDBM_FILE, const char *, int, int); extern int gdbm_export_to_file (GDBM_FILE dbf, FILE *fp); extern int gdbm_import (GDBM_FILE, const char *, int); extern int gdbm_import_from_file (GDBM_FILE dbf, FILE *fp, int flag); extern int gdbm_count (GDBM_FILE dbf, gdbm_count_t *pcount); #define GDBM_DUMP_FMT_BINARY 0 #define GDBM_DUMP_FMT_ASCII 1 #define GDBM_META_MASK_MODE 0x01 #define GDBM_META_MASK_OWNER 0x02 extern int gdbm_dump (GDBM_FILE, const char *, int fmt, int open_flags, int mode); extern int gdbm_dump_to_file (GDBM_FILE, FILE *, int fmt); extern int gdbm_load (GDBM_FILE *, const char *, int replace, int meta_flags, unsigned long *line); extern int gdbm_load_from_file (GDBM_FILE *, FILE *, int replace, int meta_flags, unsigned long *line); # define GDBM_NO_ERROR 0 # define GDBM_MALLOC_ERROR 1 # define GDBM_BLOCK_SIZE_ERROR 2 # define GDBM_FILE_OPEN_ERROR 3 # define GDBM_FILE_WRITE_ERROR 4 # define GDBM_FILE_SEEK_ERROR 5 # define GDBM_FILE_READ_ERROR 6 # define GDBM_BAD_MAGIC_NUMBER 7 # define GDBM_EMPTY_DATABASE 8 # define GDBM_CANT_BE_READER 9 # define GDBM_CANT_BE_WRITER 10 # define GDBM_READER_CANT_DELETE 11 # define GDBM_READER_CANT_STORE 12 # define GDBM_READER_CANT_REORGANIZE 13 # define GDBM_UNKNOWN_UPDATE 14 # define GDBM_ITEM_NOT_FOUND 15 # define GDBM_REORGANIZE_FAILED 16 # define GDBM_CANNOT_REPLACE 17 # define GDBM_ILLEGAL_DATA 18 # define GDBM_OPT_ALREADY_SET 19 # define GDBM_OPT_ILLEGAL 20 # define GDBM_BYTE_SWAPPED 21 # define GDBM_BAD_FILE_OFFSET 22 # define GDBM_BAD_OPEN_FLAGS 23 # define GDBM_FILE_STAT_ERROR 24 # define GDBM_FILE_EOF 25 # define GDBM_NO_DBNAME 26 # define GDBM_ERR_FILE_OWNER 27 # define GDBM_ERR_FILE_MODE 28 # define _GDBM_MIN_ERRNO 0 # define _GDBM_MAX_ERRNO GDBM_ERR_FILE_MODE typedef int gdbm_error; /* For compatibilities sake. */ extern gdbm_error gdbm_errno; extern const char * const gdbm_errlist[]; /* extra prototypes */ extern const char *gdbm_strerror (gdbm_error); extern int gdbm_version_cmp (int const a[], int const b[]); # if defined(__cplusplus) || defined(c_plusplus) } # endif #endif
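// --- Usage sketch (added for illustration, not part of the original header) ---
// Minimal open/store/fetch/close round trip using only declarations from the
// header above. The database path and the key/value strings are made up;
// freeing fetched.dptr follows the usual GDBM convention that fetched data is
// owned by the caller.
#include <gdbm.h>
#include <cstdio>
#include <cstdlib>
#include <cstring>

int main() {
    GDBM_FILE dbf = gdbm_open("example.db", 0 /* default block size */,
                              GDBM_WRCREAT, 0644, NULL);
    if (!dbf) {
        fprintf(stderr, "gdbm_open: %s\n", gdbm_strerror(gdbm_errno));
        return 1;
    }

    datum key   = {(char *) "greeting", (int) strlen("greeting")};
    datum value = {(char *) "hello", (int) strlen("hello")};

    if (gdbm_store(dbf, key, value, GDBM_REPLACE) != 0)
        fprintf(stderr, "gdbm_store: %s\n", gdbm_strerror(gdbm_errno));

    datum fetched = gdbm_fetch(dbf, key);
    if (fetched.dptr != NULL) {
        printf("%.*s\n", fetched.dsize, fetched.dptr);
        free(fetched.dptr); /* fetched data belongs to the caller */
    }

    gdbm_close(dbf);
    return 0;
}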
2,741
372
/* * Copyright © BeyondTrust Software 2004 - 2019 * All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * BEYONDTRUST MAKES THIS SOFTWARE AVAILABLE UNDER OTHER LICENSING TERMS AS * WELL. IF YOU HAVE ENTERED INTO A SEPARATE LICENSE AGREEMENT WITH * BEYONDTRUST, THEN YOU MAY ELECT TO USE THE SOFTWARE UNDER THE TERMS OF THAT * SOFTWARE LICENSE AGREEMENT INSTEAD OF THE TERMS OF THE APACHE LICENSE, * NOTWITHSTANDING THE ABOVE NOTICE. IF YOU HAVE QUESTIONS, OR WISH TO REQUEST * A COPY OF THE ALTERNATE LICENSING TERMS OFFERED BY BEYONDTRUST, PLEASE CONTACT * BEYONDTRUST AT beyondtrust.com/contact */ /* * Module Name: * * threadpool-poll.c * * Abstract: * * Thread pool API (poll backend) * * Authors: <NAME> (<EMAIL>) * */ #include "includes.h" #include "threadpool-poll.h" /* Maximum number of ticks (task function invocations) to process each iteration of the event loop */ #define MAX_TICKS 1000 static VOID TaskDelete( PPOLL_TASK pTask ) { RTL_FREE(&pTask->pUnixSignal); RtlMemoryFree(pTask); } /* * Wakes up a thread. Call with the thread lock held */ static VOID SignalThread( PPOLL_THREAD pThread ) { char c = 0; if (!pThread->bSignalled) { int res = write(pThread->SignalFds[1], &c, sizeof(c)); if (res != sizeof(c)) assert(res == sizeof(c)); pThread->bSignalled = TRUE; } } static VOID LockAllThreads( PLW_THREAD_POOL pPool ) { ULONG i = 0; for (i = 0; i < pPool->ulEventThreadCount; i++) { LOCK_THREAD(&pPool->pEventThreads[i]); } } static VOID UnlockAllThreads( PLW_THREAD_POOL pPool ) { ULONG i = 0; for (i = 0; i < pPool->ulEventThreadCount; i++) { UNLOCK_THREAD(&pPool->pEventThreads[i]); } } /* * Runs one tick of a task. */ static VOID RunTask( PPOLL_TASK pTask, LONG64 llNow ) { LONG64 llNewTime = 0; /* If task had a deadline, set the time we pass into the function to the time remaining */ if (pTask->llDeadline != 0) { llNewTime = pTask->llDeadline - llNow; if (llNewTime < 0) { llNewTime = 0; } } pTask->pfnFunc( pTask, pTask->pFuncContext, pTask->EventArgs, &pTask->EventWait, &llNewTime); /* Clear event arguments except sticky bits */ pTask->EventArgs &= STICKY_EVENTS; /* If the function gave us a valid time, update the task deadline */ if (llNewTime != 0) { pTask->llDeadline = llNow + llNewTime; } else { pTask->llDeadline = 0; } } /* * Updates the poll set with the events a task is waiting on. */ static VOID UpdateEventWait( PPOLL_TASK pTask ) { struct pollfd* pPoll = &pTask->pThread->pPoll[pTask->PollIndex]; if (pTask->Fd != -1) { pPoll->events = 0; if (pTask->EventWait & LW_TASK_EVENT_FD_READABLE) { pPoll->events |= POLLIN; } if (pTask->EventWait & LW_TASK_EVENT_FD_WRITABLE) { pPoll->events |= POLLOUT; } if (pTask->EventWait & LW_TASK_EVENT_FD_EXCEPTION) { pPoll->events |= POLLERR; } } } /* * Updates the event args on tasks from epoll results and * schedules them to run. 
*/ static BOOLEAN UpdateEventArgs( PPOLL_TASK pTask, struct pollfd* pPoll ) { if (pTask->Fd >= 0 && pPoll[pTask->PollIndex].revents) { if (pPoll[pTask->PollIndex].revents & POLLIN) { pTask->EventArgs |= LW_TASK_EVENT_FD_READABLE; } if (pPoll[pTask->PollIndex].revents & (POLLOUT | POLLHUP)) { pTask->EventArgs |= LW_TASK_EVENT_FD_WRITABLE; } if (pPoll[pTask->PollIndex].revents & POLLERR) { pTask->EventArgs |= LW_TASK_EVENT_FD_EXCEPTION; } return TRUE; } return FALSE; } static VOID ScheduleWaitingTasks( struct pollfd* pPoll, int eventCount, PRING pWaiting, PRING pTimed, PRING pRunnable, PBOOLEAN pbSignalled ) { PRING pRing = NULL; PRING pNext = NULL; PLW_TASK pTask = NULL; if (pPoll[0].revents & POLLIN) { *pbSignalled = TRUE; } for (pRing = pWaiting->pNext; eventCount && pRing != pWaiting; pRing = pNext) { pNext = pRing->pNext; pTask = LW_STRUCT_FROM_FIELD(pRing, POLL_TASK, QueueRing); if (UpdateEventArgs(pTask, pPoll)) { RingRemove(pRing); RingEnqueue(pRunnable, pRing); eventCount--; } } for (pRing = pTimed->pNext; eventCount && pRing != pTimed; pRing = pNext) { pNext = pRing->pNext; pTask = LW_STRUCT_FROM_FIELD(pRing, POLL_TASK, QueueRing); if (UpdateEventArgs(pTask, pPoll)) { RingRemove(pRing); RingEnqueue(pRunnable, pRing); eventCount--; } } } static VOID ScheduleTimedTasks( PRING pTimed, LONG64 llNow, PRING pRunnable ) { PLW_TASK pTask = NULL; PRING pRing = NULL; PRING pNext = NULL; for (pRing = pTimed->pNext; pRing != pTimed; pRing = pNext) { pNext = pRing->pNext; pTask = LW_STRUCT_FROM_FIELD(pRing, POLL_TASK, QueueRing); /* No more tasks in the queue are past the deadline since the queue is sorted */ if (pTask->llDeadline > llNow) { break; } RingRemove(&pTask->QueueRing); RingEnqueue(pRunnable, &pTask->QueueRing); pTask->EventArgs |= LW_TASK_EVENT_TIME; } } static VOID InsertTimedQueue( PRING pTimed, PLW_TASK pInsert ) { PLW_TASK pTask = NULL; PRING pRing = NULL; /* Find the first task in the queue with a later deadline than the task to insert */ for (pRing = pTimed->pNext; pRing != pTimed; pRing = pRing->pNext) { pTask = LW_STRUCT_FROM_FIELD(pRing, POLL_TASK, QueueRing); if (pTask->llDeadline > pInsert->llDeadline) break; } /* Insert the task */ RingInsertBefore(pRing, &pInsert->QueueRing); } static NTSTATUS Poll( IN PCLOCK pClock, IN OUT PLONG64 pllNow, IN LONG64 llNextDeadline, IN struct pollfd* pPoll, IN nfds_t PollSize, OUT int* pReady ) { NTSTATUS status = STATUS_SUCCESS; int ready = 0; int timeout = 0; do { if (llNextDeadline >= 0) { /* Convert to timeout in milliseconds */ timeout = (llNextDeadline - *pllNow) / 1000000ll; if (timeout < 0) { timeout = 0; } } else { timeout = -1; } ready = poll(pPoll, PollSize, timeout); if (ready < 0 && (errno == EINTR || errno == EAGAIN)) { /* Update current time so the next timeout calculation is correct */ status = ClockGetMonotonicTime(pClock, pllNow); GOTO_ERROR_ON_STATUS(status); } } while (ready < 0 && (errno == EINTR || errno == EAGAIN)); if (ready < 0) { status = LwErrnoToNtStatus(errno); GOTO_ERROR_ON_STATUS(status); } *pReady = ready; error: return status; } static NTSTATUS ProcessRunnable( PPOLL_THREAD pThread, PRING pRunnable, PRING pTimed, PRING pWaiting, LONG64 llNow ) { NTSTATUS status = STATUS_SUCCESS; ULONG ulTicks = MAX_TICKS; PLW_TASK pTask = NULL; PLW_TASK_GROUP pGroup = NULL; PRING pRing = NULL; PRING pNext = NULL; /* We are guaranteed to run each task at least once. If tasks remain on the runnable list by yielding, we will continue to run them all in a round robin until our ticks are depleted. 
*/ while (ulTicks && !RingIsEmpty(pRunnable)) { for (pRing = pRunnable->pNext; pRing != pRunnable; pRing = pNext) { pNext = pRing->pNext; pTask = LW_STRUCT_FROM_FIELD(pRing, POLL_TASK, QueueRing); RunTask(pTask, llNow); if (ulTicks) { ulTicks--; } if (pTask->EventWait != LW_TASK_EVENT_COMPLETE) { /* Task is still waiting to be runnable, update events in poll set */ UpdateEventWait(pTask); if (pTask->EventWait & LW_TASK_EVENT_YIELD) { /* Task is yielding. Set YIELD in its trigger arguments and and leave it on the runnable list for the next iteration */ pTask->EventArgs |= LW_TASK_EVENT_YIELD; } else if (pTask->EventWait & LW_TASK_EVENT_TIME) { /* If the task is waiting for a timeout, insert it into the timed queue */ RingRemove(&pTask->QueueRing); InsertTimedQueue(pTimed, pTask); } else { /* Otherwise, put it in the generic waiting queue */ RingRemove(&pTask->QueueRing); RingEnqueue(pWaiting, &pTask->QueueRing); } } else { /* Task is complete */ RingRemove(&pTask->QueueRing); /* Turn off any fd in the poll set */ if (pTask->Fd >= 0) { status = LwRtlSetTaskFd(pTask, pTask->Fd, 0); GOTO_ERROR_ON_STATUS(status); } /* Unsubscribe task from any UNIX signals */ if (pTask->pUnixSignal) { RegisterTaskUnixSignal(pTask, 0, FALSE); } LOCK_POOL(pThread->pPool); pThread->ulLoad--; UNLOCK_POOL(pThread->pPool); pGroup = pTask->pGroup; /* If task was in a task group, remove it and notify anyone waiting on the group */ if (pGroup) { LOCK_GROUP(pGroup); pTask->pGroup = NULL; RingRemove(&pTask->GroupRing); pthread_cond_broadcast(&pGroup->Event); UNLOCK_GROUP(pGroup); } LOCK_THREAD(pThread); if (--pTask->ulRefCount) { /* The task still has a reference, so mark it as completed and notify anyone waiting on it */ pTask->EventSignal = TASK_COMPLETE_MASK; pthread_cond_broadcast(&pThread->Event); UNLOCK_THREAD(pThread); } else { /* We held the last reference to the task, so delete it */ RingRemove(&pTask->SignalRing); UNLOCK_THREAD(pThread); TaskDelete(pTask); } } } } error: return status; } static VOID ScheduleSignalled( PPOLL_THREAD pThread, PRING pRunnable, PBOOLEAN pbShutdown ) { PRING pRing = NULL; PRING pNext = NULL; PLW_TASK pTask = NULL; char c = 0; int res = 0; LOCK_THREAD(pThread); if (pThread->bSignalled) { pThread->bSignalled = FALSE; res = read(pThread->SignalFds[0], &c, sizeof(c)); if (res != sizeof(c)) assert(res == sizeof(c)); /* Add all signalled tasks to the runnable list */ for (pRing = pThread->Tasks.pNext; pRing != &pThread->Tasks; pRing = pNext) { pNext = pRing->pNext; pTask = LW_STRUCT_FROM_FIELD(pRing, POLL_TASK, SignalRing); RingRemove(&pTask->SignalRing); RingRemove(&pTask->QueueRing); if (pTask->EventSignal != TASK_COMPLETE_MASK) { RingEnqueue(pRunnable, &pTask->QueueRing); /* Transfer the signal bits into the event args */ pTask->EventArgs |= pTask->EventSignal; pTask->EventSignal = 0; } } if (pThread->bShutdown && !*pbShutdown) { *pbShutdown = pThread->bShutdown; } } UNLOCK_THREAD(pThread); } static NTSTATUS EventLoop( PPOLL_THREAD pThread ) { NTSTATUS status = STATUS_SUCCESS; RING timed; RING runnable; RING waiting; CLOCK clock = {0}; LONG64 llNow = 0; LONG64 llNextDeadline = 0; int ready = 0; BOOLEAN bShutdown = FALSE; BOOLEAN bSignalled = FALSE; RingInit(&runnable); RingInit(&timed); RingInit(&waiting); for (;;) { /* Get current time for this iteration */ status = ClockGetMonotonicTime(&clock, &llNow); GOTO_ERROR_ON_STATUS(status); /* Schedule any timed tasks that have reached their deadline */ ScheduleTimedTasks( &timed, llNow, &runnable); /* Schedule any waiting tasks that epoll 
indicated are ready and check if the thread received a signal */ ScheduleWaitingTasks( pThread->pPoll, ready, &waiting, &timed, &runnable, &bSignalled); if (bSignalled) { /* Schedule explicitly-signalled tasks and check if we have been told to shut down */ ScheduleSignalled( pThread, &runnable, &bShutdown); } /* Process runnable tasks */ status = ProcessRunnable( pThread, &runnable, &timed, &waiting, llNow); GOTO_ERROR_ON_STATUS(status); if (!RingIsEmpty(&runnable)) { /* If there are still runnable tasks, set the next deadline to now so we can check for other tasks becoming runnable but do not block in Poll() */ llNextDeadline = llNow; } else if (!RingIsEmpty(&timed)) { /* There are timed tasks, so set our next deadline to the deadline of the first task in the queue */ llNextDeadline = LW_STRUCT_FROM_FIELD(timed.pNext, POLL_TASK, QueueRing)->llDeadline; } else if (!RingIsEmpty(&waiting) || !bShutdown) { /* There are waiting tasks or we are not shutting down, so poll indefinitely */ llNextDeadline = -1; } else { /* We are shutting down and there are no remaining tasks, so leave */ break; } /* Wait (or check) for activity */ status = Poll( &clock, &llNow, llNextDeadline, pThread->pPoll, pThread->PollSize, &ready); GOTO_ERROR_ON_STATUS(status); } error: return status; } static PVOID EventThread( PVOID pContext ) { NTSTATUS status = STATUS_SUCCESS; status = EventLoop((PPOLL_THREAD) pContext); if (!NT_SUCCESS(status)) { LW_RTL_LOG_ERROR( "Task thread exiting with fatal error: %s (0x%x)", LwNtStatusToName(status), status); abort(); } return NULL; } NTSTATUS LwRtlCreateTask( PLW_THREAD_POOL pPool, PLW_TASK* ppTask, PLW_TASK_GROUP pGroup, LW_TASK_FUNCTION pfnFunc, PVOID pContext ) { NTSTATUS status = STATUS_SUCCESS; PPOLL_TASK pTask = NULL; PPOLL_THREAD pThread = NULL; ULONG ulMinLoad = 0xFFFFFFFF; ULONG ulIndex = 0; if (pPool->pDelegate) { return LwRtlCreateTask(pPool->pDelegate, ppTask, pGroup, pfnFunc, pContext); } status = LW_RTL_ALLOCATE_AUTO(&pTask); GOTO_ERROR_ON_STATUS(status); RingInit(&pTask->GroupRing); RingInit(&pTask->QueueRing); RingInit(&pTask->SignalRing); pTask->pGroup = pGroup; pTask->ulRefCount = 2; pTask->pfnFunc = pfnFunc; pTask->pFuncContext = pContext; pTask->Fd = -1; pTask->EventArgs = LW_TASK_EVENT_INIT; pTask->EventWait = LW_TASK_EVENT_EXPLICIT; pTask->llDeadline = 0; LOCK_POOL(pPool); for (ulIndex = 0; ulIndex < pPool->ulEventThreadCount; ulIndex++) { if (pPool->pEventThreads[ulIndex].ulLoad < ulMinLoad) { pThread = &pPool->pEventThreads[ulIndex]; ulMinLoad = pThread->ulLoad; } } pTask->pThread = pThread; if (pGroup) { LOCK_GROUP(pGroup); if (pGroup->bCancelled) { UNLOCK_GROUP(pGroup); UNLOCK_POOL(pPool); status = STATUS_CANCELLED; GOTO_ERROR_ON_STATUS(status); } RingInsertBefore(&pGroup->Tasks, &pTask->GroupRing); UNLOCK_GROUP(pGroup); } pThread->ulLoad++; UNLOCK_POOL(pPool); *ppTask = pTask; cleanup: return status; error: if (pTask) { TaskDelete(pTask); } *ppTask = NULL; goto cleanup; } NTSTATUS LwRtlCreateTaskGroup( PLW_THREAD_POOL pPool, PLW_TASK_GROUP* ppGroup ) { NTSTATUS status = STATUS_SUCCESS; PLW_TASK_GROUP pGroup = NULL; if (pPool->pDelegate) { return LwRtlCreateTaskGroup(pPool->pDelegate, ppGroup); } status = LW_RTL_ALLOCATE_AUTO(&pGroup); GOTO_ERROR_ON_STATUS(status); pGroup->pPool = pPool; RingInit(&pGroup->Tasks); status = LwErrnoToNtStatus(pthread_mutex_init(&pGroup->Lock, NULL)); GOTO_ERROR_ON_STATUS(status); pGroup->bLockInit = TRUE; status = LwErrnoToNtStatus(pthread_cond_init(&pGroup->Event, NULL)); GOTO_ERROR_ON_STATUS(status); pGroup->bEventInit = 
TRUE; *ppGroup = pGroup; cleanup: return status; error: LwRtlFreeTaskGroup(&pGroup); *ppGroup = NULL; goto cleanup; } VOID LwRtlReleaseTask( PLW_TASK* ppTask ) { PLW_TASK pTask = *ppTask; int ulRefCount = 0; if (pTask) { LOCK_THREAD(pTask->pThread); ulRefCount = --pTask->ulRefCount; if (ulRefCount == 0) { RingRemove(&pTask->SignalRing); } UNLOCK_THREAD(pTask->pThread); if (ulRefCount == 0) { TaskDelete(pTask); } *ppTask = NULL; } } VOID RetainTask( PLW_TASK pTask ) { if (pTask) { LOCK_THREAD(pTask->pThread); ++pTask->ulRefCount; UNLOCK_THREAD(pTask->pThread); } } VOID LwRtlFreeTaskGroup( PLW_TASK_GROUP* ppGroup ) { PLW_TASK_GROUP pGroup = *ppGroup; if (pGroup) { if (pGroup->bLockInit) { pthread_mutex_destroy(&pGroup->Lock); } if (pGroup->bEventInit) { pthread_cond_destroy(&pGroup->Event); } RtlMemoryFree(pGroup); *ppGroup = NULL; } } static NTSTATUS AddPollFd( PPOLL_THREAD pThread, int Fd, nfds_t* pIndex ) { NTSTATUS status = STATUS_SUCCESS; nfds_t index = 0; nfds_t NewCapacity = 0; struct pollfd* pNewPoll = NULL; for (index = 0; index < pThread->PollCapacity; index++) { if (pThread->pPoll[index].fd == -1) { break; } } if (index == pThread->PollCapacity) { NewCapacity = pThread->PollCapacity * 2; if (NewCapacity == 0) { NewCapacity = 16; } pNewPoll = LwRtlMemoryRealloc(pThread->pPoll, NewCapacity * sizeof(*pNewPoll)); if (!pNewPoll) { status = STATUS_INSUFFICIENT_RESOURCES; GOTO_ERROR_ON_STATUS(status); } for (index = pThread->PollCapacity; index < NewCapacity; index++) { memset(&pNewPoll[index], 0, sizeof(*pNewPoll)); pNewPoll[index].fd = -1; } index = pThread->PollCapacity; pThread->pPoll = pNewPoll; pThread->PollCapacity = NewCapacity; } if (index >= pThread->PollSize) { pThread->PollSize = index + 1; } memset(&pThread->pPoll[index], 0, sizeof(pThread->pPoll[index])); pThread->pPoll[index].fd = Fd; *pIndex = index; error: return status; } static VOID RemovePollFd( PPOLL_THREAD pThread, nfds_t index ) { pThread->pPoll[index].fd = -1; if (index == pThread->PollSize - 1) { pThread->PollSize--; } } LW_NTSTATUS LwRtlSetTaskFd( PLW_TASK pTask, int Fd, LW_TASK_EVENT_MASK Mask ) { NTSTATUS status = STATUS_SUCCESS; if (Fd < 0) { status = STATUS_INVALID_HANDLE; GOTO_ERROR_ON_STATUS(status); } if (Fd == pTask->Fd) { if (Mask == 0) { RemovePollFd(pTask->pThread, pTask->PollIndex); pTask->Fd = -1; pTask->PollIndex = 0; } } else if (Mask) { if (pTask->Fd >= 0) { /* Only one fd is supported */ status = STATUS_INSUFFICIENT_RESOURCES; GOTO_ERROR_ON_STATUS(status); } status = AddPollFd(pTask->pThread, Fd, &pTask->PollIndex); GOTO_ERROR_ON_STATUS(status); pTask->Fd = Fd; } error: return status; } NTSTATUS LwRtlQueryTaskFd( PLW_TASK pTask, int Fd, PLW_TASK_EVENT_MASK pMask ) { NTSTATUS status = STATUS_SUCCESS; if (Fd < 0 || Fd != pTask->Fd) { status = STATUS_INVALID_HANDLE; GOTO_ERROR_ON_STATUS(status); } *pMask = pTask->EventArgs & (LW_TASK_EVENT_FD_READABLE | LW_TASK_EVENT_FD_WRITABLE | LW_TASK_EVENT_FD_EXCEPTION); cleanup: return status; error: *pMask = 0; goto cleanup; } VOID LwRtlWakeTask( PLW_TASK pTask ) { LOCK_THREAD(pTask->pThread); if (pTask->EventSignal != TASK_COMPLETE_MASK) { pTask->EventSignal |= LW_TASK_EVENT_EXPLICIT; RingRemove(&pTask->SignalRing); RingEnqueue(&pTask->pThread->Tasks, &pTask->SignalRing); SignalThread(pTask->pThread); } UNLOCK_THREAD(pTask->pThread); } LW_NTSTATUS LwRtlSetTaskUnixSignal( LW_IN PLW_TASK pTask, LW_IN int Sig, LW_IN LW_BOOLEAN bSubscribe ) { NTSTATUS status = STATUS_SUCCESS; if (bSubscribe && !pTask->pUnixSignal) { status = 
LW_RTL_ALLOCATE_AUTO(&pTask->pUnixSignal); GOTO_ERROR_ON_STATUS(status); } status = RegisterTaskUnixSignal(pTask, Sig, bSubscribe); GOTO_ERROR_ON_STATUS(status); error: return status; } void NotifyTaskUnixSignal( PLW_TASK pTask, siginfo_t* pInfo ) { LOCK_THREAD(pTask->pThread); if (pTask->EventSignal != TASK_COMPLETE_MASK) { while (pTask->pUnixSignal->si_signo) { pthread_cond_wait(&pTask->pThread->Event, &pTask->pThread->Lock); if (pTask->EventSignal == TASK_COMPLETE_MASK) { goto cleanup; } } *pTask->pUnixSignal = *pInfo; pTask->EventSignal |= LW_TASK_EVENT_UNIX_SIGNAL; RingRemove(&pTask->SignalRing); RingEnqueue(&pTask->pThread->Tasks, &pTask->SignalRing); SignalThread(pTask->pThread); } cleanup: UNLOCK_THREAD(pTask->pThread); } LW_BOOLEAN LwRtlNextTaskUnixSignal( LW_IN PLW_TASK pTask, LW_OUT siginfo_t* pInfo ) { BOOLEAN bResult = FALSE; LOCK_THREAD(pTask->pThread); if (pTask->pUnixSignal == NULL || pTask->pUnixSignal->si_signo == 0) { bResult = FALSE; } else { if (pInfo) { *pInfo = *pTask->pUnixSignal; } pTask->pUnixSignal->si_signo = 0; pthread_cond_broadcast(&pTask->pThread->Event); bResult = TRUE; } UNLOCK_THREAD(pTask->pThread); return bResult; } VOID LwRtlCancelTask( PLW_TASK pTask ) { LOCK_THREAD(pTask->pThread); if (pTask->EventSignal != TASK_COMPLETE_MASK) { pTask->EventSignal |= LW_TASK_EVENT_EXPLICIT | LW_TASK_EVENT_CANCEL; RingRemove(&pTask->SignalRing); RingEnqueue(&pTask->pThread->Tasks, &pTask->SignalRing); SignalThread(pTask->pThread); } UNLOCK_THREAD(pTask->pThread); } VOID LwRtlWaitTask( PLW_TASK pTask ) { LOCK_THREAD(pTask->pThread); while (pTask->EventSignal != TASK_COMPLETE_MASK) { pthread_cond_wait(&pTask->pThread->Event, &pTask->pThread->Lock); } UNLOCK_THREAD(pTask->pThread); } VOID LwRtlWakeTaskGroup( PLW_TASK_GROUP pGroup ) { PRING ring = NULL; PLW_TASK pTask = NULL; LOCK_GROUP(pGroup); LockAllThreads(pGroup->pPool); for (ring = pGroup->Tasks.pNext; ring != &pGroup->Tasks; ring = ring->pNext) { pTask = LW_STRUCT_FROM_FIELD(ring, POLL_TASK, GroupRing); if (pTask->EventSignal != TASK_COMPLETE_MASK) { pTask->EventSignal |= LW_TASK_EVENT_EXPLICIT; RingRemove(&pTask->SignalRing); RingEnqueue(&pTask->pThread->Tasks, &pTask->SignalRing); SignalThread(pTask->pThread); } } UnlockAllThreads(pGroup->pPool); UNLOCK_GROUP(pGroup); } VOID LwRtlCancelTaskGroup( PLW_TASK_GROUP pGroup ) { PRING ring = NULL; PLW_TASK pTask = NULL; LOCK_GROUP(pGroup); pGroup->bCancelled = TRUE; LockAllThreads(pGroup->pPool); for (ring = pGroup->Tasks.pNext; ring != &pGroup->Tasks; ring = ring->pNext) { pTask = LW_STRUCT_FROM_FIELD(ring, POLL_TASK, GroupRing); if (pTask->EventSignal != TASK_COMPLETE_MASK) { pTask->EventSignal |= LW_TASK_EVENT_EXPLICIT | LW_TASK_EVENT_CANCEL; RingRemove(&pTask->SignalRing); RingEnqueue(&pTask->pThread->Tasks, &pTask->SignalRing); SignalThread(pTask->pThread); } } UnlockAllThreads(pGroup->pPool); UNLOCK_GROUP(pGroup); } VOID LwRtlWaitTaskGroup( PLW_TASK_GROUP pGroup ) { PRING pRing = NULL; PLW_TASK pTask = NULL; BOOLEAN bStillAlive = TRUE; LOCK_GROUP(pGroup); while (bStillAlive) { bStillAlive = FALSE; LockAllThreads(pGroup->pPool); for (pRing = pGroup->Tasks.pNext; !bStillAlive && pRing != &pGroup->Tasks; pRing = pRing->pNext) { pTask = LW_STRUCT_FROM_FIELD(pRing, POLL_TASK, GroupRing); if (pTask->EventSignal != TASK_COMPLETE_MASK) { bStillAlive = TRUE; } } UnlockAllThreads(pGroup->pPool); if (bStillAlive) { pthread_cond_wait(&pGroup->Event, &pGroup->Lock); } } UNLOCK_GROUP(pGroup); } static NTSTATUS InitEventThread( PPOLL_POOL pPool, PLW_THREAD_POOL_ATTRIBUTES 
pAttrs, PPOLL_THREAD pThread, ULONG ulCpu ) { NTSTATUS status = STATUS_SUCCESS; pthread_attr_t threadAttr; BOOLEAN bThreadAttrInit = FALSE; nfds_t index = 0; status = LwErrnoToNtStatus(pthread_attr_init(&threadAttr)); GOTO_ERROR_ON_STATUS(status); bThreadAttrInit = TRUE; pThread->pPool = pPool; status = LwErrnoToNtStatus(pthread_mutex_init(&pThread->Lock, NULL)); GOTO_ERROR_ON_STATUS(status); status = LwErrnoToNtStatus(pthread_cond_init(&pThread->Event, NULL)); GOTO_ERROR_ON_STATUS(status); if (pipe(pThread->SignalFds) < 0) { status = LwErrnoToNtStatus(errno); GOTO_ERROR_ON_STATUS(status); } SetCloseOnExec(pThread->SignalFds[0]); SetCloseOnExec(pThread->SignalFds[1]); status = AddPollFd(pThread, pThread->SignalFds[0], &index); GOTO_ERROR_ON_STATUS(status); assert(index == 0); pThread->pPoll[index].events = POLLIN; RingInit(&pThread->Tasks); if (pAttrs && pAttrs->ulTaskThreadStackSize) { status = LwErrnoToNtStatus( pthread_attr_setstacksize(&threadAttr, pAttrs->ulTaskThreadStackSize)); GOTO_ERROR_ON_STATUS(status); } status = LwErrnoToNtStatus( pthread_create( &pThread->Thread, &threadAttr, EventThread, pThread)); GOTO_ERROR_ON_STATUS(status); error: if (bThreadAttrInit) { pthread_attr_destroy(&threadAttr); } return status; } static VOID DestroyEventThread( PPOLL_THREAD pThread ) { pthread_mutex_destroy(&pThread->Lock); pthread_cond_destroy(&pThread->Event); RTL_FREE(&pThread->pPoll); if (pThread->SignalFds[0] >= 0) { close(pThread->SignalFds[0]); } if (pThread->SignalFds[1] >= 0) { close(pThread->SignalFds[1]); } } NTSTATUS LwRtlCreateThreadPool( PLW_THREAD_POOL* ppPool, PLW_THREAD_POOL_ATTRIBUTES pAttrs ) { NTSTATUS status = STATUS_SUCCESS; PLW_THREAD_POOL pPool = NULL; int i = 0; int numCpus = 0; status = LW_RTL_ALLOCATE_AUTO(&pPool); GOTO_ERROR_ON_STATUS(status); status = LwErrnoToNtStatus(pthread_mutex_init(&pPool->Lock, NULL)); GOTO_ERROR_ON_STATUS(status); status = LwErrnoToNtStatus(pthread_cond_init(&pPool->Event, NULL)); GOTO_ERROR_ON_STATUS(status); numCpus = LwRtlGetCpuCount(); if (GetDelegateAttr(pAttrs)) { status = AcquireDelegatePool(&pPool->pDelegate); GOTO_ERROR_ON_STATUS(status); } else { pPool->ulEventThreadCount = GetTaskThreadsAttr(pAttrs, numCpus); if (pPool->ulEventThreadCount) { status = LW_RTL_ALLOCATE_ARRAY_AUTO( &pPool->pEventThreads, pPool->ulEventThreadCount); GOTO_ERROR_ON_STATUS(status); for (i = 0; i < pPool->ulEventThreadCount; i++) { status = InitEventThread(pPool, pAttrs, &pPool->pEventThreads[i], i % numCpus); GOTO_ERROR_ON_STATUS(status); } } } status = InitWorkThreads(&pPool->WorkThreads, pAttrs, numCpus); GOTO_ERROR_ON_STATUS(status); *ppPool = pPool; cleanup: return status; error: LwRtlFreeThreadPool(&pPool); goto cleanup; } VOID LwRtlFreeThreadPool( PLW_THREAD_POOL* ppPool ) { PLW_THREAD_POOL pPool = *ppPool; PPOLL_THREAD pThread = NULL; int i = 0; if (pPool) { LOCK_POOL(pPool); pPool->bShutdown = TRUE; pthread_cond_broadcast(&pPool->Event); UNLOCK_POOL(pPool); if (pPool->pEventThreads) { for (i = 0; i < pPool->ulEventThreadCount; i++) { pThread = &pPool->pEventThreads[i]; LOCK_THREAD(pThread); pThread->bShutdown = TRUE; SignalThread(pThread); UNLOCK_THREAD(pThread); pthread_join(pThread->Thread, NULL); DestroyEventThread(pThread); } RtlMemoryFree(pPool->pEventThreads); } if (pPool->pDelegate) { ReleaseDelegatePool(&pPool->pDelegate); } pthread_cond_destroy(&pPool->Event); pthread_mutex_destroy(&pPool->Lock); DestroyWorkThreads(&pPool->WorkThreads); RtlMemoryFree(pPool); *ppPool = NULL; } } LW_NTSTATUS LwRtlCreateWorkItem( LW_IN PLW_THREAD_POOL pPool, 
LW_OUT PLW_WORK_ITEM* ppWorkItem, LW_IN LW_WORK_ITEM_FUNCTION pfnFunc, LW_IN PVOID pContext ) { return CreateWorkItem(&pPool->WorkThreads, ppWorkItem, pfnFunc, pContext); } LW_VOID LwRtlFreeWorkItem( LW_IN LW_OUT PLW_WORK_ITEM* ppWorkItem ) { FreeWorkItem(ppWorkItem); } LW_VOID LwRtlScheduleWorkItem( LW_IN PLW_WORK_ITEM pWorkItem, LW_IN LW_SCHEDULE_FLAGS Flags ) { ScheduleWorkItem(NULL, pWorkItem, Flags); } LW_VOID LwRtlWaitWorkItems( LW_IN PLW_THREAD_POOL pPool ) { WaitWorkItems(&pPool->WorkThreads); }
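
/*
 * Illustrative usage sketch (not part of the original sources): a minimal
 * outline of how the LwRtl task API implemented above might be driven by a
 * caller. The header name, the exact LW_TASK_FUNCTION callback signature, and
 * the completion convention (asking to wait for LW_TASK_EVENT_COMPLETE) are
 * assumptions inferred from the calls visible in this file, not verified
 * against the library's public headers.
 */
#include <lw/base.h>    /* assumed umbrella header exposing the LwRtl* calls */

/* Assumed callback shape: wake mask in, next wait mask (and optional timeout)
 * out. A task retires itself by requesting LW_TASK_EVENT_COMPLETE. */
static VOID
ExampleTaskFunc(
    PLW_TASK pTask,
    PVOID pContext,
    LW_TASK_EVENT_MASK WakeMask,
    PLW_TASK_EVENT_MASK pWaitMask,
    PLONG64 pllTime
    )
{
    if (WakeMask & LW_TASK_EVENT_CANCEL)
    {
        /* Cancelled (e.g. by LwRtlCancelTaskGroup): finish up */
        *pWaitMask = LW_TASK_EVENT_COMPLETE;
    }
    else
    {
        /* Keep sleeping until explicitly woken or cancelled */
        *pWaitMask = LW_TASK_EVENT_EXPLICIT;
    }
}

static NTSTATUS
ExampleRunOneTask(
    VOID
    )
{
    NTSTATUS status = STATUS_SUCCESS;
    PLW_THREAD_POOL pPool = NULL;
    PLW_TASK_GROUP pGroup = NULL;
    PLW_TASK pTask = NULL;

    status = LwRtlCreateThreadPool(&pPool, NULL);
    GOTO_ERROR_ON_STATUS(status);

    status = LwRtlCreateTaskGroup(pPool, &pGroup);
    GOTO_ERROR_ON_STATUS(status);

    /* A new task starts out waiting for an explicit wake
       (EventWait = LW_TASK_EVENT_EXPLICIT, EventArgs = LW_TASK_EVENT_INIT) */
    status = LwRtlCreateTask(pPool, &pTask, pGroup, ExampleTaskFunc, NULL);
    GOTO_ERROR_ON_STATUS(status);

    LwRtlWakeTask(pTask);          /* first run delivers LW_TASK_EVENT_INIT */
    LwRtlCancelTaskGroup(pGroup);  /* ask every task in the group to wind down */
    LwRtlWaitTaskGroup(pGroup);    /* block until all members report complete */

error:

    if (pTask)
    {
        LwRtlReleaseTask(&pTask);
    }

    if (pGroup)
    {
        LwRtlFreeTaskGroup(&pGroup);
    }

    if (pPool)
    {
        LwRtlFreeThreadPool(&pPool);
    }

    return status;
}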
// engine/src/main/java/org/terasology/engine/rendering/nui/layers/mainMenu/UniverseWrapper.java
// Copyright 2021 The Terasology Foundation
// SPDX-License-Identifier: Apache-2.0
package org.terasology.engine.rendering.nui.layers.mainMenu;

import org.terasology.engine.rendering.world.WorldSetupWrapper;

/**
 * A class which stores the universe level properties for a game like whether
 * the game is single-player or multi-player, seed value and the game name.
 */
public class UniverseWrapper {

    private String seed;
    private boolean loadingAsServer;
    private String gameName;
    private WorldSetupWrapper targetWorld;

    public void setSeed(String seed) {
        this.seed = seed;
    }

    public String getSeed() {
        return seed;
    }

    public void setLoadingAsServer(boolean loadingAsServer) {
        this.loadingAsServer = loadingAsServer;
    }

    public boolean getLoadingAsServer() {
        return loadingAsServer;
    }

    public void setGameName(String gameName) {
        this.gameName = gameName;
    }

    public String getGameName() {
        return gameName;
    }

    public void setTargetWorld(WorldSetupWrapper targetWorld) {
        this.targetWorld = targetWorld;
    }

    public WorldSetupWrapper getTargetWorld() {
        return targetWorld;
    }
}
{"nom":"Varaize","circ":"3ème circonscription","dpt":"Charente-Maritime","inscrits":429,"abs":236,"votants":193,"blancs":6,"nuls":25,"exp":162,"res":[{"nuance":"REM","nom":"<NAME>","voix":104},{"nuance":"LR","nom":"<NAME>","voix":58}]}
// Update the VARIANT arg in docker-compose.yml to pick a Node.js version: 10, 12, 14
{
    "name": "RSSMonster",
    "dockerComposeFile": "docker-compose.yml",
    "service": "app",
    "runServices": ["app", "client", "db"],
    "workspaceFolder": "/workspace",

    // Set *default* container specific settings.json values on container create.
    "settings": {
        "sqltools.connections": [{
            "name": "Container database",
            "driver": "MySQL",
            "previewLimit": 50,
            "server": "localhost",
            "port": 3306,
            "database": "rssmonster",
            "username": "rssmonster",
            "password": "password"
        }]
    },

    "remoteEnv": {
        "VUE_APP_HOSTNAME": "http://localhost:3000"
    },

    // Add the IDs of extensions you want installed when the container is created.
    "extensions": [
        "dbaeumer.vscode-eslint",
        "mtxr.sqltools"
    ],

    // Use 'forwardPorts' to make a list of ports inside the container available locally.
    "forwardPorts": [3000, 3306, 8080],

    // Use 'postCreateCommand' to run commands after the container is created.
    // "postCreateCommand": "yarn install",

    // Comment out connect as root instead. More info: https://aka.ms/vscode-remote/containers/non-root.
    "remoteUser": "node"
}
// matus-chochlik/oglplus
// File include/oglplus/enums/ext/debug_output_severity.ipp
//
// Automatically generated file, DO NOT modify manually.
// Edit the source 'source/enums/oglplus/ext/debug_output_severity.txt'
// or the 'source/enums/make_enum.py' script instead.
//
// Copyright 2010-2019 <NAME>.
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt
//
#if OGLPLUS_DOCUMENTATION_ONLY
/// DEBUG_SEVERITY_HIGH_ARB
High,
/// DEBUG_SEVERITY_MEDIUM_ARB
Medium,
/// DEBUG_SEVERITY_LOW_ARB
Low,
/// DONT_CARE
DontCare
#else // !OGLPLUS_DOCUMENTATION_ONLY
#include <oglplus/enums/ext/debug_output_severity_def.ipp>
#endif
// Moktarino/librealsense: src/device-calibration.h
// License: Apache 2.0. See LICENSE file in root directory.
// Copyright(c) 2020 Intel Corporation. All Rights Reserved.

#pragma once

#include "types.h"
#include "core/streaming.h"

namespace librealsense
{
    // This extension should allow generic calibrations using the same interface
    // by adding to rs2_calibration_type instead of adding new function calls
    class calibration_change_device
    {
    public:
        virtual void register_calibration_change_callback(calibration_change_callback_ptr) = 0;
    };
    MAP_EXTENSION(RS2_EXTENSION_CALIBRATION_CHANGE_DEVICE, calibration_change_device);

    // This extension should allow generic calibrations using the same interface
    // by adding to rs2_calibration_type instead of adding new function calls
    class device_calibration : public calibration_change_device
    {
    public:
        virtual void trigger_device_calibration( rs2_calibration_type ) = 0;
    };
    MAP_EXTENSION(RS2_EXTENSION_DEVICE_CALIBRATION, device_calibration );
}
import json
import os

import sublime

from . import log
from .Common import Singleton

__file__ = os.path.normpath(os.path.abspath(__file__))
__path__ = os.path.dirname(__file__)


class SettingEventSource(object):
    def __init__(self):
        self._subscribers = {}

    def notify(self, evt_type, *evt_args, **evt_kwargs):
        if evt_type not in self._subscribers:
            return
        for subscriber in self._subscribers[evt_type]:
            try:
                subscriber(*evt_args, **evt_kwargs)
            except:
                log.exception("Error on calling event subscriber for setting: %s", str(subscriber))

    def subscribe(self, evt_type, subscriber):
        if evt_type not in self._subscribers:
            self._subscribers[evt_type] = set()
        self._subscribers[evt_type].add(subscriber)

    def clear_subscribers(self):
        self._subscribers.clear()


@Singleton
class Setting(SettingEventSource):
    def __init__(self):
        SettingEventSource.__init__(self)

    @staticmethod
    def _read_default_settings():
        default_settings_filename = os.path.join(__path__, '../', 'default_settings.json')
        default_settings_filename = os.path.normpath(default_settings_filename)
        with open(default_settings_filename) as f:
            settings_obj = json.load(f)
        return settings_obj

    def _fix_setting_type(self):
        type_conversion_map = {
            'ignored_renderers': set,
        }
        for attr, typ in type_conversion_map.items():
            v = getattr(self, attr)
            setattr(self, attr, typ(v))

    def load_setting(self):
        PLUGIN_NAME = 'OmniMarkupPreviewer'
        settings = sublime.load_settings(PLUGIN_NAME + '.sublime-settings')
        settings.clear_on_change(PLUGIN_NAME)
        settings.add_on_change(PLUGIN_NAME, self.sublime_settings_on_change)
        self._sublime_settings = settings

        # Merge new settings into the default settings
        default_settings = self._read_default_settings()
        for k, v in default_settings.items():
            if isinstance(v, dict):
                v.update(settings.get(k, {}))
            else:
                v = settings.get(k, v)
            setattr(self, k, v)
        self._fix_setting_type()

    def get_setting(self, k, default=None):
        return getattr(self, k, default)

    def init(self):
        self.clear_subscribers()
        self.load_setting()

    def sublime_settings_on_change(self):
        log.info('Reloading settings...')
        self.notify('changing', setting=self)
        self.load_setting()
        self.notify('changed', setting=self)
<reponame>yakoder/java.watson-developer-cloud.java-sdk<filename>discovery/src/main/java/com/ibm/watson/discovery/v2/Discovery.java /* * (C) Copyright IBM Corp. 2019, 2021. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ /* * IBM OpenAPI SDK Code Generator Version: 3.38.0-07189efd-20210827-205025 */ package com.ibm.watson.discovery.v2; import com.google.gson.JsonObject; import com.ibm.cloud.sdk.core.http.RequestBuilder; import com.ibm.cloud.sdk.core.http.ResponseConverter; import com.ibm.cloud.sdk.core.http.ServiceCall; import com.ibm.cloud.sdk.core.security.Authenticator; import com.ibm.cloud.sdk.core.security.ConfigBasedAuthenticatorFactory; import com.ibm.cloud.sdk.core.service.BaseService; import com.ibm.cloud.sdk.core.util.RequestUtils; import com.ibm.cloud.sdk.core.util.ResponseConverterUtils; import com.ibm.watson.common.SdkCommon; import com.ibm.watson.discovery.v2.model.AddDocumentOptions; import com.ibm.watson.discovery.v2.model.AnalyzeDocumentOptions; import com.ibm.watson.discovery.v2.model.AnalyzedDocument; import com.ibm.watson.discovery.v2.model.CollectionDetails; import com.ibm.watson.discovery.v2.model.Completions; import com.ibm.watson.discovery.v2.model.ComponentSettingsResponse; import com.ibm.watson.discovery.v2.model.CreateCollectionOptions; import com.ibm.watson.discovery.v2.model.CreateEnrichmentOptions; import com.ibm.watson.discovery.v2.model.CreateProjectOptions; import com.ibm.watson.discovery.v2.model.CreateTrainingQueryOptions; import com.ibm.watson.discovery.v2.model.DeleteCollectionOptions; import com.ibm.watson.discovery.v2.model.DeleteDocumentOptions; import com.ibm.watson.discovery.v2.model.DeleteDocumentResponse; import com.ibm.watson.discovery.v2.model.DeleteEnrichmentOptions; import com.ibm.watson.discovery.v2.model.DeleteProjectOptions; import com.ibm.watson.discovery.v2.model.DeleteTrainingQueriesOptions; import com.ibm.watson.discovery.v2.model.DeleteTrainingQueryOptions; import com.ibm.watson.discovery.v2.model.DeleteUserDataOptions; import com.ibm.watson.discovery.v2.model.DocumentAccepted; import com.ibm.watson.discovery.v2.model.Enrichment; import com.ibm.watson.discovery.v2.model.Enrichments; import com.ibm.watson.discovery.v2.model.GetAutocompletionOptions; import com.ibm.watson.discovery.v2.model.GetCollectionOptions; import com.ibm.watson.discovery.v2.model.GetComponentSettingsOptions; import com.ibm.watson.discovery.v2.model.GetEnrichmentOptions; import com.ibm.watson.discovery.v2.model.GetProjectOptions; import com.ibm.watson.discovery.v2.model.GetTrainingQueryOptions; import com.ibm.watson.discovery.v2.model.ListCollectionsOptions; import com.ibm.watson.discovery.v2.model.ListCollectionsResponse; import com.ibm.watson.discovery.v2.model.ListEnrichmentsOptions; import com.ibm.watson.discovery.v2.model.ListFieldsOptions; import com.ibm.watson.discovery.v2.model.ListFieldsResponse; import com.ibm.watson.discovery.v2.model.ListProjectsOptions; import com.ibm.watson.discovery.v2.model.ListProjectsResponse; import 
com.ibm.watson.discovery.v2.model.ListTrainingQueriesOptions; import com.ibm.watson.discovery.v2.model.ProjectDetails; import com.ibm.watson.discovery.v2.model.QueryCollectionNoticesOptions; import com.ibm.watson.discovery.v2.model.QueryNoticesOptions; import com.ibm.watson.discovery.v2.model.QueryNoticesResponse; import com.ibm.watson.discovery.v2.model.QueryOptions; import com.ibm.watson.discovery.v2.model.QueryResponse; import com.ibm.watson.discovery.v2.model.TrainingQuery; import com.ibm.watson.discovery.v2.model.TrainingQuerySet; import com.ibm.watson.discovery.v2.model.UpdateCollectionOptions; import com.ibm.watson.discovery.v2.model.UpdateDocumentOptions; import com.ibm.watson.discovery.v2.model.UpdateEnrichmentOptions; import com.ibm.watson.discovery.v2.model.UpdateProjectOptions; import com.ibm.watson.discovery.v2.model.UpdateTrainingQueryOptions; import java.util.HashMap; import java.util.Map; import java.util.Map.Entry; import okhttp3.MultipartBody; /** * IBM Watson&amp;trade; Discovery is a cognitive search and content analytics engine that you can * add to applications to identify patterns, trends and actionable insights to drive better * decision-making. Securely unify structured and unstructured data with pre-enriched content, and * use a simplified query language to eliminate the need for manual filtering of results. * * <p>API Version: 2.0 See: https://cloud.ibm.com/docs/discovery-data */ public class Discovery extends BaseService { public static final String DEFAULT_SERVICE_NAME = "discovery"; public static final String DEFAULT_SERVICE_URL = "https://api.us-south.discovery.watson.cloud.ibm.com"; private String version; /** * Constructs an instance of the `Discovery` client. The default service name is used to configure * the client instance. * * @param version Release date of the version of the API you want to use. Specify dates in * YYYY-MM-DD format. The current version is `2020-08-30`. */ public Discovery(String version) { this( version, DEFAULT_SERVICE_NAME, ConfigBasedAuthenticatorFactory.getAuthenticator(DEFAULT_SERVICE_NAME)); } /** * Constructs an instance of the `Discovery` client. The default service name and specified * authenticator are used to configure the client instance. * * @param version Release date of the version of the API you want to use. Specify dates in * YYYY-MM-DD format. The current version is `2020-08-30`. * @param authenticator the {@link Authenticator} instance to be configured for this client */ public Discovery(String version, Authenticator authenticator) { this(version, DEFAULT_SERVICE_NAME, authenticator); } /** * Constructs an instance of the `Discovery` client. The specified service name is used to * configure the client instance. * * @param version Release date of the version of the API you want to use. Specify dates in * YYYY-MM-DD format. The current version is `2020-08-30`. * @param serviceName the service name to be used when configuring the client instance */ public Discovery(String version, String serviceName) { this(version, serviceName, ConfigBasedAuthenticatorFactory.getAuthenticator(serviceName)); } /** * Constructs an instance of the `Discovery` client. The specified service name and authenticator * are used to configure the client instance. * * @param version Release date of the version of the API you want to use. Specify dates in * YYYY-MM-DD format. The current version is `2020-08-30`. 
* @param serviceName the service name to be used when configuring the client instance * @param authenticator the {@link Authenticator} instance to be configured for this client */ public Discovery(String version, String serviceName, Authenticator authenticator) { super(serviceName, authenticator); setServiceUrl(DEFAULT_SERVICE_URL); setVersion(version); this.configureService(serviceName); } /** * Gets the version. * * <p>Release date of the version of the API you want to use. Specify dates in YYYY-MM-DD format. * The current version is `2020-08-30`. * * @return the version */ public String getVersion() { return this.version; } /** * Sets the version. * * @param version the new version */ public void setVersion(final String version) { com.ibm.cloud.sdk.core.util.Validator.notEmpty(version, "version cannot be empty."); this.version = version; } /** * List collections. * * <p>Lists existing collections for the specified project. * * @param listCollectionsOptions the {@link ListCollectionsOptions} containing the options for the * call * @return a {@link ServiceCall} with a result of type {@link ListCollectionsResponse} */ public ServiceCall<ListCollectionsResponse> listCollections( ListCollectionsOptions listCollectionsOptions) { com.ibm.cloud.sdk.core.util.Validator.notNull( listCollectionsOptions, "listCollectionsOptions cannot be null"); Map<String, String> pathParamsMap = new HashMap<String, String>(); pathParamsMap.put("project_id", listCollectionsOptions.projectId()); RequestBuilder builder = RequestBuilder.get( RequestBuilder.resolveRequestUrl( getServiceUrl(), "/v2/projects/{project_id}/collections", pathParamsMap)); Map<String, String> sdkHeaders = SdkCommon.getSdkHeaders("discovery", "v2", "listCollections"); for (Entry<String, String> header : sdkHeaders.entrySet()) { builder.header(header.getKey(), header.getValue()); } builder.header("Accept", "application/json"); builder.query("version", String.valueOf(this.version)); ResponseConverter<ListCollectionsResponse> responseConverter = ResponseConverterUtils.getValue( new com.google.gson.reflect.TypeToken<ListCollectionsResponse>() {}.getType()); return createServiceCall(builder.build(), responseConverter); } /** * Create a collection. * * <p>Create a new collection in the specified project. 
* * @param createCollectionOptions the {@link CreateCollectionOptions} containing the options for * the call * @return a {@link ServiceCall} with a result of type {@link CollectionDetails} */ public ServiceCall<CollectionDetails> createCollection( CreateCollectionOptions createCollectionOptions) { com.ibm.cloud.sdk.core.util.Validator.notNull( createCollectionOptions, "createCollectionOptions cannot be null"); Map<String, String> pathParamsMap = new HashMap<String, String>(); pathParamsMap.put("project_id", createCollectionOptions.projectId()); RequestBuilder builder = RequestBuilder.post( RequestBuilder.resolveRequestUrl( getServiceUrl(), "/v2/projects/{project_id}/collections", pathParamsMap)); Map<String, String> sdkHeaders = SdkCommon.getSdkHeaders("discovery", "v2", "createCollection"); for (Entry<String, String> header : sdkHeaders.entrySet()) { builder.header(header.getKey(), header.getValue()); } builder.header("Accept", "application/json"); builder.query("version", String.valueOf(this.version)); final JsonObject contentJson = new JsonObject(); contentJson.addProperty("name", createCollectionOptions.name()); if (createCollectionOptions.description() != null) { contentJson.addProperty("description", createCollectionOptions.description()); } if (createCollectionOptions.language() != null) { contentJson.addProperty("language", createCollectionOptions.language()); } if (createCollectionOptions.enrichments() != null) { contentJson.add( "enrichments", com.ibm.cloud.sdk.core.util.GsonSingleton.getGson() .toJsonTree(createCollectionOptions.enrichments())); } builder.bodyJson(contentJson); ResponseConverter<CollectionDetails> responseConverter = ResponseConverterUtils.getValue( new com.google.gson.reflect.TypeToken<CollectionDetails>() {}.getType()); return createServiceCall(builder.build(), responseConverter); } /** * Get collection. * * <p>Get details about the specified collection. * * @param getCollectionOptions the {@link GetCollectionOptions} containing the options for the * call * @return a {@link ServiceCall} with a result of type {@link CollectionDetails} */ public ServiceCall<CollectionDetails> getCollection(GetCollectionOptions getCollectionOptions) { com.ibm.cloud.sdk.core.util.Validator.notNull( getCollectionOptions, "getCollectionOptions cannot be null"); Map<String, String> pathParamsMap = new HashMap<String, String>(); pathParamsMap.put("project_id", getCollectionOptions.projectId()); pathParamsMap.put("collection_id", getCollectionOptions.collectionId()); RequestBuilder builder = RequestBuilder.get( RequestBuilder.resolveRequestUrl( getServiceUrl(), "/v2/projects/{project_id}/collections/{collection_id}", pathParamsMap)); Map<String, String> sdkHeaders = SdkCommon.getSdkHeaders("discovery", "v2", "getCollection"); for (Entry<String, String> header : sdkHeaders.entrySet()) { builder.header(header.getKey(), header.getValue()); } builder.header("Accept", "application/json"); builder.query("version", String.valueOf(this.version)); ResponseConverter<CollectionDetails> responseConverter = ResponseConverterUtils.getValue( new com.google.gson.reflect.TypeToken<CollectionDetails>() {}.getType()); return createServiceCall(builder.build(), responseConverter); } /** * Update a collection. * * <p>Updates the specified collection's name, description, and enrichments. 
* * @param updateCollectionOptions the {@link UpdateCollectionOptions} containing the options for * the call * @return a {@link ServiceCall} with a result of type {@link CollectionDetails} */ public ServiceCall<CollectionDetails> updateCollection( UpdateCollectionOptions updateCollectionOptions) { com.ibm.cloud.sdk.core.util.Validator.notNull( updateCollectionOptions, "updateCollectionOptions cannot be null"); Map<String, String> pathParamsMap = new HashMap<String, String>(); pathParamsMap.put("project_id", updateCollectionOptions.projectId()); pathParamsMap.put("collection_id", updateCollectionOptions.collectionId()); RequestBuilder builder = RequestBuilder.post( RequestBuilder.resolveRequestUrl( getServiceUrl(), "/v2/projects/{project_id}/collections/{collection_id}", pathParamsMap)); Map<String, String> sdkHeaders = SdkCommon.getSdkHeaders("discovery", "v2", "updateCollection"); for (Entry<String, String> header : sdkHeaders.entrySet()) { builder.header(header.getKey(), header.getValue()); } builder.header("Accept", "application/json"); builder.query("version", String.valueOf(this.version)); final JsonObject contentJson = new JsonObject(); if (updateCollectionOptions.name() != null) { contentJson.addProperty("name", updateCollectionOptions.name()); } if (updateCollectionOptions.description() != null) { contentJson.addProperty("description", updateCollectionOptions.description()); } if (updateCollectionOptions.enrichments() != null) { contentJson.add( "enrichments", com.ibm.cloud.sdk.core.util.GsonSingleton.getGson() .toJsonTree(updateCollectionOptions.enrichments())); } builder.bodyJson(contentJson); ResponseConverter<CollectionDetails> responseConverter = ResponseConverterUtils.getValue( new com.google.gson.reflect.TypeToken<CollectionDetails>() {}.getType()); return createServiceCall(builder.build(), responseConverter); } /** * Delete a collection. * * <p>Deletes the specified collection from the project. All documents stored in the specified * collection and not shared is also deleted. * * @param deleteCollectionOptions the {@link DeleteCollectionOptions} containing the options for * the call * @return a {@link ServiceCall} with a void result */ public ServiceCall<Void> deleteCollection(DeleteCollectionOptions deleteCollectionOptions) { com.ibm.cloud.sdk.core.util.Validator.notNull( deleteCollectionOptions, "deleteCollectionOptions cannot be null"); Map<String, String> pathParamsMap = new HashMap<String, String>(); pathParamsMap.put("project_id", deleteCollectionOptions.projectId()); pathParamsMap.put("collection_id", deleteCollectionOptions.collectionId()); RequestBuilder builder = RequestBuilder.delete( RequestBuilder.resolveRequestUrl( getServiceUrl(), "/v2/projects/{project_id}/collections/{collection_id}", pathParamsMap)); Map<String, String> sdkHeaders = SdkCommon.getSdkHeaders("discovery", "v2", "deleteCollection"); for (Entry<String, String> header : sdkHeaders.entrySet()) { builder.header(header.getKey(), header.getValue()); } builder.query("version", String.valueOf(this.version)); ResponseConverter<Void> responseConverter = ResponseConverterUtils.getVoid(); return createServiceCall(builder.build(), responseConverter); } /** * Query a project. * * <p>By using this method, you can construct queries. For details, see the [Discovery * documentation](https://cloud.ibm.com/docs/discovery-data?topic=discovery-data-query-concepts). 
* The default query parameters are defined by the settings for this project, see the [Discovery * documentation](https://cloud.ibm.com/docs/discovery-data?topic=discovery-data-project-defaults) * for an overview of the standard default settings, and see [the Projects API * documentation](#create-project) for details about how to set custom default query settings. * * @param queryOptions the {@link QueryOptions} containing the options for the call * @return a {@link ServiceCall} with a result of type {@link QueryResponse} */ public ServiceCall<QueryResponse> query(QueryOptions queryOptions) { com.ibm.cloud.sdk.core.util.Validator.notNull(queryOptions, "queryOptions cannot be null"); Map<String, String> pathParamsMap = new HashMap<String, String>(); pathParamsMap.put("project_id", queryOptions.projectId()); RequestBuilder builder = RequestBuilder.post( RequestBuilder.resolveRequestUrl( getServiceUrl(), "/v2/projects/{project_id}/query", pathParamsMap)); Map<String, String> sdkHeaders = SdkCommon.getSdkHeaders("discovery", "v2", "query"); for (Entry<String, String> header : sdkHeaders.entrySet()) { builder.header(header.getKey(), header.getValue()); } builder.header("Accept", "application/json"); builder.query("version", String.valueOf(this.version)); final JsonObject contentJson = new JsonObject(); if (queryOptions.collectionIds() != null) { contentJson.add( "collection_ids", com.ibm.cloud.sdk.core.util.GsonSingleton.getGson() .toJsonTree(queryOptions.collectionIds())); } if (queryOptions.filter() != null) { contentJson.addProperty("filter", queryOptions.filter()); } if (queryOptions.query() != null) { contentJson.addProperty("query", queryOptions.query()); } if (queryOptions.naturalLanguageQuery() != null) { contentJson.addProperty("natural_language_query", queryOptions.naturalLanguageQuery()); } if (queryOptions.aggregation() != null) { contentJson.addProperty("aggregation", queryOptions.aggregation()); } if (queryOptions.count() != null) { contentJson.addProperty("count", queryOptions.count()); } if (queryOptions.xReturn() != null) { contentJson.add( "return", com.ibm.cloud.sdk.core.util.GsonSingleton.getGson().toJsonTree(queryOptions.xReturn())); } if (queryOptions.offset() != null) { contentJson.addProperty("offset", queryOptions.offset()); } if (queryOptions.sort() != null) { contentJson.addProperty("sort", queryOptions.sort()); } if (queryOptions.highlight() != null) { contentJson.addProperty("highlight", queryOptions.highlight()); } if (queryOptions.spellingSuggestions() != null) { contentJson.addProperty("spelling_suggestions", queryOptions.spellingSuggestions()); } if (queryOptions.tableResults() != null) { contentJson.add( "table_results", com.ibm.cloud.sdk.core.util.GsonSingleton.getGson() .toJsonTree(queryOptions.tableResults())); } if (queryOptions.suggestedRefinements() != null) { contentJson.add( "suggested_refinements", com.ibm.cloud.sdk.core.util.GsonSingleton.getGson() .toJsonTree(queryOptions.suggestedRefinements())); } if (queryOptions.passages() != null) { contentJson.add( "passages", com.ibm.cloud.sdk.core.util.GsonSingleton.getGson().toJsonTree(queryOptions.passages())); } builder.bodyJson(contentJson); ResponseConverter<QueryResponse> responseConverter = ResponseConverterUtils.getValue( new com.google.gson.reflect.TypeToken<QueryResponse>() {}.getType()); return createServiceCall(builder.build(), responseConverter); } /** * Get Autocomplete Suggestions. * * <p>Returns completion query suggestions for the specified prefix. 
* * @param getAutocompletionOptions the {@link GetAutocompletionOptions} containing the options for * the call * @return a {@link ServiceCall} with a result of type {@link Completions} */ public ServiceCall<Completions> getAutocompletion( GetAutocompletionOptions getAutocompletionOptions) { com.ibm.cloud.sdk.core.util.Validator.notNull( getAutocompletionOptions, "getAutocompletionOptions cannot be null"); Map<String, String> pathParamsMap = new HashMap<String, String>(); pathParamsMap.put("project_id", getAutocompletionOptions.projectId()); RequestBuilder builder = RequestBuilder.get( RequestBuilder.resolveRequestUrl( getServiceUrl(), "/v2/projects/{project_id}/autocompletion", pathParamsMap)); Map<String, String> sdkHeaders = SdkCommon.getSdkHeaders("discovery", "v2", "getAutocompletion"); for (Entry<String, String> header : sdkHeaders.entrySet()) { builder.header(header.getKey(), header.getValue()); } builder.header("Accept", "application/json"); builder.query("version", String.valueOf(this.version)); builder.query("prefix", String.valueOf(getAutocompletionOptions.prefix())); if (getAutocompletionOptions.collectionIds() != null) { builder.query( "collection_ids", RequestUtils.join(getAutocompletionOptions.collectionIds(), ",")); } if (getAutocompletionOptions.field() != null) { builder.query("field", String.valueOf(getAutocompletionOptions.field())); } if (getAutocompletionOptions.count() != null) { builder.query("count", String.valueOf(getAutocompletionOptions.count())); } ResponseConverter<Completions> responseConverter = ResponseConverterUtils.getValue( new com.google.gson.reflect.TypeToken<Completions>() {}.getType()); return createServiceCall(builder.build(), responseConverter); } /** * Query collection notices. * * <p>Finds collection-level notices (errors and warnings) that are generated when documents are * ingested. 
* * @param queryCollectionNoticesOptions the {@link QueryCollectionNoticesOptions} containing the * options for the call * @return a {@link ServiceCall} with a result of type {@link QueryNoticesResponse} */ public ServiceCall<QueryNoticesResponse> queryCollectionNotices( QueryCollectionNoticesOptions queryCollectionNoticesOptions) { com.ibm.cloud.sdk.core.util.Validator.notNull( queryCollectionNoticesOptions, "queryCollectionNoticesOptions cannot be null"); Map<String, String> pathParamsMap = new HashMap<String, String>(); pathParamsMap.put("project_id", queryCollectionNoticesOptions.projectId()); pathParamsMap.put("collection_id", queryCollectionNoticesOptions.collectionId()); RequestBuilder builder = RequestBuilder.get( RequestBuilder.resolveRequestUrl( getServiceUrl(), "/v2/projects/{project_id}/collections/{collection_id}/notices", pathParamsMap)); Map<String, String> sdkHeaders = SdkCommon.getSdkHeaders("discovery", "v2", "queryCollectionNotices"); for (Entry<String, String> header : sdkHeaders.entrySet()) { builder.header(header.getKey(), header.getValue()); } builder.header("Accept", "application/json"); builder.query("version", String.valueOf(this.version)); if (queryCollectionNoticesOptions.filter() != null) { builder.query("filter", String.valueOf(queryCollectionNoticesOptions.filter())); } if (queryCollectionNoticesOptions.query() != null) { builder.query("query", String.valueOf(queryCollectionNoticesOptions.query())); } if (queryCollectionNoticesOptions.naturalLanguageQuery() != null) { builder.query( "natural_language_query", String.valueOf(queryCollectionNoticesOptions.naturalLanguageQuery())); } if (queryCollectionNoticesOptions.count() != null) { builder.query("count", String.valueOf(queryCollectionNoticesOptions.count())); } if (queryCollectionNoticesOptions.offset() != null) { builder.query("offset", String.valueOf(queryCollectionNoticesOptions.offset())); } ResponseConverter<QueryNoticesResponse> responseConverter = ResponseConverterUtils.getValue( new com.google.gson.reflect.TypeToken<QueryNoticesResponse>() {}.getType()); return createServiceCall(builder.build(), responseConverter); } /** * Query project notices. * * <p>Finds project-level notices (errors and warnings). Currently, project-level notices are * generated by relevancy training. 
* * @param queryNoticesOptions the {@link QueryNoticesOptions} containing the options for the call * @return a {@link ServiceCall} with a result of type {@link QueryNoticesResponse} */ public ServiceCall<QueryNoticesResponse> queryNotices(QueryNoticesOptions queryNoticesOptions) { com.ibm.cloud.sdk.core.util.Validator.notNull( queryNoticesOptions, "queryNoticesOptions cannot be null"); Map<String, String> pathParamsMap = new HashMap<String, String>(); pathParamsMap.put("project_id", queryNoticesOptions.projectId()); RequestBuilder builder = RequestBuilder.get( RequestBuilder.resolveRequestUrl( getServiceUrl(), "/v2/projects/{project_id}/notices", pathParamsMap)); Map<String, String> sdkHeaders = SdkCommon.getSdkHeaders("discovery", "v2", "queryNotices"); for (Entry<String, String> header : sdkHeaders.entrySet()) { builder.header(header.getKey(), header.getValue()); } builder.header("Accept", "application/json"); builder.query("version", String.valueOf(this.version)); if (queryNoticesOptions.filter() != null) { builder.query("filter", String.valueOf(queryNoticesOptions.filter())); } if (queryNoticesOptions.query() != null) { builder.query("query", String.valueOf(queryNoticesOptions.query())); } if (queryNoticesOptions.naturalLanguageQuery() != null) { builder.query( "natural_language_query", String.valueOf(queryNoticesOptions.naturalLanguageQuery())); } if (queryNoticesOptions.count() != null) { builder.query("count", String.valueOf(queryNoticesOptions.count())); } if (queryNoticesOptions.offset() != null) { builder.query("offset", String.valueOf(queryNoticesOptions.offset())); } ResponseConverter<QueryNoticesResponse> responseConverter = ResponseConverterUtils.getValue( new com.google.gson.reflect.TypeToken<QueryNoticesResponse>() {}.getType()); return createServiceCall(builder.build(), responseConverter); } /** * List fields. * * <p>Gets a list of the unique fields (and their types) stored in the the specified collections. * * @param listFieldsOptions the {@link ListFieldsOptions} containing the options for the call * @return a {@link ServiceCall} with a result of type {@link ListFieldsResponse} */ public ServiceCall<ListFieldsResponse> listFields(ListFieldsOptions listFieldsOptions) { com.ibm.cloud.sdk.core.util.Validator.notNull( listFieldsOptions, "listFieldsOptions cannot be null"); Map<String, String> pathParamsMap = new HashMap<String, String>(); pathParamsMap.put("project_id", listFieldsOptions.projectId()); RequestBuilder builder = RequestBuilder.get( RequestBuilder.resolveRequestUrl( getServiceUrl(), "/v2/projects/{project_id}/fields", pathParamsMap)); Map<String, String> sdkHeaders = SdkCommon.getSdkHeaders("discovery", "v2", "listFields"); for (Entry<String, String> header : sdkHeaders.entrySet()) { builder.header(header.getKey(), header.getValue()); } builder.header("Accept", "application/json"); builder.query("version", String.valueOf(this.version)); if (listFieldsOptions.collectionIds() != null) { builder.query("collection_ids", RequestUtils.join(listFieldsOptions.collectionIds(), ",")); } ResponseConverter<ListFieldsResponse> responseConverter = ResponseConverterUtils.getValue( new com.google.gson.reflect.TypeToken<ListFieldsResponse>() {}.getType()); return createServiceCall(builder.build(), responseConverter); } /** * List component settings. * * <p>Returns default configuration settings for components. 
* * @param getComponentSettingsOptions the {@link GetComponentSettingsOptions} containing the * options for the call * @return a {@link ServiceCall} with a result of type {@link ComponentSettingsResponse} */ public ServiceCall<ComponentSettingsResponse> getComponentSettings( GetComponentSettingsOptions getComponentSettingsOptions) { com.ibm.cloud.sdk.core.util.Validator.notNull( getComponentSettingsOptions, "getComponentSettingsOptions cannot be null"); Map<String, String> pathParamsMap = new HashMap<String, String>(); pathParamsMap.put("project_id", getComponentSettingsOptions.projectId()); RequestBuilder builder = RequestBuilder.get( RequestBuilder.resolveRequestUrl( getServiceUrl(), "/v2/projects/{project_id}/component_settings", pathParamsMap)); Map<String, String> sdkHeaders = SdkCommon.getSdkHeaders("discovery", "v2", "getComponentSettings"); for (Entry<String, String> header : sdkHeaders.entrySet()) { builder.header(header.getKey(), header.getValue()); } builder.header("Accept", "application/json"); builder.query("version", String.valueOf(this.version)); ResponseConverter<ComponentSettingsResponse> responseConverter = ResponseConverterUtils.getValue( new com.google.gson.reflect.TypeToken<ComponentSettingsResponse>() {}.getType()); return createServiceCall(builder.build(), responseConverter); } /** * Add a document. * * <p>Add a document to a collection with optional metadata. * * <p>Returns immediately after the system has accepted the document for processing. * * <p>* The user must provide document content, metadata, or both. If the request is missing both * document content and metadata, it is rejected. * * <p>* You can set the **Content-Type** parameter on the **file** part to indicate the media type * of the document. If the **Content-Type** parameter is missing or is one of the generic media * types (for example, `application/octet-stream`), then the service attempts to automatically * detect the document's media type. * * <p>* The following field names are reserved and are filtered out if present after * normalization: `id`, `score`, `highlight`, and any field with the prefix of: `_`, `+`, or `-` * * <p>* Fields with empty name values after normalization are filtered out before indexing. * * <p>* Fields that contain the following characters after normalization are filtered out before * indexing: `#` and `,` * * <p>If the document is uploaded to a collection that shares its data with another collection, * the **X-Watson-Discovery-Force** header must be set to `true`. * * <p>**Note:** You can assign an ID to a document that you add by appending the ID to the * endpoint (`/v2/projects/{project_id}/collections/{collection_id}/documents/{document_id}`). If * a document already exists with the specified ID, it is replaced. * * <p>**Note:** This operation works with a file upload collection. It cannot be used to modify a * collection that crawls an external data source. 
* * @param addDocumentOptions the {@link AddDocumentOptions} containing the options for the call * @return a {@link ServiceCall} with a result of type {@link DocumentAccepted} */ public ServiceCall<DocumentAccepted> addDocument(AddDocumentOptions addDocumentOptions) { com.ibm.cloud.sdk.core.util.Validator.notNull( addDocumentOptions, "addDocumentOptions cannot be null"); com.ibm.cloud.sdk.core.util.Validator.isTrue( (addDocumentOptions.file() != null) || (addDocumentOptions.metadata() != null), "At least one of file or metadata must be supplied."); Map<String, String> pathParamsMap = new HashMap<String, String>(); pathParamsMap.put("project_id", addDocumentOptions.projectId()); pathParamsMap.put("collection_id", addDocumentOptions.collectionId()); RequestBuilder builder = RequestBuilder.post( RequestBuilder.resolveRequestUrl( getServiceUrl(), "/v2/projects/{project_id}/collections/{collection_id}/documents", pathParamsMap)); Map<String, String> sdkHeaders = SdkCommon.getSdkHeaders("discovery", "v2", "addDocument"); for (Entry<String, String> header : sdkHeaders.entrySet()) { builder.header(header.getKey(), header.getValue()); } builder.header("Accept", "application/json"); if (addDocumentOptions.xWatsonDiscoveryForce() != null) { builder.header("X-Watson-Discovery-Force", addDocumentOptions.xWatsonDiscoveryForce()); } builder.query("version", String.valueOf(this.version)); MultipartBody.Builder multipartBuilder = new MultipartBody.Builder(); multipartBuilder.setType(MultipartBody.FORM); if (addDocumentOptions.file() != null) { okhttp3.RequestBody fileBody = RequestUtils.inputStreamBody( addDocumentOptions.file(), addDocumentOptions.fileContentType()); multipartBuilder.addFormDataPart("file", addDocumentOptions.filename(), fileBody); } if (addDocumentOptions.metadata() != null) { multipartBuilder.addFormDataPart("metadata", addDocumentOptions.metadata()); } builder.body(multipartBuilder.build()); ResponseConverter<DocumentAccepted> responseConverter = ResponseConverterUtils.getValue( new com.google.gson.reflect.TypeToken<DocumentAccepted>() {}.getType()); return createServiceCall(builder.build(), responseConverter); } /** * Update a document. * * <p>Replace an existing document or add a document with a specified **document_id**. Starts * ingesting a document with optional metadata. * * <p>If the document is uploaded to a collection that shares its data with another collection, * the **X-Watson-Discovery-Force** header must be set to `true`. * * <p>**Note:** When uploading a new document with this method it automatically replaces any * document stored with the same **document_id** if it exists. * * <p>**Note:** This operation only works on collections created to accept direct file uploads. It * cannot be used to modify a collection that connects to an external source such as Microsoft * SharePoint. * * <p>**Note:** If an uploaded document is segmented, all segments are overwritten, even if the * updated version of the document has fewer segments. 
* * @param updateDocumentOptions the {@link UpdateDocumentOptions} containing the options for the * call * @return a {@link ServiceCall} with a result of type {@link DocumentAccepted} */ public ServiceCall<DocumentAccepted> updateDocument(UpdateDocumentOptions updateDocumentOptions) { com.ibm.cloud.sdk.core.util.Validator.notNull( updateDocumentOptions, "updateDocumentOptions cannot be null"); com.ibm.cloud.sdk.core.util.Validator.isTrue( (updateDocumentOptions.file() != null) || (updateDocumentOptions.metadata() != null), "At least one of file or metadata must be supplied."); Map<String, String> pathParamsMap = new HashMap<String, String>(); pathParamsMap.put("project_id", updateDocumentOptions.projectId()); pathParamsMap.put("collection_id", updateDocumentOptions.collectionId()); pathParamsMap.put("document_id", updateDocumentOptions.documentId()); RequestBuilder builder = RequestBuilder.post( RequestBuilder.resolveRequestUrl( getServiceUrl(), "/v2/projects/{project_id}/collections/{collection_id}/documents/{document_id}", pathParamsMap)); Map<String, String> sdkHeaders = SdkCommon.getSdkHeaders("discovery", "v2", "updateDocument"); for (Entry<String, String> header : sdkHeaders.entrySet()) { builder.header(header.getKey(), header.getValue()); } builder.header("Accept", "application/json"); if (updateDocumentOptions.xWatsonDiscoveryForce() != null) { builder.header("X-Watson-Discovery-Force", updateDocumentOptions.xWatsonDiscoveryForce()); } builder.query("version", String.valueOf(this.version)); MultipartBody.Builder multipartBuilder = new MultipartBody.Builder(); multipartBuilder.setType(MultipartBody.FORM); if (updateDocumentOptions.file() != null) { okhttp3.RequestBody fileBody = RequestUtils.inputStreamBody( updateDocumentOptions.file(), updateDocumentOptions.fileContentType()); multipartBuilder.addFormDataPart("file", updateDocumentOptions.filename(), fileBody); } if (updateDocumentOptions.metadata() != null) { multipartBuilder.addFormDataPart("metadata", updateDocumentOptions.metadata()); } builder.body(multipartBuilder.build()); ResponseConverter<DocumentAccepted> responseConverter = ResponseConverterUtils.getValue( new com.google.gson.reflect.TypeToken<DocumentAccepted>() {}.getType()); return createServiceCall(builder.build(), responseConverter); } /** * Delete a document. * * <p>If the given document ID is invalid, or if the document is not found, then the a success * response is returned (HTTP status code `200`) with the status set to 'deleted'. * * <p>**Note:** This operation only works on collections created to accept direct file uploads. It * cannot be used to modify a collection that connects to an external source such as Microsoft * SharePoint. * * <p>**Note:** Segments of an uploaded document cannot be deleted individually. Delete all * segments by deleting using the `parent_document_id` of a segment result. 
* * @param deleteDocumentOptions the {@link DeleteDocumentOptions} containing the options for the * call * @return a {@link ServiceCall} with a result of type {@link DeleteDocumentResponse} */ public ServiceCall<DeleteDocumentResponse> deleteDocument( DeleteDocumentOptions deleteDocumentOptions) { com.ibm.cloud.sdk.core.util.Validator.notNull( deleteDocumentOptions, "deleteDocumentOptions cannot be null"); Map<String, String> pathParamsMap = new HashMap<String, String>(); pathParamsMap.put("project_id", deleteDocumentOptions.projectId()); pathParamsMap.put("collection_id", deleteDocumentOptions.collectionId()); pathParamsMap.put("document_id", deleteDocumentOptions.documentId()); RequestBuilder builder = RequestBuilder.delete( RequestBuilder.resolveRequestUrl( getServiceUrl(), "/v2/projects/{project_id}/collections/{collection_id}/documents/{document_id}", pathParamsMap)); Map<String, String> sdkHeaders = SdkCommon.getSdkHeaders("discovery", "v2", "deleteDocument"); for (Entry<String, String> header : sdkHeaders.entrySet()) { builder.header(header.getKey(), header.getValue()); } builder.header("Accept", "application/json"); if (deleteDocumentOptions.xWatsonDiscoveryForce() != null) { builder.header("X-Watson-Discovery-Force", deleteDocumentOptions.xWatsonDiscoveryForce()); } builder.query("version", String.valueOf(this.version)); ResponseConverter<DeleteDocumentResponse> responseConverter = ResponseConverterUtils.getValue( new com.google.gson.reflect.TypeToken<DeleteDocumentResponse>() {}.getType()); return createServiceCall(builder.build(), responseConverter); } /** * List training queries. * * <p>List the training queries for the specified project. * * @param listTrainingQueriesOptions the {@link ListTrainingQueriesOptions} containing the options * for the call * @return a {@link ServiceCall} with a result of type {@link TrainingQuerySet} */ public ServiceCall<TrainingQuerySet> listTrainingQueries( ListTrainingQueriesOptions listTrainingQueriesOptions) { com.ibm.cloud.sdk.core.util.Validator.notNull( listTrainingQueriesOptions, "listTrainingQueriesOptions cannot be null"); Map<String, String> pathParamsMap = new HashMap<String, String>(); pathParamsMap.put("project_id", listTrainingQueriesOptions.projectId()); RequestBuilder builder = RequestBuilder.get( RequestBuilder.resolveRequestUrl( getServiceUrl(), "/v2/projects/{project_id}/training_data/queries", pathParamsMap)); Map<String, String> sdkHeaders = SdkCommon.getSdkHeaders("discovery", "v2", "listTrainingQueries"); for (Entry<String, String> header : sdkHeaders.entrySet()) { builder.header(header.getKey(), header.getValue()); } builder.header("Accept", "application/json"); builder.query("version", String.valueOf(this.version)); ResponseConverter<TrainingQuerySet> responseConverter = ResponseConverterUtils.getValue( new com.google.gson.reflect.TypeToken<TrainingQuerySet>() {}.getType()); return createServiceCall(builder.build(), responseConverter); } /** * Delete training queries. * * <p>Removes all training queries for the specified project. 
* * @param deleteTrainingQueriesOptions the {@link DeleteTrainingQueriesOptions} containing the * options for the call * @return a {@link ServiceCall} with a void result */ public ServiceCall<Void> deleteTrainingQueries( DeleteTrainingQueriesOptions deleteTrainingQueriesOptions) { com.ibm.cloud.sdk.core.util.Validator.notNull( deleteTrainingQueriesOptions, "deleteTrainingQueriesOptions cannot be null"); Map<String, String> pathParamsMap = new HashMap<String, String>(); pathParamsMap.put("project_id", deleteTrainingQueriesOptions.projectId()); RequestBuilder builder = RequestBuilder.delete( RequestBuilder.resolveRequestUrl( getServiceUrl(), "/v2/projects/{project_id}/training_data/queries", pathParamsMap)); Map<String, String> sdkHeaders = SdkCommon.getSdkHeaders("discovery", "v2", "deleteTrainingQueries"); for (Entry<String, String> header : sdkHeaders.entrySet()) { builder.header(header.getKey(), header.getValue()); } builder.query("version", String.valueOf(this.version)); ResponseConverter<Void> responseConverter = ResponseConverterUtils.getVoid(); return createServiceCall(builder.build(), responseConverter); } /** * Create training query. * * <p>Add a query to the training data for this project. The query can contain a filter and * natural language query. * * @param createTrainingQueryOptions the {@link CreateTrainingQueryOptions} containing the options * for the call * @return a {@link ServiceCall} with a result of type {@link TrainingQuery} */ public ServiceCall<TrainingQuery> createTrainingQuery( CreateTrainingQueryOptions createTrainingQueryOptions) { com.ibm.cloud.sdk.core.util.Validator.notNull( createTrainingQueryOptions, "createTrainingQueryOptions cannot be null"); Map<String, String> pathParamsMap = new HashMap<String, String>(); pathParamsMap.put("project_id", createTrainingQueryOptions.projectId()); RequestBuilder builder = RequestBuilder.post( RequestBuilder.resolveRequestUrl( getServiceUrl(), "/v2/projects/{project_id}/training_data/queries", pathParamsMap)); Map<String, String> sdkHeaders = SdkCommon.getSdkHeaders("discovery", "v2", "createTrainingQuery"); for (Entry<String, String> header : sdkHeaders.entrySet()) { builder.header(header.getKey(), header.getValue()); } builder.header("Accept", "application/json"); builder.query("version", String.valueOf(this.version)); final JsonObject contentJson = new JsonObject(); contentJson.addProperty( "natural_language_query", createTrainingQueryOptions.naturalLanguageQuery()); contentJson.add( "examples", com.ibm.cloud.sdk.core.util.GsonSingleton.getGson() .toJsonTree(createTrainingQueryOptions.examples())); if (createTrainingQueryOptions.filter() != null) { contentJson.addProperty("filter", createTrainingQueryOptions.filter()); } builder.bodyJson(contentJson); ResponseConverter<TrainingQuery> responseConverter = ResponseConverterUtils.getValue( new com.google.gson.reflect.TypeToken<TrainingQuery>() {}.getType()); return createServiceCall(builder.build(), responseConverter); } /** * Get a training data query. * * <p>Get details for a specific training data query, including the query string and all examples. 
* * @param getTrainingQueryOptions the {@link GetTrainingQueryOptions} containing the options for * the call * @return a {@link ServiceCall} with a result of type {@link TrainingQuery} */ public ServiceCall<TrainingQuery> getTrainingQuery( GetTrainingQueryOptions getTrainingQueryOptions) { com.ibm.cloud.sdk.core.util.Validator.notNull( getTrainingQueryOptions, "getTrainingQueryOptions cannot be null"); Map<String, String> pathParamsMap = new HashMap<String, String>(); pathParamsMap.put("project_id", getTrainingQueryOptions.projectId()); pathParamsMap.put("query_id", getTrainingQueryOptions.queryId()); RequestBuilder builder = RequestBuilder.get( RequestBuilder.resolveRequestUrl( getServiceUrl(), "/v2/projects/{project_id}/training_data/queries/{query_id}", pathParamsMap)); Map<String, String> sdkHeaders = SdkCommon.getSdkHeaders("discovery", "v2", "getTrainingQuery"); for (Entry<String, String> header : sdkHeaders.entrySet()) { builder.header(header.getKey(), header.getValue()); } builder.header("Accept", "application/json"); builder.query("version", String.valueOf(this.version)); ResponseConverter<TrainingQuery> responseConverter = ResponseConverterUtils.getValue( new com.google.gson.reflect.TypeToken<TrainingQuery>() {}.getType()); return createServiceCall(builder.build(), responseConverter); } /** * Update a training query. * * <p>Updates an existing training query and it's examples. * * @param updateTrainingQueryOptions the {@link UpdateTrainingQueryOptions} containing the options * for the call * @return a {@link ServiceCall} with a result of type {@link TrainingQuery} */ public ServiceCall<TrainingQuery> updateTrainingQuery( UpdateTrainingQueryOptions updateTrainingQueryOptions) { com.ibm.cloud.sdk.core.util.Validator.notNull( updateTrainingQueryOptions, "updateTrainingQueryOptions cannot be null"); Map<String, String> pathParamsMap = new HashMap<String, String>(); pathParamsMap.put("project_id", updateTrainingQueryOptions.projectId()); pathParamsMap.put("query_id", updateTrainingQueryOptions.queryId()); RequestBuilder builder = RequestBuilder.post( RequestBuilder.resolveRequestUrl( getServiceUrl(), "/v2/projects/{project_id}/training_data/queries/{query_id}", pathParamsMap)); Map<String, String> sdkHeaders = SdkCommon.getSdkHeaders("discovery", "v2", "updateTrainingQuery"); for (Entry<String, String> header : sdkHeaders.entrySet()) { builder.header(header.getKey(), header.getValue()); } builder.header("Accept", "application/json"); builder.query("version", String.valueOf(this.version)); final JsonObject contentJson = new JsonObject(); contentJson.addProperty( "natural_language_query", updateTrainingQueryOptions.naturalLanguageQuery()); contentJson.add( "examples", com.ibm.cloud.sdk.core.util.GsonSingleton.getGson() .toJsonTree(updateTrainingQueryOptions.examples())); if (updateTrainingQueryOptions.filter() != null) { contentJson.addProperty("filter", updateTrainingQueryOptions.filter()); } builder.bodyJson(contentJson); ResponseConverter<TrainingQuery> responseConverter = ResponseConverterUtils.getValue( new com.google.gson.reflect.TypeToken<TrainingQuery>() {}.getType()); return createServiceCall(builder.build(), responseConverter); } /** * Delete a training data query. * * <p>Removes details from a training data query, including the query string and all examples. 
* * @param deleteTrainingQueryOptions the {@link DeleteTrainingQueryOptions} containing the options * for the call * @return a {@link ServiceCall} with a void result */ public ServiceCall<Void> deleteTrainingQuery( DeleteTrainingQueryOptions deleteTrainingQueryOptions) { com.ibm.cloud.sdk.core.util.Validator.notNull( deleteTrainingQueryOptions, "deleteTrainingQueryOptions cannot be null"); Map<String, String> pathParamsMap = new HashMap<String, String>(); pathParamsMap.put("project_id", deleteTrainingQueryOptions.projectId()); pathParamsMap.put("query_id", deleteTrainingQueryOptions.queryId()); RequestBuilder builder = RequestBuilder.delete( RequestBuilder.resolveRequestUrl( getServiceUrl(), "/v2/projects/{project_id}/training_data/queries/{query_id}", pathParamsMap)); Map<String, String> sdkHeaders = SdkCommon.getSdkHeaders("discovery", "v2", "deleteTrainingQuery"); for (Entry<String, String> header : sdkHeaders.entrySet()) { builder.header(header.getKey(), header.getValue()); } builder.query("version", String.valueOf(this.version)); ResponseConverter<Void> responseConverter = ResponseConverterUtils.getVoid(); return createServiceCall(builder.build(), responseConverter); } /** * Analyze a Document. * * <p>Process a document and return it for realtime use. Supports JSON files only. * * <p>The document is processed according to the collection's configuration settings but is not * stored in the collection. * * <p>**Note:** This method is supported on installed instances of Discovery only. * * @param analyzeDocumentOptions the {@link AnalyzeDocumentOptions} containing the options for the * call * @return a {@link ServiceCall} with a result of type {@link AnalyzedDocument} */ public ServiceCall<AnalyzedDocument> analyzeDocument( AnalyzeDocumentOptions analyzeDocumentOptions) { com.ibm.cloud.sdk.core.util.Validator.notNull( analyzeDocumentOptions, "analyzeDocumentOptions cannot be null"); com.ibm.cloud.sdk.core.util.Validator.isTrue( (analyzeDocumentOptions.file() != null) || (analyzeDocumentOptions.metadata() != null), "At least one of file or metadata must be supplied."); Map<String, String> pathParamsMap = new HashMap<String, String>(); pathParamsMap.put("project_id", analyzeDocumentOptions.projectId()); pathParamsMap.put("collection_id", analyzeDocumentOptions.collectionId()); RequestBuilder builder = RequestBuilder.post( RequestBuilder.resolveRequestUrl( getServiceUrl(), "/v2/projects/{project_id}/collections/{collection_id}/analyze", pathParamsMap)); Map<String, String> sdkHeaders = SdkCommon.getSdkHeaders("discovery", "v2", "analyzeDocument"); for (Entry<String, String> header : sdkHeaders.entrySet()) { builder.header(header.getKey(), header.getValue()); } builder.header("Accept", "application/json"); builder.query("version", String.valueOf(this.version)); MultipartBody.Builder multipartBuilder = new MultipartBody.Builder(); multipartBuilder.setType(MultipartBody.FORM); if (analyzeDocumentOptions.file() != null) { okhttp3.RequestBody fileBody = RequestUtils.inputStreamBody( analyzeDocumentOptions.file(), analyzeDocumentOptions.fileContentType()); multipartBuilder.addFormDataPart("file", analyzeDocumentOptions.filename(), fileBody); } if (analyzeDocumentOptions.metadata() != null) { multipartBuilder.addFormDataPart("metadata", analyzeDocumentOptions.metadata()); } builder.body(multipartBuilder.build()); ResponseConverter<AnalyzedDocument> responseConverter = ResponseConverterUtils.getValue( new com.google.gson.reflect.TypeToken<AnalyzedDocument>() {}.getType()); return 
createServiceCall(builder.build(), responseConverter); } /** * List Enrichments. * * <p>Lists the enrichments available to this project. The *Part of Speech* and *Sentiment of * Phrases* enrichments might be listed, but are reserved for internal use only. * * @param listEnrichmentsOptions the {@link ListEnrichmentsOptions} containing the options for the * call * @return a {@link ServiceCall} with a result of type {@link Enrichments} */ public ServiceCall<Enrichments> listEnrichments(ListEnrichmentsOptions listEnrichmentsOptions) { com.ibm.cloud.sdk.core.util.Validator.notNull( listEnrichmentsOptions, "listEnrichmentsOptions cannot be null"); Map<String, String> pathParamsMap = new HashMap<String, String>(); pathParamsMap.put("project_id", listEnrichmentsOptions.projectId()); RequestBuilder builder = RequestBuilder.get( RequestBuilder.resolveRequestUrl( getServiceUrl(), "/v2/projects/{project_id}/enrichments", pathParamsMap)); Map<String, String> sdkHeaders = SdkCommon.getSdkHeaders("discovery", "v2", "listEnrichments"); for (Entry<String, String> header : sdkHeaders.entrySet()) { builder.header(header.getKey(), header.getValue()); } builder.header("Accept", "application/json"); builder.query("version", String.valueOf(this.version)); ResponseConverter<Enrichments> responseConverter = ResponseConverterUtils.getValue( new com.google.gson.reflect.TypeToken<Enrichments>() {}.getType()); return createServiceCall(builder.build(), responseConverter); } /** * Create an enrichment. * * <p>Create an enrichment for use with the specified project. * * @param createEnrichmentOptions the {@link CreateEnrichmentOptions} containing the options for * the call * @return a {@link ServiceCall} with a result of type {@link Enrichment} */ public ServiceCall<Enrichment> createEnrichment(CreateEnrichmentOptions createEnrichmentOptions) { com.ibm.cloud.sdk.core.util.Validator.notNull( createEnrichmentOptions, "createEnrichmentOptions cannot be null"); Map<String, String> pathParamsMap = new HashMap<String, String>(); pathParamsMap.put("project_id", createEnrichmentOptions.projectId()); RequestBuilder builder = RequestBuilder.post( RequestBuilder.resolveRequestUrl( getServiceUrl(), "/v2/projects/{project_id}/enrichments", pathParamsMap)); Map<String, String> sdkHeaders = SdkCommon.getSdkHeaders("discovery", "v2", "createEnrichment"); for (Entry<String, String> header : sdkHeaders.entrySet()) { builder.header(header.getKey(), header.getValue()); } builder.header("Accept", "application/json"); builder.query("version", String.valueOf(this.version)); MultipartBody.Builder multipartBuilder = new MultipartBody.Builder(); multipartBuilder.setType(MultipartBody.FORM); multipartBuilder.addFormDataPart("enrichment", createEnrichmentOptions.enrichment().toString()); if (createEnrichmentOptions.file() != null) { okhttp3.RequestBody fileBody = RequestUtils.inputStreamBody(createEnrichmentOptions.file(), "application/octet-stream"); multipartBuilder.addFormDataPart("file", "filename", fileBody); } builder.body(multipartBuilder.build()); ResponseConverter<Enrichment> responseConverter = ResponseConverterUtils.getValue( new com.google.gson.reflect.TypeToken<Enrichment>() {}.getType()); return createServiceCall(builder.build(), responseConverter); } /** * Get enrichment. * * <p>Get details about a specific enrichment. 
* * @param getEnrichmentOptions the {@link GetEnrichmentOptions} containing the options for the * call * @return a {@link ServiceCall} with a result of type {@link Enrichment} */ public ServiceCall<Enrichment> getEnrichment(GetEnrichmentOptions getEnrichmentOptions) { com.ibm.cloud.sdk.core.util.Validator.notNull( getEnrichmentOptions, "getEnrichmentOptions cannot be null"); Map<String, String> pathParamsMap = new HashMap<String, String>(); pathParamsMap.put("project_id", getEnrichmentOptions.projectId()); pathParamsMap.put("enrichment_id", getEnrichmentOptions.enrichmentId()); RequestBuilder builder = RequestBuilder.get( RequestBuilder.resolveRequestUrl( getServiceUrl(), "/v2/projects/{project_id}/enrichments/{enrichment_id}", pathParamsMap)); Map<String, String> sdkHeaders = SdkCommon.getSdkHeaders("discovery", "v2", "getEnrichment"); for (Entry<String, String> header : sdkHeaders.entrySet()) { builder.header(header.getKey(), header.getValue()); } builder.header("Accept", "application/json"); builder.query("version", String.valueOf(this.version)); ResponseConverter<Enrichment> responseConverter = ResponseConverterUtils.getValue( new com.google.gson.reflect.TypeToken<Enrichment>() {}.getType()); return createServiceCall(builder.build(), responseConverter); } /** * Update an enrichment. * * <p>Updates an existing enrichment's name and description. * * @param updateEnrichmentOptions the {@link UpdateEnrichmentOptions} containing the options for * the call * @return a {@link ServiceCall} with a result of type {@link Enrichment} */ public ServiceCall<Enrichment> updateEnrichment(UpdateEnrichmentOptions updateEnrichmentOptions) { com.ibm.cloud.sdk.core.util.Validator.notNull( updateEnrichmentOptions, "updateEnrichmentOptions cannot be null"); Map<String, String> pathParamsMap = new HashMap<String, String>(); pathParamsMap.put("project_id", updateEnrichmentOptions.projectId()); pathParamsMap.put("enrichment_id", updateEnrichmentOptions.enrichmentId()); RequestBuilder builder = RequestBuilder.post( RequestBuilder.resolveRequestUrl( getServiceUrl(), "/v2/projects/{project_id}/enrichments/{enrichment_id}", pathParamsMap)); Map<String, String> sdkHeaders = SdkCommon.getSdkHeaders("discovery", "v2", "updateEnrichment"); for (Entry<String, String> header : sdkHeaders.entrySet()) { builder.header(header.getKey(), header.getValue()); } builder.header("Accept", "application/json"); builder.query("version", String.valueOf(this.version)); final JsonObject contentJson = new JsonObject(); contentJson.addProperty("name", updateEnrichmentOptions.name()); if (updateEnrichmentOptions.description() != null) { contentJson.addProperty("description", updateEnrichmentOptions.description()); } builder.bodyJson(contentJson); ResponseConverter<Enrichment> responseConverter = ResponseConverterUtils.getValue( new com.google.gson.reflect.TypeToken<Enrichment>() {}.getType()); return createServiceCall(builder.build(), responseConverter); } /** * Delete an enrichment. * * <p>Deletes an existing enrichment from the specified project. * * <p>**Note:** Only enrichments that have been manually created can be deleted. 
* * @param deleteEnrichmentOptions the {@link DeleteEnrichmentOptions} containing the options for * the call * @return a {@link ServiceCall} with a void result */ public ServiceCall<Void> deleteEnrichment(DeleteEnrichmentOptions deleteEnrichmentOptions) { com.ibm.cloud.sdk.core.util.Validator.notNull( deleteEnrichmentOptions, "deleteEnrichmentOptions cannot be null"); Map<String, String> pathParamsMap = new HashMap<String, String>(); pathParamsMap.put("project_id", deleteEnrichmentOptions.projectId()); pathParamsMap.put("enrichment_id", deleteEnrichmentOptions.enrichmentId()); RequestBuilder builder = RequestBuilder.delete( RequestBuilder.resolveRequestUrl( getServiceUrl(), "/v2/projects/{project_id}/enrichments/{enrichment_id}", pathParamsMap)); Map<String, String> sdkHeaders = SdkCommon.getSdkHeaders("discovery", "v2", "deleteEnrichment"); for (Entry<String, String> header : sdkHeaders.entrySet()) { builder.header(header.getKey(), header.getValue()); } builder.query("version", String.valueOf(this.version)); ResponseConverter<Void> responseConverter = ResponseConverterUtils.getVoid(); return createServiceCall(builder.build(), responseConverter); } /** * List projects. * * <p>Lists existing projects for this instance. * * @param listProjectsOptions the {@link ListProjectsOptions} containing the options for the call * @return a {@link ServiceCall} with a result of type {@link ListProjectsResponse} */ public ServiceCall<ListProjectsResponse> listProjects(ListProjectsOptions listProjectsOptions) { RequestBuilder builder = RequestBuilder.get(RequestBuilder.resolveRequestUrl(getServiceUrl(), "/v2/projects")); Map<String, String> sdkHeaders = SdkCommon.getSdkHeaders("discovery", "v2", "listProjects"); for (Entry<String, String> header : sdkHeaders.entrySet()) { builder.header(header.getKey(), header.getValue()); } builder.header("Accept", "application/json"); builder.query("version", String.valueOf(this.version)); ResponseConverter<ListProjectsResponse> responseConverter = ResponseConverterUtils.getValue( new com.google.gson.reflect.TypeToken<ListProjectsResponse>() {}.getType()); return createServiceCall(builder.build(), responseConverter); } /** * List projects. * * <p>Lists existing projects for this instance. * * @return a {@link ServiceCall} with a result of type {@link ListProjectsResponse} */ public ServiceCall<ListProjectsResponse> listProjects() { return listProjects(null); } /** * Create a Project. * * <p>Create a new project for this instance. 
* * @param createProjectOptions the {@link CreateProjectOptions} containing the options for the * call * @return a {@link ServiceCall} with a result of type {@link ProjectDetails} */ public ServiceCall<ProjectDetails> createProject(CreateProjectOptions createProjectOptions) { com.ibm.cloud.sdk.core.util.Validator.notNull( createProjectOptions, "createProjectOptions cannot be null"); RequestBuilder builder = RequestBuilder.post(RequestBuilder.resolveRequestUrl(getServiceUrl(), "/v2/projects")); Map<String, String> sdkHeaders = SdkCommon.getSdkHeaders("discovery", "v2", "createProject"); for (Entry<String, String> header : sdkHeaders.entrySet()) { builder.header(header.getKey(), header.getValue()); } builder.header("Accept", "application/json"); builder.query("version", String.valueOf(this.version)); final JsonObject contentJson = new JsonObject(); contentJson.addProperty("name", createProjectOptions.name()); contentJson.addProperty("type", createProjectOptions.type()); if (createProjectOptions.defaultQueryParameters() != null) { contentJson.add( "default_query_parameters", com.ibm.cloud.sdk.core.util.GsonSingleton.getGson() .toJsonTree(createProjectOptions.defaultQueryParameters())); } builder.bodyJson(contentJson); ResponseConverter<ProjectDetails> responseConverter = ResponseConverterUtils.getValue( new com.google.gson.reflect.TypeToken<ProjectDetails>() {}.getType()); return createServiceCall(builder.build(), responseConverter); } /** * Get project. * * <p>Get details on the specified project. * * @param getProjectOptions the {@link GetProjectOptions} containing the options for the call * @return a {@link ServiceCall} with a result of type {@link ProjectDetails} */ public ServiceCall<ProjectDetails> getProject(GetProjectOptions getProjectOptions) { com.ibm.cloud.sdk.core.util.Validator.notNull( getProjectOptions, "getProjectOptions cannot be null"); Map<String, String> pathParamsMap = new HashMap<String, String>(); pathParamsMap.put("project_id", getProjectOptions.projectId()); RequestBuilder builder = RequestBuilder.get( RequestBuilder.resolveRequestUrl( getServiceUrl(), "/v2/projects/{project_id}", pathParamsMap)); Map<String, String> sdkHeaders = SdkCommon.getSdkHeaders("discovery", "v2", "getProject"); for (Entry<String, String> header : sdkHeaders.entrySet()) { builder.header(header.getKey(), header.getValue()); } builder.header("Accept", "application/json"); builder.query("version", String.valueOf(this.version)); ResponseConverter<ProjectDetails> responseConverter = ResponseConverterUtils.getValue( new com.google.gson.reflect.TypeToken<ProjectDetails>() {}.getType()); return createServiceCall(builder.build(), responseConverter); } /** * Update a project. * * <p>Update the specified project's name. 
* * @param updateProjectOptions the {@link UpdateProjectOptions} containing the options for the * call * @return a {@link ServiceCall} with a result of type {@link ProjectDetails} */ public ServiceCall<ProjectDetails> updateProject(UpdateProjectOptions updateProjectOptions) { com.ibm.cloud.sdk.core.util.Validator.notNull( updateProjectOptions, "updateProjectOptions cannot be null"); Map<String, String> pathParamsMap = new HashMap<String, String>(); pathParamsMap.put("project_id", updateProjectOptions.projectId()); RequestBuilder builder = RequestBuilder.post( RequestBuilder.resolveRequestUrl( getServiceUrl(), "/v2/projects/{project_id}", pathParamsMap)); Map<String, String> sdkHeaders = SdkCommon.getSdkHeaders("discovery", "v2", "updateProject"); for (Entry<String, String> header : sdkHeaders.entrySet()) { builder.header(header.getKey(), header.getValue()); } builder.header("Accept", "application/json"); builder.query("version", String.valueOf(this.version)); final JsonObject contentJson = new JsonObject(); if (updateProjectOptions.name() != null) { contentJson.addProperty("name", updateProjectOptions.name()); } builder.bodyJson(contentJson); ResponseConverter<ProjectDetails> responseConverter = ResponseConverterUtils.getValue( new com.google.gson.reflect.TypeToken<ProjectDetails>() {}.getType()); return createServiceCall(builder.build(), responseConverter); } /** * Delete a project. * * <p>Deletes the specified project. * * <p>**Important:** Deleting a project deletes everything that is part of the specified project, * including all collections. * * @param deleteProjectOptions the {@link DeleteProjectOptions} containing the options for the * call * @return a {@link ServiceCall} with a void result */ public ServiceCall<Void> deleteProject(DeleteProjectOptions deleteProjectOptions) { com.ibm.cloud.sdk.core.util.Validator.notNull( deleteProjectOptions, "deleteProjectOptions cannot be null"); Map<String, String> pathParamsMap = new HashMap<String, String>(); pathParamsMap.put("project_id", deleteProjectOptions.projectId()); RequestBuilder builder = RequestBuilder.delete( RequestBuilder.resolveRequestUrl( getServiceUrl(), "/v2/projects/{project_id}", pathParamsMap)); Map<String, String> sdkHeaders = SdkCommon.getSdkHeaders("discovery", "v2", "deleteProject"); for (Entry<String, String> header : sdkHeaders.entrySet()) { builder.header(header.getKey(), header.getValue()); } builder.query("version", String.valueOf(this.version)); ResponseConverter<Void> responseConverter = ResponseConverterUtils.getVoid(); return createServiceCall(builder.build(), responseConverter); } /** * Delete labeled data. * * <p>Deletes all data associated with a specified customer ID. The method has no effect if no * data is associated with the customer ID. * * <p>You associate a customer ID with data by passing the **X-Watson-Metadata** header with a * request that passes data. For more information about personal data and customer IDs, see * [Information * security](https://cloud.ibm.com/docs/discovery-data?topic=discovery-data-information-security#information-security). * * <p>**Note:** This method is only supported on IBM Cloud instances of Discovery. 
* * @param deleteUserDataOptions the {@link DeleteUserDataOptions} containing the options for the * call * @return a {@link ServiceCall} with a void result */ public ServiceCall<Void> deleteUserData(DeleteUserDataOptions deleteUserDataOptions) { com.ibm.cloud.sdk.core.util.Validator.notNull( deleteUserDataOptions, "deleteUserDataOptions cannot be null"); RequestBuilder builder = RequestBuilder.delete(RequestBuilder.resolveRequestUrl(getServiceUrl(), "/v2/user_data")); Map<String, String> sdkHeaders = SdkCommon.getSdkHeaders("discovery", "v2", "deleteUserData"); for (Entry<String, String> header : sdkHeaders.entrySet()) { builder.header(header.getKey(), header.getValue()); } builder.query("version", String.valueOf(this.version)); builder.query("customer_id", String.valueOf(deleteUserDataOptions.customerId())); ResponseConverter<Void> responseConverter = ResponseConverterUtils.getVoid(); return createServiceCall(builder.build(), responseConverter); } }
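// Illustrative usage sketch (not taken from the upstream SDK docs): the builder
// names below follow the SDK's usual <Operation>Options.Builder pattern, and the
// project id, query text, and the `service` instance of this class are assumed
// placeholder values. ServiceCall#execute() and Response#getResult() come from the
// IBM Cloud SDK core.
//
//   CreateTrainingQueryOptions options = new CreateTrainingQueryOptions.Builder()
//       .projectId("my-project-id")
//       .naturalLanguageQuery("How do I reset my password?")
//       .examples(new java.util.ArrayList<>())   // start with no labeled examples
//       .build();
//   TrainingQuery created = service.createTrainingQuery(options).execute().getResult();
//
//   // Deleting the query later only needs the project and query ids:
//   service.deleteTrainingQuery(new DeleteTrainingQueryOptions.Builder()
//       .projectId("my-project-id")
//       .queryId(created.getQueryId())
//       .build()).execute();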
24,717
455
/* Copyright (c) 2003-2004, <NAME>
 * Copyright (c) 2004-2006, <NAME>, <NAME>.
 * Copyright (c) 2007-2019, The Tor Project, Inc. */
/* See LICENSE for licensing information */

/**
 * \file addsub.h
 *
 * \brief Header for addsub.c
 **/

#ifndef TOR_INTMATH_ADDSUB_H
#define TOR_INTMATH_ADDSUB_H

#include "lib/cc/torint.h"

uint32_t tor_add_u32_nowrap(uint32_t a, uint32_t b);

#endif /* !defined(TOR_INTMATH_ADDSUB_H) */
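/* Illustrative sketch only: a plausible saturating ("no wrap") implementation
 * consistent with the declaration above. The canonical definition lives in
 * addsub.c and may differ in detail; the name below is deliberately different
 * so it is not mistaken for Tor's own code. */
#include <stdint.h>

static inline uint32_t
example_add_u32_nowrap(uint32_t a, uint32_t b)
{
  /* Clamp at UINT32_MAX instead of wrapping around on overflow. */
  return (b > UINT32_MAX - a) ? UINT32_MAX : (uint32_t)(a + b);
}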
176
19,438
<gh_stars>1000+ /* * Copyright (c) 2020, <NAME> <<EMAIL>> * * SPDX-License-Identifier: BSD-2-Clause */ #pragma once #include <AK/Forward.h> #include <LibGUI/Forward.h> #include <sys/types.h> namespace GUI { class FileIconProvider { public: static Icon icon_for_path(const String&, mode_t); static Icon icon_for_path(const String&); static Icon icon_for_executable(const String&); static Icon filetype_image_icon(); static Icon directory_icon(); static Icon directory_open_icon(); static Icon home_directory_icon(); static Icon home_directory_open_icon(); static Icon desktop_directory_icon(); }; }
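// Illustrative usage sketch (hypothetical caller, not part of the upstream header):
// `entry_path` and `entry_mode` are assumed inputs, e.g. from a directory listing.
// Preferring the mode-aware overload lets directories and executables get their
// specialized icons instead of the generic file icon.
//
// #include <LibGUI/FileIconProvider.h>
//
// static GUI::Icon icon_for_listing_entry(const String& entry_path, mode_t entry_mode)
// {
//     return GUI::FileIconProvider::icon_for_path(entry_path, entry_mode);
// }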
232
9,782
/* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.facebook.presto.verifier.prestoaction; import com.facebook.presto.jdbc.QueryStats; import java.util.Optional; import static java.util.Objects.requireNonNull; public class QueryActionStats { public static final QueryActionStats EMPTY_STATS = new QueryActionStats(Optional.empty(), Optional.empty()); private final Optional<QueryStats> queryStats; private final Optional<String> extraStats; public QueryActionStats(Optional<QueryStats> queryStats, Optional<String> extraStats) { this.queryStats = requireNonNull(queryStats, "queryStats is null"); this.extraStats = requireNonNull(extraStats, "extraStats is null"); } public Optional<QueryStats> getQueryStats() { return queryStats; } public Optional<String> getExtraStats() { return extraStats; } }
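// Illustrative usage sketch: constructing the stats holder when the JDBC driver
// reported no QueryStats. The extra-stats string below is a made-up placeholder.
class QueryActionStatsExample
{
    public static void main(String[] args)
    {
        QueryActionStats stats = new QueryActionStats(Optional.empty(), Optional.of("wall_time_ms=1234"));
        System.out.println(stats.getExtraStats().orElse("no extra stats"));
    }
}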
428
377
{ "(additional {{amount}} pending)": "", "Cancel": "Annulla", "Confirm Request": "Conferma richiesta", "Connected since {{created, LL}}": "Connessə dal {{created, LL}}", "Contact request sent and pending": { "": "Richieste di contatto inviate e in attesa." }, "Decline Request": "Rifiuta la richiesta", "Decline contact request?": "Rifiutare la richiesta di contatto?", "From <2>{{locationFrom}}</2>": "Da <2>{{locationFrom}}</2>", "Lives in <2>{{locationLiving}}</2>": "Vive a <2>{{locationLiving}}</2>", "No contacts yet": { "": "Ancora nessun contatto." }, "One contact": "Un contatto", "Remove contact?": "Rimuovere il contatto?", "Requested {{created, LL}}": "Richiesta del {{created, LL}}", "Revoke Request": "Annulla la richiesta", "Revoke contact request?": "Annullare la richiesta di contatto?", "Since {{created, LL}}": "Dal {{created, LL}}", "Wait a moment": { "": { "": { "": "Aspetta un momento..." } } }, "Yes, decline request": "Sì, rifiutare la richiesta", "Yes, remove contact": "Sì, rimuovi il contatto", "Yes, revoke request": "Sì, annulla la richiesta", "You received a contact request": { "": "Hai ricevuto una richiesta di contatto." }, "{{amount}} contacts": "{{amount}} contatti", "{{count}} contacts in common": "Un contatto in comune", "{{count}} contacts in common_plural": "{{count}} contatti in comune", "{{count}} contacts": "{{count}} contatto", "{{count}} contacts_plural": "{{count}} contatti", "Search contacts": "Ricerca contatti", "(additional {{count}} pending)": "(ancora {{count}} in attesa)", "(additional {{count}} pending)_plural": "(ancora {{count}} in attesa)", "Wait a moment…": "Si prega di attendere…", "Requested {{date, LL}}": "Richiesta del {{date, LL}}", "Connected since {{date, LL}}": "Connessi dal {{date, LL}}" }
797
1,442
# Copyright (C) 2017 Beijing Didi Infinity Technology and Development Co.,Ltd. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== ''' Speech Postprocess ''' import os from absl import logging from delta.utils.postprocess.base_postproc import PostProc from delta.utils.register import registers #pylint: disable=too-many-instance-attributes, too-few-public-methods @registers.postprocess.register class SavePredPostProc(PostProc): '''Save the result of inference.''' #pylint: disable=arguments-differ, unused-argument def call(self, predictions, log_verbose=False): ''' main func entrypoint''' logits = predictions["logits"] preds = predictions["preds"] output_index = predictions["output_index"] if output_index is None: res_file = self.config["solver"]["postproc"].get("res_file", "") else: res_file = self.config["solver"]["postproc"][output_index].get( "res_file", "") if res_file == "": logging.info( "Infer res not saved. You can check 'res_file' in your config.") return res_dir = os.path.dirname(res_file) if not os.path.exists(res_dir): os.makedirs(res_dir) logging.info("Save inference result to: {}".format(res_file)) with open(res_file, "w") as in_f: for logit, pred in zip(logits, preds): in_f.write(" ".join(["{:.3f}".format(num) for num in logit]) + "\t{}\n".format(pred))
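# Illustrative sketch of the config shape the lookups above expect; the paths and
# file names are made up. With a single output, "postproc" is a dict; with
# multiple outputs (output_index set), it is a list indexed by output_index.
example_config_single = {
    "solver": {
        "postproc": {
            "res_file": "exp/infer/result.txt",  # where "<logits>\t<pred>" lines are written
        }
    }
}

example_config_multi = {
    "solver": {
        "postproc": [
            {"res_file": "exp/infer/result_0.txt"},
            {"res_file": "exp/infer/result_1.txt"},
        ]
    }
}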
686
1,444
package mage.sets; import mage.cards.ExpansionSet; import mage.constants.Rarity; import mage.constants.SetType; /** * https://scryfall.com/sets/purl */ public class URLConventionPromos extends ExpansionSet { private static final URLConventionPromos instance = new URLConventionPromos(); public static URLConventionPromos getInstance() { return instance; } private URLConventionPromos() { super("URL/Convention Promos", "PURL", ExpansionSet.buildDate(2015, 1, 23), SetType.PROMOTIONAL); this.hasBoosters = false; this.hasBasicLands = false; cards.add(new SetCardInfo("Aeronaut Tinkerer", 8, Rarity.COMMON, mage.cards.a.AeronautTinkerer.class)); cards.add(new SetCardInfo("Bloodthrone Vampire", 3, Rarity.RARE, mage.cards.b.BloodthroneVampire.class)); cards.add(new SetCardInfo("Chandra's Fury", 5, Rarity.RARE, mage.cards.c.ChandrasFury.class)); cards.add(new SetCardInfo("Kor Skyfisher", 2, Rarity.RARE, mage.cards.k.KorSkyfisher.class)); cards.add(new SetCardInfo("Merfolk Mesmerist", 4, Rarity.RARE, mage.cards.m.MerfolkMesmerist.class)); // Italian-only printing //cards.add(new SetCardInfo("Relentless Rats", 9, Rarity.RARE, mage.cards.r.RelentlessRats.class)); // Japanese-only printing //cards.add(new SetCardInfo("Shepherd of the Lost", "34*", Rarity.UNCOMMON, mage.cards.s.ShepherdOfTheLost.class)); cards.add(new SetCardInfo("Stealer of Secrets", 7, Rarity.RARE, mage.cards.s.StealerOfSecrets.class)); cards.add(new SetCardInfo("Steward of Valeron", 1, Rarity.RARE, mage.cards.s.StewardOfValeron.class)); } }
646
326
<filename>tests/algorithms/test_util.py # stdlib import math # pydp absolute import pydp as dp def test_default_epsilon(): assert dp.util.default_epsilon() == math.log(3) def test_next_power_positive(): kTolerance = 1e-5 npp1 = dp.util.get_next_power_of_two(3.0) npp2 = dp.util.get_next_power_of_two(5.0) npp3 = dp.util.get_next_power_of_two(7.9) assert abs(npp1 - 4) < kTolerance assert abs(npp2 - 8) < kTolerance assert abs(npp3 - 8) < kTolerance def test_next_power_exact_positive(): kTolerance = 1e-5 npep1 = dp.util.get_next_power_of_two(2.0) npep2 = dp.util.get_next_power_of_two(8.0) assert abs(npep1 - 2) < kTolerance assert abs(npep2 - 8) < kTolerance def test_next_power_one(): kTolerance = 1e-5 npo = dp.util.get_next_power_of_two(1.0) assert abs(npo - 1) < kTolerance def test_next_power_negative(): kTolerance = 1e-5 npn1 = dp.util.get_next_power_of_two(0.4) npn2 = dp.util.get_next_power_of_two(0.2) assert abs(npn1 - 0.5) < kTolerance assert abs(npn2 - 0.25) < kTolerance def test_next_power_exact_negative(): kTolerance = 1e-5 npn1 = dp.util.get_next_power_of_two(0.5) npn2 = dp.util.get_next_power_of_two(0.125) assert abs(npn1 - 0.5) < kTolerance assert abs(npn2 - 0.125) < kTolerance # def test_round_positive(): # kTolerance = 1e-5 # rp1 = dp.util.round_to_nearest_multiple(4.9, 2.0) # rp2 = dp.util.round_to_nearest_multiple(5.1, 2.0) # assert abs(rp1 - 4) < kTolerance # assert abs(rp2 - 6) < kTolerance # def test_round_negative(): # kTolerance = 1e-5 # rn1 = dp.util.round_to_nearest_multiple(-4.9, 2.0) # rn2 = dp.util.round_to_nearest_multiple(-5.1, 2.0) # assert abs(rn1 + 4) < kTolerance # assert abs(rn2 + 6) < kTolerance # def test_round_positive_ties(): # kTolerance = 1e-5 # rpt = dp.util.round_to_nearest_multiple(5.0, 2.0) # assert abs(rpt - 6.0) < kTolerance # def test_round_negative_ties(): # kTolerance = 1e-5 # rnt = dp.util.round_to_nearest_multiple(-5.0, 2.0) # assert abs(rnt + 4.0) < kTolerance def test_statistics(): a = [1.0, 5.0, 7.0, 9.0, 13.0] assert dp.util.mean(a) == 7.0 assert dp.util.variance(a) == 16.0 assert dp.util.standard_deviation(a) == 4.0 assert dp.util.order_statistics(0.6, a) == 8.0 assert dp.util.order_statistics(0, a) == 1.0 assert dp.util.order_statistics(1, a) == 13.0 def test_vector_filter(): v = [1.0, 2.0, 2.0, 3.0] selection = [False, True, True, False] expected = [2.0, 2.0] assert expected == dp.util.vector_filter(v, selection) def test_vector_to_string(): v = [1.0, 2.0, 2.0, 3.0] expected = "[1, 2, 2, 3]" assert dp.util.vector_to_string(v) == expected
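# Cross-check of the expected statistics above against the standard library:
# dp.util.variance / standard_deviation match the *population* definitions
# (divide by n), which is why 16.0 and 4.0 are the asserted values.
import statistics

_a = [1.0, 5.0, 7.0, 9.0, 13.0]
assert statistics.mean(_a) == 7.0
assert statistics.pvariance(_a) == 16.0  # population variance
assert statistics.pstdev(_a) == 4.0      # population standard deviation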
1,361
921
# Copyright 2014-2016 Presslabs SRL # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re import os import errno from fuse import FuseOSError from gitfs.utils.decorators.not_in import not_in from gitfs.utils.decorators.write_operation import write_operation from gitfs.log import log from gitfs.events import writers from .passthrough import PassthroughView, STATS class CurrentView(PassthroughView): def __init__(self, *args, **kwargs): super(CurrentView, self).__init__(*args, **kwargs) self.dirty = {} self.current_path = kwargs.get("current_path", "current") @write_operation @not_in("ignore", check=["old", "new"]) def rename(self, old, new): new = re.sub(self.regex, "", new) result = super(CurrentView, self).rename(old, new) message = "Rename {} to {}".format(old, new) self._stage(**{"remove": os.path.split(old)[1], "add": new, "message": message}) log.debug("CurrentView: Renamed %s to %s", old, new) return result @write_operation @not_in("ignore", check=["target"]) def symlink(self, name, target): result = os.symlink(target, self.repo._full_path(name)) message = "Create symlink to {} for {}".format(target, name) self._stage(add=name, message=message) log.debug("CurrentView: Created symlink to %s from %s", name, target) return result @write_operation @not_in("ignore", check=["target"]) def link(self, name, target): if target.startswith("/%s/" % self.current_path): target = target.replace("/%s/" % self.current_path, "/") result = super(CurrentView, self).link(target, name) message = "Create link to {} for {}".format(target, name) self._stage(add=name, message=message) log.debug("CurrentView: Created link to %s from %s", name, target) return result def readlink(self, path): log.debug("CurrentView: Read link %s", path) return os.readlink(self.repo._full_path(path)) def getattr(self, path, fh=None): full_path = self.repo._full_path(path) status = os.lstat(full_path) attrs = dict((key, getattr(status, key)) for key in STATS) attrs.update({"st_uid": self.uid, "st_gid": self.gid}) log.debug("CurrentView: Get attributes %s for %s", str(attrs), path) return attrs @write_operation @not_in("ignore", check=["path"]) def write(self, path, buf, offset, fh): """ We don't like big big files, so we need to be really carefull with them. First we check for offset, then for size. If any of this is off limit, raise EFBIG error and delete the file. 
""" if offset + len(buf) > self.max_size: raise FuseOSError(errno.EFBIG) result = super(CurrentView, self).write(path, buf, offset, fh) self.dirty[fh] = {"message": "Update {}".format(path), "stage": True} log.debug("CurrentView: Wrote %s to %s", len(buf), path) return result @write_operation @not_in("ignore", check=["path"]) def mkdir(self, path, mode): result = super(CurrentView, self).mkdir(path, mode) keep_path = "{}/.keep".format(path) full_path = self.repo._full_path(keep_path) if not os.path.exists(keep_path): global writers fh = os.open(full_path, os.O_WRONLY | os.O_CREAT) writers += 1 log.info("CurrentView: Open %s for write", full_path) super(CurrentView, self).chmod(keep_path, 0o644) self.dirty[fh] = { "message": "Create the {} directory".format(path), "stage": True, } self.release(keep_path, fh) log.debug("CurrentView: Created directory %s with mode %s", path, mode) return result def create(self, path, mode, fi=None): fh = self.open_for_write(path, os.O_WRONLY | os.O_CREAT) super(CurrentView, self).chmod(path, mode) self.dirty[fh] = {"message": "Created {}".format(path), "stage": True} log.debug("CurrentView: Created %s", path) return fh @write_operation @not_in("ignore", check=["path"]) def chmod(self, path, mode): """ Executes chmod on the file at os level and then it commits the change. """ str_mode = ("%o" % mode)[-4:] if str_mode not in ["0755", "0644"]: raise FuseOSError(errno.EINVAL) result = super(CurrentView, self).chmod(path, mode) if os.path.isdir(self.repo._full_path(path)): return result message = "Chmod to {} on {}".format(str_mode, path) self._stage(add=path, message=message) log.debug("CurrentView: Change %s mode to %s", path, ("0%o" % mode)[-4:]) return result @write_operation @not_in("ignore", check=["path"]) def fsync(self, path, fdatasync, fh): """ Each time you fsync, a new commit and push are made """ result = super(CurrentView, self).fsync(path, fdatasync, fh) message = "Fsync {}".format(path) self._stage(add=path, message=message) log.debug("CurrentView: Fsync %s", path) return result @write_operation @not_in("ignore", check=["path"]) def open_for_write(self, path, flags): global writers fh = self.open_for_read(path, flags) writers += 1 self.dirty[fh] = {"message": "Opened {} for write".format(path), "stage": False} log.debug("CurrentView: Open %s for write", path) return fh @write_operation @not_in("ignore", check=["path"]) def lock(self, path, fip, cmd, lock): return super().lock(path, fip, cmd, lock) def open_for_read(self, path, flags): full_path = self.repo._full_path(path) log.info("CurrentView: Open %s for read", path) return os.open(full_path, flags) def open(self, path, flags): write_mode = flags & (os.O_WRONLY | os.O_RDWR | os.O_APPEND | os.O_CREAT) if write_mode: return self.open_for_write(path, flags) return self.open_for_read(path, flags) def release(self, path, fh): """ Check for path if something was written to. If so, commit and push the changed to upstream. 
""" if fh in self.dirty: message = self.dirty[fh]["message"] should_stage = self.dirty[fh].get("stage", False) del self.dirty[fh] global writers writers -= 1 if should_stage: log.debug("CurrentView: Staged %s for commit", path) self._stage(add=path, message=message) log.debug("CurrentView: Release %s", path) return os.close(fh) @write_operation @not_in("ignore", check=["path"]) def rmdir(self, path): message = "Delete the {} directory".format(path) # Unlink all the files full_path = self.repo._full_path(path) for root, dirs, files in os.walk(full_path): for _file in files: deleting_file = os.path.join(root, _file) if os.path.exists(deleting_file): result = super(CurrentView, self).unlink(os.path.join(path, _file)) self._stage(remove=os.path.join(path, _file), message=message) # Delete the actual directory result = super(CurrentView, self).rmdir("{}/".format(path)) log.debug("CurrentView: %s", message) return result @write_operation @not_in("ignore", check=["path"]) def unlink(self, path): result = super(CurrentView, self).unlink(path) message = "Deleted {}".format(path) self._stage(remove=path, message=message) log.debug("CurrentView: Deleted %s", path) return result def _stage(self, message, add=None, remove=None): non_empty = False if remove is not None: remove = self._sanitize(remove) if add is not None: add = self._sanitize(add) paths = self._get_files_from_path(add) if paths: for path in paths: path = path.replace("{}/".format(add), "{}/".format(remove)) self.repo.index.remove(path) else: self.repo.index.remove(remove) else: self.repo.index.remove(remove) non_empty = True if add is not None: add = self._sanitize(add) paths = self._get_files_from_path(add) if paths: for path in paths: self.repo.index.add(path) else: self.repo.index.add(add) non_empty = True if non_empty: self.queue.commit(add=add, remove=remove, message=message) def _get_files_from_path(self, path): paths = [] full_path = self.repo._full_path(self._sanitize(path)) workdir = self.repo._repo.workdir if os.path.isdir(full_path): for (dirpath, dirs, files) in os.walk(full_path): for filename in files: paths.append("{}/{}".format(dirpath.replace(workdir, ""), filename)) return paths def _sanitize(self, path): if path is None: return path if path.startswith("/"): return path[1:] return path
4,516
480
<gh_stars>100-1000 /* * Copyright [2013-2021], Alibaba Group Holding Limited * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.alibaba.polardbx.executor.operator.frame; import com.alibaba.polardbx.common.datatype.Decimal; import com.alibaba.polardbx.executor.chunk.Chunk; import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.core.datatype.DataTypeUtil; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; import com.alibaba.polardbx.executor.calc.Aggregator; import java.math.BigInteger; import java.util.List; /** * The range unboundedFollowing window frame calculates frames with the following SQL form: * ... RANGE BETWEEN [window frame preceding] AND UNBOUNDED FOLLOWING * [window frame preceding] ::= [unsigned_value_specification] PRECEDING | CURRENT ROW * * <p>e.g.: ... RANGE BETWEEN 1 PRECEDING AND UNBOUNDED FOLLOWING. */ public class RangeUnboundedPrecedingOverFrame extends AbstractOverWindowFrame { private final int rightBound; private int rightIndex = 0; private int leftIndex = 0; private int orderByColIndex; private int isAscOrder; private int nullRowsLeft; private int nullRowsRight; private DataType dataType; private int currentIndex; //avoid zero for left = 0 and right = 0 private int lastProcessedLeft = -1; private int lastProcessedRight = -1; private Object lastProcessedValue; public RangeUnboundedPrecedingOverFrame( List<Aggregator> aggregator, int rightBound, int orderByColIndex, boolean isAscOrder, DataType dataType) { super(aggregator); this.rightBound = rightBound; this.orderByColIndex = orderByColIndex; this.dataType = dataType; if (isAscOrder) { this.isAscOrder = 1; } else { this.isAscOrder = -1; } } @Override public void updateIndex(int leftIndex, int rightIndex) {//[] this.leftIndex = leftIndex; this.rightIndex = rightIndex = rightIndex - 1; lastProcessedValue = null; if (isAscOrder > 0 && chunksIndex.rowAt(leftIndex).getObject(orderByColIndex) == null) { updateNullRows(leftIndex); } if (isAscOrder < 0 && chunksIndex.rowAt(this.rightIndex).getObject(orderByColIndex) == null) { updateNullRows(rightIndex); } currentIndex = leftIndex; if (isAscOrder > 0) { aggregators.forEach(t -> t.resetToInitValue(0)); } } @Override public void processData(int index) { Object currentValue = chunksIndex.rowAt(index).getObject(orderByColIndex); int[] indexes = new int[2]; if (lastProcessedValue != null && lastProcessedValue.equals(currentValue)) { return; } indexes[0] = leftIndex; if (currentValue == null) { indexes[1] = nullRowsRight; } else { int otherSize = getBound(index); indexes[1] = otherSize; } lastProcessedValue = currentValue; process(indexes[0], indexes[1]); } private void process(int leftIndex, int rightIndex) { // 升序时,不停的添加结果 if (isAscOrder > 0) { while (currentIndex <= rightIndex) { Chunk.ChunkRow row = chunksIndex.rowAt(currentIndex++); aggregators.forEach(t -> t.accumulate(0, row.getChunk(), row.getPosition())); } return; } if (lastProcessedLeft == leftIndex && lastProcessedRight == rightIndex) { return; } // 
降序时,根据范围重新进行计算,如果实现了部分函数的retract方法,则可减少该部分函数的计算量 aggregators.forEach(t -> { t.resetToInitValue(0); for (int i = leftIndex; i <= rightIndex; i++) { Chunk.ChunkRow row = chunksIndex.rowAt(i); t.accumulate(0, row.getChunk(), row.getPosition()); } }); lastProcessedRight = rightIndex; lastProcessedLeft = leftIndex; } private void updateNullRows(int index) { int nullRowsAnotherSide = index; while (nullRowsAnotherSide >= leftIndex && nullRowsAnotherSide <= rightIndex && chunksIndex.rowAt(nullRowsAnotherSide).getObject(orderByColIndex) == null) { nullRowsAnotherSide += isAscOrder; } nullRowsAnotherSide -= isAscOrder; if (isAscOrder > 0) { nullRowsLeft = leftIndex; nullRowsRight = nullRowsAnotherSide; } else { nullRowsLeft = nullRowsAnotherSide; nullRowsRight = rightIndex; } } private int getBound(int index) { Object currentValue = chunksIndex.rowAt(index).getObject(orderByColIndex); int other = index; while (other <= rightIndex && other >= leftIndex && chunksIndex.rowAt(other).getObject(orderByColIndex) != null) { //asc need <= , desc need the next value <= current value - right bound boolean compare = compare(chunksIndex.rowAt(other).getObject(orderByColIndex), currentValue, rightBound); //current + bound >= next value if (isAscOrder > 0 && compare(chunksIndex.rowAt(other).getObject(orderByColIndex), currentValue, rightBound)) { other += 1; } else if (isAscOrder < 0 && compare(currentValue, chunksIndex.rowAt(other).getObject(orderByColIndex), rightBound)) { //current - right bounded <= next value[get(Other)] => current <= next value + right bounded other += 1; } else { break; } } if (other != index) { other -= 1; } return other; } boolean compare(Object v1, Object v2, int range) { if (DataTypeUtil.equalsSemantically(dataType, DataTypes.DecimalType)) { return ((Decimal) v1).compareTo(((Decimal) v2).add(new Decimal(range, 0))) <= 0; } else if (DataTypeUtil.equalsSemantically(dataType, DataTypes.IntegerType)) { return ((Integer) v1).compareTo(((Integer) v2) + (range)) <= 0; } else if (DataTypeUtil.equalsSemantically(dataType, DataTypes.ByteType)) { return ((Byte) v1).intValue() <= ((Byte) v1).intValue() + range; } else if (DataTypeUtil.equalsSemantically(dataType, DataTypes.ShortType)) { return ((Short) v1).intValue() <= ((Short) v1).intValue() + range; } else if (DataTypeUtil.equalsSemantically(dataType, DataTypes.LongType)) { return ((Long) v1).compareTo(((Long) v2) + (range)) <= 0; } else if (DataTypeUtil.equalsSemantically(dataType, DataTypes.DoubleType)) { return ((Double) v1).compareTo(((Double) v2) + (range)) <= 0; } else if (DataTypeUtil.equalsSemantically(dataType, DataTypes.FloatType)) { return ((Float) v1).compareTo(((Float) v2) + (range)) <= 0; } else if (DataTypeUtil.equalsSemantically(dataType, DataTypes.ULongType)) { return ((BigInteger) v1).compareTo(((BigInteger) v2).add(new BigInteger(String.valueOf(range)))) <= 0; } return false; } }
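// Worked example (illustrative, based on how getBound()/process() above behave
// with ascending order and rightBound = 1): the frame always starts at the
// partition start and its right edge stops at the last row whose ORDER BY value
// is still <= current value + rightBound.
//
//   ORDER BY values:   10, 11, 13, 14
//   frame for "10":    {10, 11}            (11 <= 10 + 1, 13 is not)
//   frame for "11":    {10, 11}            (12 is the cut-off)
//   frame for "13":    {10, 11, 13, 14}    (14 <= 13 + 1)
//
// In ascending order the aggregators only accumulate forward; in descending
// order the frame is recomputed from scratch whenever the bounds change.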
3,364
1,780
#ifdef __OBJC__ #import <UIKit/UIKit.h> #else #ifndef FOUNDATION_EXPORT #if defined(__cplusplus) #define FOUNDATION_EXPORT extern "C" #else #define FOUNDATION_EXPORT extern #endif #endif #endif #import "TrustKit.h" #import "TSKTrustKitConfig.h" #import "TSKPinningValidator.h" #import "TSKPinningValidatorCallback.h" #import "TSKPinningValidatorResult.h" #import "TSKTrustDecision.h" FOUNDATION_EXPORT double TrustKitVersionNumber; FOUNDATION_EXPORT const unsigned char TrustKitVersionString[];
186
852
<gh_stars>100-1000
// Globals: holds "global" variables such as the IMATH_TrackletCalculators
#include "L1Trigger/TrackFindingTracklet/interface/Settings.h"
#include "L1Trigger/TrackFindingTracklet/interface/Globals.h"
#include "L1Trigger/TrackFindingTracklet/interface/imath.h"
#include "L1Trigger/TrackFindingTracklet/interface/IMATH_TrackletCalculator.h"
#include "L1Trigger/TrackFindingTracklet/interface/IMATH_TrackletCalculatorDisk.h"
#include "L1Trigger/TrackFindingTracklet/interface/IMATH_TrackletCalculatorOverlap.h"
#include "L1Trigger/TrackFindingTracklet/interface/TrackletLUT.h"
#include "L1Trigger/TrackFindingTracklet/interface/HistBase.h"

using namespace std;
using namespace trklet;

Globals::Globals(Settings const& settings) {
  imathGlobals* imathGlobs = new imathGlobals();

  //takes ownership of globals pointer
  imathGlobals_.reset(imathGlobs);

  // tracklet calculators
  ITC_L1L2_ = make_unique<IMATH_TrackletCalculator>(settings, imathGlobs, 1, 2);
  ITC_L2L3_ = make_unique<IMATH_TrackletCalculator>(settings, imathGlobs, 2, 3);
  ITC_L3L4_ = make_unique<IMATH_TrackletCalculator>(settings, imathGlobs, 3, 4);
  ITC_L5L6_ = make_unique<IMATH_TrackletCalculator>(settings, imathGlobs, 5, 6);

  ITC_F1F2_ = make_unique<IMATH_TrackletCalculatorDisk>(settings, imathGlobs, 1, 2);
  ITC_F3F4_ = make_unique<IMATH_TrackletCalculatorDisk>(settings, imathGlobs, 3, 4);
  ITC_B1B2_ = make_unique<IMATH_TrackletCalculatorDisk>(settings, imathGlobs, -1, -2);
  ITC_B3B4_ = make_unique<IMATH_TrackletCalculatorDisk>(settings, imathGlobs, -3, -4);

  ITC_L1F1_ = make_unique<IMATH_TrackletCalculatorOverlap>(settings, imathGlobs, 1, 1);
  ITC_L2F1_ = make_unique<IMATH_TrackletCalculatorOverlap>(settings, imathGlobs, 2, 1);
  ITC_L1B1_ = make_unique<IMATH_TrackletCalculatorOverlap>(settings, imathGlobs, 1, -1);
  ITC_L2B1_ = make_unique<IMATH_TrackletCalculatorOverlap>(settings, imathGlobs, 2, -1);
}

Globals::~Globals() {
  for (auto i : thePhiCorr_) {
    delete i;
    i = nullptr;
  }
}

std::ofstream& Globals::ofstream(std::string fname) {
  if (ofstreams_.find(fname) != ofstreams_.end()) {
    return *(ofstreams_[fname]);
  }
  std::ofstream* outptr = new std::ofstream(fname.c_str());
  ofstreams_[fname] = outptr;
  return *outptr;
}
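// Illustrative usage sketch (hypothetical caller): ofstream() caches one
// std::ofstream per file name, so repeated calls with the same name write to the
// same stream instead of reopening and truncating the file. `settings` is assumed
// to be an already configured trklet::Settings.
//
//   trklet::Globals globals(settings);
//   std::ofstream& out = globals.ofstream("tc_debug.txt");
//   out << "seed L1L2 initialised\n";
//   std::ofstream& same = globals.ofstream("tc_debug.txt");  // returns the cached stream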
889
763
package org.batfish.representation.cisco_nxos; import java.util.SortedMap; import java.util.TreeMap; import javax.annotation.Nonnull; /** An {@link ObjectGroup} of IP addresses. */ public final class ObjectGroupIpAddress extends ObjectGroup { public ObjectGroupIpAddress(String name) { super(name); _lines = new TreeMap<>(); } @Override public <T> T accept(ObjectGroupVisitor<T> visitor) { return visitor.visitObjectGroupIpAddress(this); } public @Nonnull SortedMap<Long, ObjectGroupIpAddressLine> getLines() { return _lines; } private final @Nonnull SortedMap<Long, ObjectGroupIpAddressLine> _lines; }
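// Illustrative sketch of dispatching on this type through the visitor: only the
// visitObjectGroupIpAddress method shown above is assumed; the real
// ObjectGroupVisitor interface presumably declares one visit method per concrete
// ObjectGroup subtype, so a full implementation would override those as well.
//
//   ObjectGroupVisitor<Integer> lineCounter = new ObjectGroupVisitor<Integer>() {
//     @Override
//     public Integer visitObjectGroupIpAddress(ObjectGroupIpAddress group) {
//       return group.getLines().size();
//     }
//     // ...visit methods for the other ObjectGroup subtypes...
//   };
//   int lineCount = someGroup.accept(lineCounter);   // someGroup: a parsed ObjectGroupIpAddress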
217