max_stars_count
int64
301
224k
text
stringlengths
6
1.05M
token_count
int64
3
727k
310
<filename>Frameworks/MetalPetal/MTIColorMatrix.h // // MTIColorMatrix.h // MetalPetal // // Created by <NAME> on 25/10/2017. // #import <Foundation/Foundation.h> #import "MTIShaderLib.h" FOUNDATION_EXPORT const MTIColorMatrix MTIColorMatrixIdentity NS_SWIFT_NAME(MTIColorMatrix.identity); FOUNDATION_EXPORT const MTIColorMatrix MTIColorMatrixRGBColorInvert NS_SWIFT_NAME(MTIColorMatrix.rgbColorInvert); FOUNDATION_EXPORT BOOL MTIColorMatrixEqualToColorMatrix(MTIColorMatrix a, MTIColorMatrix b) NS_SWIFT_NAME(MTIColorMatrix.isEqual(self:to:)); FOUNDATION_EXPORT BOOL MTIColorMatrixIsIdentity(MTIColorMatrix matrix) NS_SWIFT_NAME(getter:MTIColorMatrix.isIdentity(self:)); FOUNDATION_EXPORT MTIColorMatrix MTIColorMatrixConcat(MTIColorMatrix a, MTIColorMatrix b) NS_SWIFT_NAME(MTIColorMatrix.concat(self:with:)); FOUNDATION_EXPORT MTIColorMatrix MTIColorMatrixMakeWithExposure(float exposure) NS_SWIFT_NAME(MTIColorMatrix.init(exposure:)); FOUNDATION_EXPORT MTIColorMatrix MTIColorMatrixMakeWithSaturation(float saturation, simd_float3 grayColorTransform) NS_SWIFT_NAME(MTIColorMatrix.init(saturation:grayColorTransform:)); FOUNDATION_EXPORT MTIColorMatrix MTIColorMatrixMakeWithBrightness(float brightness) NS_SWIFT_NAME(MTIColorMatrix.init(brightness:)); FOUNDATION_EXPORT MTIColorMatrix MTIColorMatrixMakeWithContrast(float contrast) NS_SWIFT_NAME(MTIColorMatrix.init(contrast:)); FOUNDATION_EXPORT MTIColorMatrix MTIColorMatrixMakeWithOpacity(float opacity) NS_SWIFT_NAME(MTIColorMatrix.init(opacity:));
518
694
// Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License.. #ifndef CLIENT_H #define CLIENT_H #include "AbstractNetworkOps.h" using namespace std; class Client : public AbstractNetworkOps { public: Client(boost::asio::io_service& io_service, boost::asio::ssl::context& context, boost::asio::ip::tcp::resolver::iterator endpoint_iterator); virtual ~Client(); bool verify_certificate(bool preverified, boost::asio::ssl::verify_context& ctx); void handle_connect(const boost::system::error_code& error); void handle_handshake(const boost::system::error_code& error); void startConnection(); private: boost::asio::ip::tcp::resolver::iterator endpoint_iterator; }; #endif
400
32,544
<filename>patterns/design-patterns-architectural/src/main/java/com/baeldung/mvc_mvp/mvc/ProductView.java package com.baeldung.mvc_mvp.mvc; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class ProductView { private static Logger log = LoggerFactory.getLogger(ProductView.class); public void printProductDetails(String name, String description, Double price) { log.info("Product details:"); log.info("product Name: " + name); log.info("product Description: " + description); log.info("product price: " + price); } }
221
1,704
<gh_stars>1000+ from torchmeta.datasets.cifar100.cifar_fs import CIFARFS from torchmeta.datasets.cifar100.fc100 import FC100 __all__ = ['CIFARFS', 'FC100']
67
477
import requests # retrieving relevant terms using https://www.wordsapi.com/ API_URL = "https://wordsapiv1.p.rapidapi.com/words/" def get_relevant_words(search_term, api_key, depth=1): relevant_arr = [search_term] to_be_searched = [search_term] while depth > 0 and to_be_searched: next_to_be_searched = [] for word in to_be_searched: response = requests.get(API_URL+word, headers={"X-RapidAPI-Key": api_key}).json() if "results" not in response: continue for result in response["results"]: if "similarTo" in result: for similar in result["similarTo"]: if similar not in relevant_arr and similar not in to_be_searched: next_to_be_searched.append(similar) if "derivation" in result: for derivation in result["derivation"]: if derivation not in relevant_arr and derivation not in to_be_searched: next_to_be_searched.append(derivation) if "synonyms" in result: for synonym in result["synonyms"]: if synonym not in relevant_arr and synonym not in to_be_searched: next_to_be_searched.append(synonym) relevant_arr += next_to_be_searched to_be_searched = next_to_be_searched depth -= 1 return relevant_arr
739
412
<reponame>dave1667/graphd /* Copyright 2015 Google Inc. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "graphd/graphd.h" #include "graphd/graphd-iterator-and.h" #include <errno.h> #include <stdbool.h> #include <stdio.h> #include <string.h> GRAPHD_SABOTAGE_DECL; static double step_size(pdb_handle const *pdb, pdb_iterator const *it, unsigned long long upper_bound) { if (pdb_iterator_n_valid(pdb, it)) { unsigned long long bre; bre = (it->it_high == PDB_ITERATOR_HIGH_ANY ? upper_bound : it->it_high) - it->it_low; if (pdb_iterator_n(pdb, it) <= 1) return (double)bre; return (double)bre / pdb_iterator_n(pdb, it); } return 1.0; } /** * @brief Given a producer and some checkers, get the next value. * * This is used both for the and_iterator_next() * implementation and for the internal contest phase in * the statistics. * * @param it The and-iterator * @param producer Index of the producer. * @param ps process-state local to the particular * statistics/and_iterator_next/../... * @param budget_inout budget * * @return 0 on success, a nonzero error code on failure. * @return PDB_ERR_MORE if we ran out of time. 
*/ int graphd_iterator_and_run(pdb_iterator *const it, size_t const producer, and_process_state *const ps, pdb_budget *const budget_inout) { graphd_iterator_and *gia = it->it_theory; cl_handle *cl = gia->gia_cl; pdb_handle *pdb = gia->gia_pdb; bool changed; int err = 0; pdb_iterator *c_it = NULL; pdb_budget budget_in = *budget_inout; char buf[200]; bool checker_likes_find; size_t check_i; cl_log(cl, CL_LEVEL_DEBUG, "graphd_iterator_and_run(it=%p, ps=%p, ps_id=%llx, resume_id=%llx, " "call_state=%d, check_i=%zu, producer=#%d, " "budget $%lld, gia_n %zu, ogia_n %zu, ps_n %zu)", (void *)it, (void *)ps, (unsigned long long)ps->ps_id, (unsigned long long)ps->ps_next_find_resume_id, (int)ps->ps_run_call_state, ps->ps_check_i, (int)producer, *budget_inout, gia->gia_n, ogia(it)->gia_n, ps->ps_n); cl_enter(cl, CL_LEVEL_VERBOSE, "(it=%p, call_state=%d, check_i=%zu, producer=#%d, " "budget $%lld, gia_n %zu, ogia_n %zu, ps_n %zu, sabotage %p, %lu)", (void *)it, (int)ps->ps_run_call_state, ps->ps_check_i, (int)producer, *budget_inout, gia->gia_n, ogia(it)->gia_n, ps->ps_n, gia->gia_graphd->g_sabotage, gia->gia_graphd->g_sabotage ? gia->gia_graphd->g_sabotage->gs_countdown : 0); switch (ps->ps_run_call_state) { default: cl_notreached(cl, "graphd_iterator_and_run: " "unexpected call state %d", ps->ps_run_call_state); case GRAPHD_ITERATOR_AND_RUN_NEXT_CATCH_UP_START: /* Preprocessing: resynchronize the producer -- * that is, move it onto ps_next_find_resume_id; * *then* do a next. * * It's possible that ps_next_find_resume_id * resulted from a find in one of the checkers - * count a producer find that jumps past it * as if it were a find and a next. 
*/ if (ps->ps_it == NULL) { err = graphd_iterator_and_process_state_initialize(pdb, it, ps); if (err != 0) return err; } cl_assert(cl, ps->ps_it != NULL); ps->ps_run_cost = 0; pdb_iterator_call_reset(pdb, ps->ps_it[producer]); if (ps->ps_next_find_resume_id == PDB_ID_NONE) { err = pdb_iterator_reset(pdb, ps->ps_it[producer]); if (err != 0) return err; } else if (pdb_iterator_sorted(pdb, ps->ps_it[producer]) && pdb_iterator_statistics_done(pdb, ps->ps_it[producer]) && pdb_iterator_find_cost(pdb, ps->ps_it[producer]) < (pdb_iterator_n(pdb, ps->ps_it[producer]) * pdb_iterator_next_cost(pdb, ps->ps_it[producer]))) { /* Resumption via a direct "find" on * the producer. */ pdb_id id_found; pdb_iterator *p_it; p_it = ps->ps_it[producer]; pdb_iterator_call_reset(pdb, p_it); case 9: p_it = ps->ps_it[producer]; err = pdb_iterator_find(pdb, p_it, ps->ps_next_find_resume_id, &id_found, budget_inout); if (err == PDB_ERR_MORE) { ps->ps_run_call_state = 9; goto suspend; } if (err != 0) { /* This is possible if the ID * didn't originate with this * producer, and this producer * just bumped into its end. */ char buf[200]; cl_log_errno(cl, err == PDB_ERR_NO ? CL_LEVEL_FAIL : CL_LEVEL_ERROR, "pdb_iterator_find", err, "id=%llx, iterator=%s", (unsigned long long)ps->ps_next_find_resume_id, pdb_iterator_to_string(pdb, p_it, buf, sizeof buf)); goto done; } ps->ps_id = id_found; /* If we overshot, count this as the producer * producing a new candidate. */ if (id_found != ps->ps_next_find_resume_id) { cl_assert(cl, pdb_iterator_forward(pdb, p_it) ? id_found > ps->ps_next_find_resume_id : id_found < ps->ps_next_find_resume_id); ps->ps_next_find_resume_id = PDB_ID_NONE; goto have_producer_next_result; } } else { /* Resumption via "next" calls on just the producer, * until we see our desired ID. 
*/ pdb_id id_found; do { pdb_iterator *p_it; if (*budget_inout < 0) { err = PDB_ERR_MORE; ps->ps_run_call_state = 10; goto suspend; } p_it = ps->ps_it[producer]; pdb_iterator_call_reset(pdb, p_it); case 10: p_it = ps->ps_it[producer]; err = pdb_iterator_next(pdb, p_it, &id_found, budget_inout); if (err != 0) { if (err == PDB_ERR_MORE) { ps->ps_run_call_state = 10; goto suspend; } ps->ps_next_find_resume_id = PDB_ID_NONE; ps->ps_run_call_state = 0; goto done; } if (pdb_iterator_sorted(pdb, p_it) && pdb_iterator_sorted_valid(pdb, p_it) && (pdb_iterator_forward(pdb, p_it) ? id_found > ps->ps_next_find_resume_id : id_found < ps->ps_next_find_resume_id)) { /* We overshot, as in the find case. */ ps->ps_id = id_found; ps->ps_next_find_resume_id = PDB_ID_NONE; ps->ps_run_call_state = 0; goto have_producer_next_result; } /* If we're not sorted, we can't tell * whether we overshoot - but in that * case, the checkers aren't allowed to * find past their check-id, and thus * any ps_id must actually have originated * with the producer. */ } while (id_found != ps->ps_next_find_resume_id); } ps->ps_run_call_state = 0; ps->ps_next_find_resume_id = PDB_ID_NONE; /* Fall through */ /* This entry point, 0, is for initial entry only. * For resumption as if via 0, use 7 instead. 
*/ case 0: if (ps->ps_eof) { cl_leave(cl, CL_LEVEL_VERBOSE, "eof (stored)"); return GRAPHD_ERR_NO; } if (ps->ps_it == NULL) { err = graphd_iterator_and_process_state_initialize(pdb, it, ps); if (err != 0) return err; cl_log(cl, CL_LEVEL_DEBUG, "graphd_iterator_and_run, after and_process_state_initialize: " "it=%p, ps=%p, ps_id=%llx, resume_id=%llx, call_state=%d, " "check_i=%zu, producer=#%d, " "budget $%lld, gia_n %zu, ogia_n %zu, ps_n %zu", (void *)it, (void *)ps, (unsigned long long)ps->ps_id, (unsigned long long)ps->ps_next_find_resume_id, (int)ps->ps_run_call_state, ps->ps_check_i, (int)producer, *budget_inout, gia->gia_n, ogia(it)->gia_n, ps->ps_n); } cl_assert(cl, ps->ps_it != NULL); ps->ps_run_cost = 0; ps->ps_next_find_resume_id = PDB_ID_NONE; do /* ... while (err == GRAPHD_ERR_NO) */ { case 7: cl_assert(cl, pdb_iterator_has_position(pdb, ps->ps_it[producer])); pdb_iterator_call_reset(pdb, ps->ps_it[producer]); case 1: ps->ps_run_call_state = 0; cl_assert(cl, ps->ps_it != NULL); PDB_IS_ITERATOR(cl, ps->ps_it[producer]); cl_assert(cl, pdb_iterator_has_position(pdb, ps->ps_it[producer])); /* Get the next candidate from this producer. * One of two ways: * * - If a previous checker used "find" to go forward * to an ID, maybe we can use "find" to position * on or after it. * * - Otherwise, either there was no previous * candidate, or the last failure was a simple * check, and we just use "next". */ /* Cost to the producer with find: 1 find cost. * Cost with nexts: distance between the new * point and the current point, * divided by the average next step width, * times the next cost. */ cl_log(cl, CL_LEVEL_VERBOSE, "graphd_iterator_and_run: find or next? " "producer sorted? %s; find-id %llx; resume-id %llx; stats " "done %s; fc=%lld; primitive n: %llu; total %llu; nc=%lld", pdb_iterator_sorted(pdb, ps->ps_it[producer]) ? 
"yes" : "no", (unsigned long long)ps->ps_next_find_resume_id, (unsigned long long)ps->ps_producer_id, pdb_iterator_statistics_done(pdb, ps->ps_it[producer]) ? "yes" : "no", (long long)pdb_iterator_find_cost(pdb, ps->ps_it[producer]), (unsigned long long)pdb_primitive_n(pdb), (unsigned long long)pdb_iterator_n(pdb, ps->ps_it[producer]), (long long)pdb_iterator_next_cost(pdb, ps->ps_it[producer])); if (pdb_iterator_sorted(pdb, ps->ps_it[producer]) && ps->ps_next_find_resume_id != PDB_ID_NONE && ps->ps_producer_id != PDB_ID_NONE && pdb_iterator_statistics_done(pdb, ps->ps_it[producer]) && pdb_iterator_find_cost(pdb, ps->ps_it[producer]) < ((double)(ps->ps_next_find_resume_id > ps->ps_producer_id ? ps->ps_next_find_resume_id - ps->ps_producer_id : ps->ps_producer_id - ps->ps_next_find_resume_id) / ((double)pdb_iterator_spread(pdb, ps->ps_it[producer]) / pdb_iterator_n(pdb, ps->ps_it[producer]))) * pdb_iterator_next_cost(pdb, ps->ps_it[producer])) { pdb_id id_found; pdb_iterator *p_it; /* This producer likes find for this jump. */ ps->ps_id = ps->ps_next_find_resume_id; case GRAPHD_ITERATOR_AND_RUN_FIND_START: p_it = ps->ps_it[producer]; pdb_iterator_call_reset(pdb, p_it); case 2: cl_assert(cl, ps->ps_it != NULL); p_it = ps->ps_it[producer]; PDB_IS_ITERATOR(cl, p_it); err = pdb_iterator_find(pdb, p_it, ps->ps_id, &id_found, budget_inout); if (err == PDB_ERR_MORE) { ps->ps_run_call_state = 2; goto suspend; } if (err != 0) goto done; if (ps->ps_next_find_resume_id != PDB_ID_NONE && id_found != ps->ps_next_find_resume_id) { if (pdb_iterator_forward(pdb, p_it) ? 
(id_found > ps->ps_next_find_resume_id) : (id_found < ps->ps_next_find_resume_id)) { ps->ps_next_find_resume_id = PDB_ID_NONE; ps->ps_id = id_found; goto have_producer_next_result; } cl_notreached(cl, "graphd_iterator_and_run: " "producer %s jumped past " "ps_next_find_resume_id %llx, " "landing on %llx instead.", pdb_iterator_to_string(pdb, ps->ps_it[producer], buf, sizeof buf), (unsigned long long)ps->ps_next_find_resume_id, (unsigned long long)id_found); } ps->ps_id = id_found; } else { pdb_iterator *p_it; ; p_it = ps->ps_it[producer]; pdb_iterator_call_reset(pdb, p_it); /* Just use "next". */ case 3: p_it = ps->ps_it[producer]; err = pdb_iterator_next(pdb, p_it, &ps->ps_id, budget_inout); if (err != 0) { if (err != PDB_ERR_MORE) goto done; ps->ps_run_call_state = 3; goto suspend; } if (ps->ps_next_find_resume_id != PDB_ID_NONE && ps->ps_next_find_resume_id != ps->ps_id) { /* Did we overshoot? */ if (pdb_iterator_sorted(pbd, ps->ps_it[producer]) && (pdb_iterator_forward(pdb, ps->ps_it[producer]) ? ps->ps_id > ps->ps_next_find_resume_id : ps->ps_id < ps->ps_next_find_resume_id)) { ps->ps_next_find_resume_id = PDB_ID_NONE; goto have_producer_next_result; } cl_log(cl, CL_LEVEL_VERBOSE, "graphd_iterator_and_run: " "ignore %llx; still " "waiting to go past %llx", (unsigned long long)ps->ps_id, (unsigned long long)ps->ps_next_find_resume_id); if (GRAPHD_SABOTAGE(gia->gia_graphd, *budget_inout <= 0)) { pdb_iterator_call_reset(pdb, ps->ps_it[producer]); ps->ps_run_call_state = 3; goto suspend; } err = GRAPHD_ERR_NO; continue; } } have_producer_next_result: ps->ps_next_find_resume_id = PDB_ID_NONE; ps->ps_producer_id = ps->ps_id; /* Count this as the producer producing something. * * If we're running as part of the initial contest, * this count, compared to the number of elements * that actually make it through all the tests, * will help the contest figure out how many elements * to expect, overall. 
*/ ps->ps_run_produced_n++; cl_log(cl, CL_LEVEL_VERBOSE, "graphd_iterator_and_run: " "producer #%d made %llx (attempt #%llu)", (int)producer, (unsigned long long)ps->ps_id, (unsigned long long)ps->ps_run_produced_n); if (ps->ps_id < it->it_low || ps->ps_id >= it->it_high) { cl_log(cl, CL_LEVEL_VERBOSE, "graphd_iterator_and_run: " "value %lld outside " "of low/high boundaries %lld..%lld", (long long)ps->ps_id, (long long)it->it_low, (long long)it->it_high - 1); if (pdb_iterator_sorted(pdb, ps->ps_it[producer]) && (pdb_iterator_forward(pdb, it) ? ps->ps_id >= it->it_high : ps->ps_id < it->it_low)) { err = GRAPHD_ERR_NO; goto done; } /* If our producer is sorted, and we're not * currently chasing after something - maybe * we can use "find" to position on-or-after * the initial boundary? */ if (pdb_iterator_sorted(pdb, ps->ps_it[producer]) && ps->ps_next_find_resume_id == PDB_ID_NONE) { ps->ps_next_find_resume_id = pdb_iterator_forward(pdb, it) ? it->it_low : it->it_high - 1; } if (GRAPHD_SABOTAGE(gia->gia_graphd, *budget_inout <= 0)) { ps->ps_run_call_state = 7; goto suspend; } err = GRAPHD_ERR_NO; continue; } /* Check the candidate ps->ps_id against all * the iterators that didn't produce it. */ err = graphd_iterator_and_check_sort_refresh(it, ps); if (err != 0) goto done; /* Whenever we resume into the middle of this * loop, we actually do need to retest the end * condition, ps->ps_check_i >= ps->ps_n, because * ps_n can have been decremented while removing * a subiterator during optimization. * (We're not really sure *which* ps is involved * here.) 
*/ for (ps->ps_check_i = 0; ps->ps_check_i < ps->ps_n; ps->ps_check_i++) { pdb_id id_found; cl_assert(cl, ps->ps_check_order != NULL); check_i = ps->ps_check_order[ps->ps_check_i]; if (GRAPHD_SABOTAGE(gia->gia_graphd, *budget_inout < 0)) { ps->ps_run_call_state = 5; goto suspend; case 5: ps->ps_run_call_state = 0; if (ps->ps_check_i >= ps->ps_n) break; check_i = ps->ps_check_order[ps->ps_check_i]; } /* This checker is the producer? * If that's true, we don't need to check. */ if (check_i == producer) continue; c_it = ps->ps_it[check_i]; pdb_iterator_call_reset(pdb, c_it); if (!pdb_iterator_sorted(pdb, c_it) || !pdb_iterator_sorted(pdb, ps->ps_it[producer]) || !pdb_iterator_statistics_done(pdb, c_it)) checker_likes_find = false; else if (pdb_iterator_n(pdb, c_it) == 0) checker_likes_find = true; else { double find_cost_per_point, check_cost_per_point; double c_step, p_step, two_find_step; long long step_i_can_use; long long upper_bound = pdb_primitive_n(pdb); /* Each "find" slides us on average stepsize/2 * across the iterator's numerical breadth. * * So, two pairs of checker and producer finds * together get us past p_step + c_step IDs. */ c_step = step_size(pdb, c_it, upper_bound); p_step = step_size(pdb, ps->ps_it[producer], upper_bound); two_find_step = c_step + p_step; step_i_can_use = (pdb_iterator_forward(pdb, it) ? (it->it_high == PDB_ITERATOR_HIGH_ANY ? upper_bound : it->it_high) - ps->ps_id : ps->ps_id - it->it_low); if (step_i_can_use < 1) step_i_can_use = 0.00001; if (c_step > step_i_can_use) c_step = step_i_can_use; if (p_step > step_i_can_use) p_step = step_i_can_use; if (two_find_step > step_i_can_use) two_find_step = step_i_can_use; find_cost_per_point = ((double)(pdb_iterator_find_cost(pdb, c_it) + pdb_iterator_find_cost(pdb, ps->ps_it[producer])) * 2.0) / two_find_step; /* Each "next+check" step gets us past * p_step IDs. 
*/ check_cost_per_point = (double)(pdb_iterator_next_cost(pdb, ps->ps_it[producer]) + pdb_iterator_check_cost(pdb, c_it)) / p_step; checker_likes_find = find_cost_per_point < check_cost_per_point; cl_log( cl, CL_LEVEL_VERBOSE, "graphd_iterator_and_run: " "subiterator %s: find cost %.3f, " "(c.fc=%lld + p.fc=%lld)*2" "/(p_step=%.3f+c_step=%.3f;step_i_can_use=%lld);" "check_cost %.3f (nc=%lld + cc=%lld)/p_step=%.3f", pdb_iterator_to_string(pdb, c_it, buf, sizeof buf), find_cost_per_point, (long long)pdb_iterator_find_cost(pdb, c_it), (long long)pdb_iterator_find_cost(pdb, ps->ps_it[producer]), p_step, c_step, step_i_can_use, check_cost_per_point, (long long)pdb_iterator_next_cost(pdb, ps->ps_it[producer]), (long long)pdb_iterator_check_cost(pdb, c_it), p_step); } if (!checker_likes_find) { /* Perform a check. */ cl_assert(cl, ps->ps_check_i < ps->ps_n); cl_assert(cl, ps->ps_check_order != NULL); check_i = ps->ps_check_order[ps->ps_check_i]; c_it = ps->ps_it[check_i]; pdb_iterator_call_reset(pdb, c_it); case 4: ps->ps_run_call_state = 0; if (ps->ps_check_i >= ps->ps_n) { err = 0; break; } check_i = ps->ps_check_order[ps->ps_check_i]; c_it = ps->ps_it[check_i]; PDB_IS_ITERATOR(cl, c_it); cl_log(cl, CL_LEVEL_VERBOSE, "check %llx against " "iterator #%d (producer is %d), %s ($%lld)", (unsigned long long)ps->ps_id, (int)check_i, (int)producer, pdb_iterator_to_string(pdb, c_it, buf, sizeof buf), *budget_inout); err = pdb_iterator_check(pdb, c_it, ps->ps_id, budget_inout); if (err != 0) { if (err == PDB_ERR_MORE) { ps->ps_run_call_state = 4; goto suspend; } if (err != GRAPHD_ERR_NO) goto unexpected_check_error; cl_log(cl, CL_LEVEL_VERBOSE, "graphd_iterator_and_run: check #%zu " "(%s) fails: %llx: %s", ps->ps_check_i, pdb_iterator_to_string(pdb, c_it, buf, sizeof buf), (unsigned long long)ps->ps_id, graphd_strerror(err)); break; } continue; } /* Perform a find. 
*/ check_i = ps->ps_check_order[ps->ps_check_i]; c_it = ps->ps_it[check_i]; pdb_iterator_call_reset(pdb, c_it); if (GRAPHD_SABOTAGE(gia->gia_graphd, *budget_inout < 0)) { ps->ps_run_call_state = 6; goto suspend; case 6: ps->ps_run_call_state = 0; if (ps->ps_check_i >= ps->ps_n) { err = 0; break; } } check_i = ps->ps_check_order[ps->ps_check_i]; c_it = ps->ps_it[check_i]; changed = false; cl_log(cl, CL_LEVEL_VERBOSE, "find %llx in iterator #%zu, %s", (unsigned long long)ps->ps_id, check_i, pdb_iterator_to_string(pdb, c_it, buf, sizeof buf)); err = pdb_iterator_find(pdb, c_it, ps->ps_id, &id_found, budget_inout); if (err != 0) { if (err != PDB_ERR_MORE) goto done; ps->ps_run_call_state = 6; goto suspend; } cl_assert(cl, id_found <= ADDB_U5_MAX); /* Not changing the ID is like passing * pdb_iterator_check() -- just move on to * the next condition. */ if (ps->ps_id == id_found) continue; /* Tell the producer where to resume. */ ps->ps_next_find_resume_id = ps->ps_id = id_found; err = GRAPHD_ERR_NO; break; } cl_assert(cl, err != 0 || ps->ps_check_i >= ps->ps_n); if (err == GRAPHD_ERR_NO && GRAPHD_SABOTAGE(gia->gia_graphd, *budget_inout <= 0)) { ps->ps_run_call_state = 7; goto suspend; } } while (err == GRAPHD_ERR_NO); } done: ps->ps_run_cost += budget_in - *budget_inout; if (err != 0) { if (err == GRAPHD_ERR_NO) ps->ps_eof = true; cl_leave(cl, CL_LEVEL_VERBOSE, "%s: %s ($%lld)", pdb_iterator_to_string(pdb, it, buf, sizeof buf), err == GRAPHD_ERR_NO ? 
"done" : graphd_strerror(err), (long long)(budget_in - *budget_inout)); } else cl_leave(cl, CL_LEVEL_VERBOSE, "%s: %llu ($%lld)", pdb_iterator_to_string(pdb, it, buf, sizeof buf), (unsigned long long)ps->ps_id, (long long)(budget_in - *budget_inout)); return err; unexpected_check_error: cl_assert(cl, c_it != NULL); cl_log_errno(cl, CL_LEVEL_FAIL, "pdb_iterator_check", err, "iterator=%s, id=%llu", pdb_iterator_to_string(pdb, c_it, buf, sizeof buf), (unsigned long long)ps->ps_id); cl_leave(cl, CL_LEVEL_VERBOSE, "unexpected error: %s", graphd_strerror(err)); return err; suspend: ps->ps_run_cost += budget_in - *budget_inout; cl_leave(cl, CL_LEVEL_VERBOSE, "resume %hd ($%lld)", ps->ps_run_call_state, (long long)(budget_in - *budget_inout)); return PDB_ERR_MORE; } /* A "find" is like a next with a slightly different * starting point. */ int graphd_iterator_and_find_loc(pdb_handle *pdb, pdb_iterator *it, pdb_id id_in, pdb_id *id_out, pdb_budget *budget_inout, char const *file, int line) { graphd_iterator_and *const gia = it->it_theory; and_process_state *const ps = &gia->gia_ps; cl_handle *const cl = gia->gia_cl; pdb_budget budget_in = *budget_inout; int err; /* Come back when there's budget! */ if (GRAPHD_SABOTAGE(gia->gia_graphd, *budget_inout < 0)) return PDB_ERR_MORE; pdb_rxs_push(pdb, "FIND %p and %llx state=%d+%d [%s:%d]", (void *)it, (unsigned long long)id_in, it->it_call_state, ps->ps_run_call_state, file, line); cl_assert(cl, pdb_iterator_sorted(pdb, it)); /* We no longer care about the resume ID - with the start * of the find, our previous position is irrelevant. 
*/ gia->gia_resume_id = PDB_ID_NONE; gia->gia_id = PDB_ID_NONE; gia->gia_ps.ps_eof = false; err = graphd_iterator_and_access(pdb, it, budget_inout, 1.0); if (err != GRAPHD_ERR_ALREADY) { if (err == 0) { pdb_rxs_pop(pdb, "FIND %p and %llx redirect ($%lld)", (void *)it, (unsigned long long)id_in, (long long)(budget_in - *budget_inout)); return pdb_iterator_find_loc(pdb, it, id_in, id_out, budget_inout, file, line); } if (err == PDB_ERR_MORE) pdb_rxs_pop(pdb, "FIND %p and %llx suspend; " "state=%d ($%lld)", (void *)it, (unsigned long long)id_in, it->it_call_state, (long long)(budget_in - *budget_inout)); else pdb_rxs_pop(pdb, "FIND %p and %llx error %s ($%lld)", (void *)it, (unsigned long long)id_in, graphd_strerror(err), (long long)(budget_in - *budget_inout)); goto err; } cl_assert(cl, pdb_iterator_statistics_done(pdb, it)); if (it->it_call_state == 0) { ps->ps_id = id_in; ps->ps_eof = false; /* Do we know for sure that we're out of range? */ if (pdb_iterator_forward(pdb, it) ? ps->ps_id >= it->it_high : ps->ps_id < it->it_low) { err = GRAPHD_ERR_NO; goto done; } /* Is the next value cached? */ gia->gia_cache_offset_valid = false; if ((err = graphd_iterator_cache_search( pdb, it, ogia(it)->gia_cache, &ps->ps_id, &gia->gia_cache_offset)) != PDB_ERR_MORE) { if (err == 0) { /* Go past the value we're returning. */ gia->gia_cache_offset_valid = true; gia->gia_cache_offset++; *budget_inout -= graphd_iterator_cache_cost(ogia(it)->gia_cache); } goto done; } /* Mark that our cache position is invalid. */ gia->gia_cache_offset_valid = false; ps->ps_run_call_state = GRAPHD_ITERATOR_AND_RUN_FIND_START; it->it_call_state = 1; /* Make sure that we have an iterator state * to actually walk around with. 
*/ err = graphd_iterator_and_process_state_initialize(pdb, it, ps); if (err != 0) goto done; cl_assert(cl, ps->ps_it != NULL); } cl_assert(cl, ps->ps_it != NULL); err = graphd_iterator_and_run(it, ogia(it)->gia_producer, ps, budget_inout); done: if (err == PDB_ERR_MORE) { pdb_rxs_pop(pdb, "FIND %p and %llx suspend; state=%d+%d ($%lld)", (void *)it, (unsigned long long)id_in, it->it_call_state, ps->ps_run_call_state, (long long)(budget_in - *budget_inout)); goto err; } it->it_call_state = 0; if (err == 0) { gia->gia_id = *id_out = ps->ps_id; pdb_rxs_pop(pdb, "FIND %p and %llx -> %llx ($%lld)", (void *)it, (unsigned long long)id_in, (unsigned long long)*id_out, (long long)(budget_in - *budget_inout)); } else { ps->ps_id = PDB_ID_NONE; if (err == GRAPHD_ERR_NO) { ps->ps_eof = true; pdb_rxs_pop(pdb, "FIND %p and %llx EOF ($%lld)", (void *)it, (unsigned long long)id_in, (long long)(budget_in - *budget_inout)); } else pdb_rxs_pop(pdb, "FIND %p and %llx: error %s ($%lld)", (void *)it, (unsigned long long)id_in, graphd_strerror(err), (long long)(budget_in - *budget_inout)); } err: pdb_iterator_account_charge_budget(pdb, it, find); return err; }
17,848
3,263
// the .m for this file is generated in a 'pre-action' in the Artsy build scheme // which executes the file <projectDir>/scripts/set_packager_host.sh #import <Foundation/Foundation.h> @interface ARReactPackagerHost : NSObject + (NSString*)hostname; @end
83
2,151
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef CONTENT_BROWSER_SERVICE_WORKER_SERVICE_WORKER_REGISTRATION_OBJECT_HOST_H_
#define CONTENT_BROWSER_SERVICE_WORKER_SERVICE_WORKER_REGISTRATION_OBJECT_HOST_H_

#include <memory>

#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
#include "content/browser/service_worker/service_worker_registration.h"
#include "content/common/content_export.h"
#include "content/common/service_worker/service_worker_types.h"
#include "mojo/public/cpp/bindings/associated_binding_set.h"
#include "third_party/blink/public/mojom/service_worker/service_worker_registration.mojom.h"

namespace content {

class ServiceWorkerContextCore;
class ServiceWorkerVersion;

// ServiceWorkerRegistrationObjectHost has a 1:1 correspondence to
// WebServiceWorkerRegistration in the renderer process.
// The host stays alive while the WebServiceWorkerRegistration is alive, and
// also initiates destruction of the WebServiceWorkerRegistration once detected
// that it's no longer needed. See the class documentation in
// WebServiceWorkerRegistrationImpl for details.
//
// Has a reference to the corresponding ServiceWorkerRegistration in order to
// ensure that the registration is alive while this object host is around.
class CONTENT_EXPORT ServiceWorkerRegistrationObjectHost
    : public blink::mojom::ServiceWorkerRegistrationObjectHost,
      public ServiceWorkerRegistration::Listener {
 public:
  ServiceWorkerRegistrationObjectHost(
      base::WeakPtr<ServiceWorkerContextCore> context,
      ServiceWorkerProviderHost* provider_host,
      scoped_refptr<ServiceWorkerRegistration> registration);
  ~ServiceWorkerRegistrationObjectHost() override;

  // Establishes a new mojo connection into |bindings_|.
  blink::mojom::ServiceWorkerRegistrationObjectInfoPtr CreateObjectInfo();

  // The registration this host keeps alive; never null for the lifetime of
  // |this| (set in the constructor and held by scoped_refptr).
  ServiceWorkerRegistration* registration() { return registration_.get(); }

 private:
  // ServiceWorkerRegistration::Listener overrides.
  void OnVersionAttributesChanged(
      ServiceWorkerRegistration* registration,
      ChangedVersionAttributesMask changed_mask,
      const ServiceWorkerRegistrationInfo& info) override;
  void OnUpdateViaCacheChanged(
      ServiceWorkerRegistration* registration) override;
  void OnRegistrationFailed(ServiceWorkerRegistration* registration) override;
  void OnUpdateFound(ServiceWorkerRegistration* registration) override;

  // Implements blink::mojom::ServiceWorkerRegistrationObjectHost.
  void Update(UpdateCallback callback) override;
  void Unregister(UnregisterCallback callback) override;
  void EnableNavigationPreload(
      bool enable,
      EnableNavigationPreloadCallback callback) override;
  void GetNavigationPreloadState(
      GetNavigationPreloadStateCallback callback) override;
  void SetNavigationPreloadHeader(
      const std::string& value,
      SetNavigationPreloadHeaderCallback callback) override;

  // Called back from ServiceWorkerContextCore when an update is complete.
  void UpdateComplete(UpdateCallback callback,
                      ServiceWorkerStatusCode status,
                      const std::string& status_message,
                      int64_t registration_id);

  // Called back from ServiceWorkerContextCore when the unregistration is
  // complete.
  void UnregistrationComplete(UnregisterCallback callback,
                              ServiceWorkerStatusCode status);

  // Called back from ServiceWorkerStorage when setting navigation preload is
  // complete.
  void DidUpdateNavigationPreloadEnabled(
      bool enable,
      EnableNavigationPreloadCallback callback,
      ServiceWorkerStatusCode status);

  // Called back from ServiceWorkerStorage when setting navigation preload
  // header is complete.
  void DidUpdateNavigationPreloadHeader(
      const std::string& value,
      SetNavigationPreloadHeaderCallback callback,
      ServiceWorkerStatusCode status);

  // Sets the corresponding version field to the given version or if the given
  // version is nullptr, clears the field.
  void SetVersionAttributes(ChangedVersionAttributesMask changed_mask,
                            ServiceWorkerVersion* installing_version,
                            ServiceWorkerVersion* waiting_version,
                            ServiceWorkerVersion* active_version);

  // Invoked when a mojo connection in |bindings_| is dropped by the renderer.
  void OnConnectionError();

  // Perform common checks that need to run before RegistrationObjectHost
  // methods that come from a child process are handled. Returns true if all
  // checks have passed. If anything looks wrong |callback| will run with an
  // error message prefixed by |error_prefix| and |args|, and false is returned.
  template <typename CallbackType, typename... Args>
  bool CanServeRegistrationObjectHostMethods(CallbackType* callback,
                                             const char* error_prefix,
                                             Args... args);

  // |provider_host_| is valid throughout lifetime of |this| because it owns
  // |this|.
  ServiceWorkerProviderHost* provider_host_;
  base::WeakPtr<ServiceWorkerContextCore> context_;
  // Keeps the registration alive; see class comment.
  scoped_refptr<ServiceWorkerRegistration> registration_;
  // Mojo bindings for incoming calls from the renderer to this host.
  mojo::AssociatedBindingSet<blink::mojom::ServiceWorkerRegistrationObjectHost>
      bindings_;
  // Mojo connection to the content::WebServiceWorkerRegistrationImpl in the
  // renderer, which corresponds to the ServiceWorkerRegistration JavaScript
  // object.
  blink::mojom::ServiceWorkerRegistrationObjectAssociatedPtr
      remote_registration_;

  base::WeakPtrFactory<ServiceWorkerRegistrationObjectHost> weak_ptr_factory_;

  DISALLOW_COPY_AND_ASSIGN(ServiceWorkerRegistrationObjectHost);
};

}  // namespace content

#endif  // CONTENT_BROWSER_SERVICE_WORKER_SERVICE_WORKER_REGISTRATION_OBJECT_HOST_H_
1,820
637
/**
 * Support code for Solaris-specific features of Hudson.
 */
package hudson.os.solaris;
24
4,054
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. #include "errorcode.h" namespace config { vespalib::string ErrorCode::getName(int error) { switch(error) { case UNKNOWN_CONFIG: return "UNKNOWN_CONFIG"; case UNKNOWN_DEFINITION: return "UNKNOWN_DEFINITION"; case UNKNOWN_VERSION: return "UNKNOWN_VERSION"; case UNKNOWN_CONFIGID: return "UNKNOWN_CONFIGID"; case UNKNOWN_DEF_MD5: return "UNKNOWN_DEF_MD5"; case UNKNOWN_VESPA_VERSION: return "UNKNOWN_VESPA_VERSION"; case ILLEGAL_NAME: return "ILLEGAL_NAME"; case ILLEGAL_VERSION: return "ILLEGAL_VERSION"; case ILLEGAL_CONFIGID: return "ILLEGAL_CONFIGID"; case ILLEGAL_DEF_MD5: return "ILLEGAL_DEF_MD5"; case ILLEGAL_CONFIG_MD5: return "ILLEGAL_CONFIG_MD5"; case ILLEGAL_TIMEOUT: return "ILLEGAL_TIMEOUT"; case ILLEGAL_TIMESTAMP: return "ILLEGAL_TIMESTAMP"; case ILLEGAL_NAME_SPACE: return "ILLEGAL_NAME_SPACE"; case ILLEGAL_PROTOCOL_VERSION: return "ILLEGAL_PROTOCOL_VERSION"; case ILLEGAL_CLIENT_HOSTNAME: return "ILLEGAL_CLIENT_HOSTNAME"; case OUTDATED_CONFIG: return "OUTDATED_CONFIG"; case INTERNAL_ERROR: return "INTERNAL_ERROR"; case APPLICATION_NOT_LOADED: return "APPLICATION_NOT_LOADED"; case INCONSISTENT_CONFIG_MD5: return "INCONSISTENT_CONFIG_MD5"; default: return "Unknown error"; } } }
738
2,151
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef SERVICES_IDENTITY_PUBLIC_CPP_IDENTITY_MANAGER_H_
#define SERVICES_IDENTITY_PUBLIC_CPP_IDENTITY_MANAGER_H_

#include "base/memory/weak_ptr.h"
#include "base/observer_list.h"
#include "components/signin/core/browser/account_info.h"
#include "components/signin/core/browser/profile_oauth2_token_service.h"
#include "components/signin/core/browser/signin_manager_base.h"
#include "services/identity/public/cpp/primary_account_access_token_fetcher.h"

#if !defined(OS_CHROMEOS)
#include "components/signin/core/browser/signin_manager.h"
#endif

// Necessary to declare this class as a friend.
namespace arc {
class ArcTermsOfServiceDefaultNegotiatorTest;
}

// Necessary to declare this class as a friend.
namespace browser_sync {
class ProfileSyncServiceStartupCrosTest;
}

// Necessary to declare these classes as friends.
namespace chromeos {
class ChromeSessionManager;
class UserSessionManager;
}

// Necessary to declare this class as a friend.
namespace file_manager {
class MultiProfileFileManagerBrowserTest;
}

// Necessary to declare these classes as friends.
class ArcSupportHostTest;
class MultiProfileDownloadNotificationTest;
class ProfileSyncServiceHarness;

namespace identity {

// Gives access to information about the user's Google identities. See
// ./README.md for detailed documentation.
class IdentityManager : public SigninManagerBase::Observer,
#if !defined(OS_CHROMEOS)
                        public SigninManager::DiagnosticsClient,
#endif
                        public OAuth2TokenService::DiagnosticsObserver {
 public:
  // Observer interface for monitoring changes to the user's primary account.
  class Observer {
   public:
    Observer() = default;
    virtual ~Observer() = default;

    Observer(const Observer&) = delete;
    Observer& operator=(const Observer&) = delete;

    // Called when an account becomes the user's primary account.
    // This method is not called during a reauth.
    virtual void OnPrimaryAccountSet(const AccountInfo& primary_account_info) {}

    // Called when the user moves from having a primary account to no
    // longer having a primary account.
    virtual void OnPrimaryAccountCleared(
        const AccountInfo& previous_primary_account_info) {}

    // TODO(blundell): Eventually we might need a callback for failure to log in
    // to the primary account.
  };

  // Observer interface for classes that want to monitor status of various
  // requests. Mostly useful in tests and debugging contexts (e.g., WebUI).
  class DiagnosticsObserver {
   public:
    DiagnosticsObserver() = default;
    virtual ~DiagnosticsObserver() = default;

    DiagnosticsObserver(const DiagnosticsObserver&) = delete;
    DiagnosticsObserver& operator=(const DiagnosticsObserver&) = delete;

    // Called when receiving request for access token.
    virtual void OnAccessTokenRequested(
        const std::string& account_id,
        const std::string& consumer_id,
        const OAuth2TokenService::ScopeSet& scopes) {}
  };

  IdentityManager(SigninManagerBase* signin_manager,
                  ProfileOAuth2TokenService* token_service);
  ~IdentityManager() override;

  // Provides access to the latest cached information of the user's primary
  // account.
  AccountInfo GetPrimaryAccountInfo();

  // Returns whether the primary account is available, according to the latest
  // cached information. Simple convenience wrapper over checking whether the
  // primary account info has a valid account ID.
  bool HasPrimaryAccount();

  // Creates a PrimaryAccountAccessTokenFetcher given the passed-in information.
  std::unique_ptr<PrimaryAccountAccessTokenFetcher>
  CreateAccessTokenFetcherForPrimaryAccount(
      const std::string& oauth_consumer_name,
      const OAuth2TokenService::ScopeSet& scopes,
      PrimaryAccountAccessTokenFetcher::TokenCallback callback,
      PrimaryAccountAccessTokenFetcher::Mode mode);

  // If an entry exists in the Identity Service's cache corresponding to the
  // given information, removes that entry; in this case, the next access token
  // request for |account_id| and |scopes| will fetch a new token from the
  // network. Otherwise, is a no-op.
  void RemoveAccessTokenFromCache(const AccountInfo& account_info,
                                  const OAuth2TokenService::ScopeSet& scopes,
                                  const std::string& access_token);

  // Methods to register or remove observers.
  void AddObserver(Observer* observer);
  void RemoveObserver(Observer* observer);
  void AddDiagnosticsObserver(DiagnosticsObserver* observer);
  void RemoveDiagnosticsObserver(DiagnosticsObserver* observer);

 private:
  // These clients need to call SetPrimaryAccountSynchronouslyForTests().
  friend void MakePrimaryAccountAvailable(
      SigninManagerBase* signin_manager,
      ProfileOAuth2TokenService* token_service,
      IdentityManager* identity_manager,
      const std::string& email);
  friend MultiProfileDownloadNotificationTest;
  friend ProfileSyncServiceHarness;
  friend file_manager::MultiProfileFileManagerBrowserTest;

  // These clients need to call SetPrimaryAccountSynchronously().
  friend ArcSupportHostTest;
  friend arc::ArcTermsOfServiceDefaultNegotiatorTest;
  friend chromeos::ChromeSessionManager;
  friend chromeos::UserSessionManager;
  friend browser_sync::ProfileSyncServiceStartupCrosTest;

  // Sets the primary account info synchronously with both the IdentityManager
  // and its backing SigninManager/ProfileOAuth2TokenService instances.
  // Prefer using the methods in identity_test_{environment, utils}.h to using
  // this method directly.
  void SetPrimaryAccountSynchronouslyForTests(const std::string& gaia_id,
                                              const std::string& email_address,
                                              const std::string& refresh_token);

  // Sets the primary account info synchronously with both the IdentityManager
  // and its backing SigninManager instance. If |refresh_token| is not empty,
  // sets the refresh token with the backing ProfileOAuth2TokenService
  // instance. This method should not be used directly; it exists only to serve
  // one legacy use case at this point.
  // TODO(https://crbug.com/814787): Eliminate the need for this method.
  void SetPrimaryAccountSynchronously(const std::string& gaia_id,
                                      const std::string& email_address,
                                      const std::string& refresh_token);

  // SigninManagerBase::Observer:
  void GoogleSigninSucceeded(const AccountInfo& account_info) override;
  void GoogleSignedOut(const AccountInfo& account_info) override;

#if !defined(OS_CHROMEOS)
  // SigninManager::DiagnosticsClient:
  // Override these to update |primary_account_info_| before any observers of
  // SigninManager are notified of the signin state change, ensuring that any
  // such observer flows that eventually interact with IdentityManager observe
  // its state as being consistent with that of SigninManager.
  void WillFireGoogleSigninSucceeded(const AccountInfo& account_info) override;
  void WillFireGoogleSignedOut(const AccountInfo& account_info) override;
#endif

  // OAuth2TokenService::DiagnosticsObserver:
  void OnAccessTokenRequested(
      const std::string& account_id,
      const std::string& consumer_id,
      const OAuth2TokenService::ScopeSet& scopes) override;

  // Removes synchronously token from token_service
  void HandleRemoveAccessTokenFromCache(
      const std::string& account_id,
      const OAuth2TokenService::ScopeSet& scopes,
      const std::string& access_token);

  // Notifies diagnostics observers. Invoked asynchronously from
  // OnAccessTokenRequested() to mimic the effect of receiving this call
  // asynchronously from the Identity Service.
  void HandleOnAccessTokenRequested(const std::string& account_id,
                                    const std::string& consumer_id,
                                    const OAuth2TokenService::ScopeSet& scopes);

  // Backing signin classes. NOTE: We strive to limit synchronous access to
  // these classes in the IdentityManager implementation, as all such
  // synchronous access will become impossible when IdentityManager is backed by
  // the Identity Service.
  SigninManagerBase* signin_manager_;
  ProfileOAuth2TokenService* token_service_;

  // The latest (cached) value of the primary account.
  AccountInfo primary_account_info_;

  // Lists of observers.
  // Makes sure lists are empty on destruction.
  base::ObserverList<Observer, true> observer_list_;
  base::ObserverList<DiagnosticsObserver, true> diagnostics_observer_list_;

  base::WeakPtrFactory<IdentityManager> weak_ptr_factory_;

  DISALLOW_COPY_AND_ASSIGN(IdentityManager);
};

}  // namespace identity

#endif  // SERVICES_IDENTITY_PUBLIC_CPP_IDENTITY_MANAGER_H_
2,823
3,262
package com.tencent.angel.graph.common.data;

import com.tencent.angel.common.ByteBufSerdeUtils;
import com.tencent.angel.common.StreamSerdeUtils;
import com.tencent.angel.ml.math2.vector.IntFloatVector;
import com.tencent.angel.ps.storage.vector.element.IElement;
import io.netty.buffer.ByteBuf;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

/**
 * Parameter-server storage element wrapping a feature vector
 * ({@link IntFloatVector}). Supports (de)serialization both to Netty
 * {@link ByteBuf}s and to plain data streams via the project's serde utils.
 */
public class Feature implements IElement {

  // Wrapped feature vector; null until set or deserialized.
  private IntFloatVector features;

  public Feature(IntFloatVector features) {
    this.features = features;
  }

  // No-arg constructor required for deserialization; starts with no vector.
  public Feature() {
    this(null);
  }

  @Override
  public Object deepClone() {
    // Just return original features
    // NOTE(review): returns the underlying vector itself (not a copied
    // Feature), so the "clone" shares state with this instance — confirm
    // callers treat the result as read-only.
    return features;
  }

  // Writes the vector into a Netty buffer.
  @Override
  public void serialize(ByteBuf output) {
    ByteBufSerdeUtils.serializeVector(output, features);
  }

  // Reads the vector back from a Netty buffer, replacing any current value.
  @Override
  public void deserialize(ByteBuf input) {
    features = (IntFloatVector) ByteBufSerdeUtils.deserializeVector(input);
  }

  // Number of bytes serialize(ByteBuf) will write.
  @Override
  public int bufferLen() {
    return ByteBufSerdeUtils.serializedVectorLen(features);
  }

  // Writes the vector to a data stream.
  @Override
  public void serialize(DataOutputStream output) throws IOException {
    StreamSerdeUtils.serializeVector(output, features);
  }

  // Reads the vector back from a data stream, replacing any current value.
  @Override
  public void deserialize(DataInputStream input) throws IOException {
    features = (IntFloatVector) StreamSerdeUtils.deserializeVector(input);
  }

  // Number of bytes serialize(DataOutputStream) will write.
  @Override
  public int dataLen() {
    return StreamSerdeUtils.serializedVectorLen(features);
  }

  public IntFloatVector getFeatures() {
    return features;
  }

  public void setFeatures(IntFloatVector features) {
    this.features = features;
  }
}
551
809
/** * @file * @brief * * @author <NAME> * @date 30.10.2014 */ #include <assert.h> #include <stdint.h> #include <system_stm32h7xx.h> #include <stm32h7xx_hal.h> #include <stm32h7xx_hal_cortex.h> #include <framework/mod/options.h> #include <stm32h745i_discovery.h> #include <stm32h745i_discovery_sdram.h> #include <embox/unit.h> EMBOX_UNIT_INIT(sdram_init); #define FMC_SWAP OPTION_GET(BOOLEAN, fmc_swap) static int sdram_init(void) { BSP_SDRAM_Init(0); if (FMC_SWAP) { //HAL_SetFMCMemorySwappingConfig(FMC_BCR1_BMAP_0); HAL_SetFMCMemorySwappingConfig(FMC_SWAPBMAP_SDRAMB2); } return 0; }
305
1,831
<filename>logdevice/test/utils/IntegrationTestUtils.h<gh_stars>1000+ /** * Copyright (c) 2017-present, Facebook, Inc. and its affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #pragma once #include <chrono> #include <initializer_list> #include <memory> #include <string> #include <vector> #include <boost/filesystem.hpp> #include <folly/Optional.h> #include <folly/Subprocess.h> #include <folly/experimental/TestUtil.h> #include "folly/io/async/EventBase.h" #include "logdevice/common/EpochMetaData.h" #include "logdevice/common/ShardAuthoritativeStatusMap.h" #include "logdevice/common/ShardID.h" #include "logdevice/common/configuration/Configuration.h" #include "logdevice/common/configuration/InternalLogs.h" #include "logdevice/common/configuration/UpdateableConfig.h" #include "logdevice/common/configuration/logs/LogsConfigTree.h" #include "logdevice/common/debug.h" #include "logdevice/common/event_log/EventLogRecord.h" #include "logdevice/common/replicated_state_machine/RsmVersionTypes.h" #include "logdevice/common/test/NodesConfigurationTestUtil.h" #include "logdevice/common/test/TestUtil.h" #include "logdevice/include/ClientSettings.h" #include "logdevice/include/LogsConfigTypes.h" #include "logdevice/include/types.h" #include "logdevice/test/utils/AdminServer.h" #include "logdevice/test/utils/MetaDataProvisioner.h" #include "logdevice/test/utils/NodesConfigurationFileUpdater.h" #include "logdevice/test/utils/ParamMaps.h" #include "logdevice/test/utils/port_selection.h" namespace facebook { namespace logdevice { /** * @file Utilities for running LogDevice clusters in integration tests. * * In the common case, this suffices to spin up a LogDevice cluster in an * integration test: * * // Start a LogDevice cluster with 5 nodes on localhost. 
* auto cluster = logdevice::IntegrationTestUtils::ClusterFactory().create(5); * * // Create a Client that can be used to write and read data. * std::shared_ptr<logdevice::Client> client = cluster->createClient(); * * // Cluster shuts down when it goes out of scope. */ /* * Several environment variables alter how the utilities behave and can aid * debugging. * * LOGDEVICE_TEST_PAUSE_FOR_GDB=1 makes ClusterFactory pause the main process * right after starting the cluster, allowing the user to attach to server * processes with GDB. For convenience, command lines to attach GDB are * printed to stderr. * * LOGDEVICE_TEST_LEAVE_DATA=1 makes Cluster not delete data from the * filesystem when shutting down. For convenience, the path is logged at info * level. * * LOGDEVICE_TEST_BINARY controls which binary to run as the server. If not * set, _bin/logdevice/server/logdeviced is used. * * LOGDEVICE_TEST_ADMIN_SERVER_BINARY controls which binary to run as the admin * server. If not set, _bin/logdevice/ops/admin_server/ld-admin-server is used. 
* LOGDEVICE_TEST_USE_TCP use TCP ports instead of UNIX domain sockets * * LOGDEVICE_LOG_LEVEL set the default log level used by tests * * LOGDEVICE_TEST_FORCE_SSL forces all sockets to be SSL-enabled * * LOGDEVICE_TEST_NO_TIMEOUT do not enforce timeout in tests * * LOGDEVICE_TEST_MESSAGE_ERROR_CHANCE together defines chance and status * LOGDEVICE_TEST_MESSAGE_STATUS parameters for message error injection * */ class Client; class EpochStore; class FileConfigSource; class ShardedLocalLogStore; namespace configuration { namespace nodes { class NodesConfigurationStore; }} // namespace configuration::nodes namespace test { struct ServerInfo; } namespace thrift { class AdminAPIAsyncClient; } namespace IntegrationTestUtils { class Cluster; class Node; // used to specify the type of rockdb local logstore for storage // nodes in the cluster enum class RocksDBType : uint8_t { SINGLE, PARTITIONED }; enum class NodesConfigurationSourceOfTruth { NCM, SERVER_CONFIG }; /** * Configures a cluster and creates it. */ class ClusterFactory { public: ClusterFactory(); /** * Creates a Cluster object, configured with logs 1 and 2. * * Unless setNodes() is called (in which case nnodes is ignored), * Cluster will contain the specified number of nodes. If nnodes = 1, the one * node will act as both a sequencer and storage node. Otherwise, there will * be one sequencer node and the rest will be storage nodes. * * By default, an initial epoch metadata will be provisioned for all logs * in the configuration, both in epoch store and in metadata storage nodes * as metadata log records. The nodeset in the metadata includes ALL nodes in * the cluster and replication factor is the value specified in LogConfig. * Regarding the epoch, for metadata log records in storage nodes, _epoch_ and * _effective_since_ are both EPOCH_MIN, while for metadata in the epoch * store, _epoch_ is set to be EPOCH_MIN+1 with _effective_since_ remains * EPOCH_MIN. 
* * A side effect of the provison is that sequencer nodes of the cluster * will be started on epoch 2 (EPOCH_MIN+1) and recovery will be performed * on sequencer nodes when the cluster starts. */ std::unique_ptr<Cluster> create(int nnodes); /** * Creates a Cluster, specifying the full config to be used by the * cluster. This allows for fine control of configuration. Only the * addresses of nodes will be overwritten by the factory. */ std::unique_ptr<Cluster> create(const Configuration& config); /** * Call the passed in function on this object. Typically used to * collect settings common to multiple test cases in a single * function, and apply them by adding a single function call in * each test case function. Example: * * static void commonOptions(IntegrationTestUtils::ClusterFactory& cluster) { * cluster * .setRocksDBType(IntegrationTestUtils::RocksDBType::PARTITIONED); * } * * ... * * auto cluster = IntegrationTestUtils::ClusterFactory() * .apply(commonOptions) * .setLogAttributes(log_attrs) * .setEventLogAttributes(log_attrs) * .setNumLogs(1) * .create(nnodes); */ template <typename F> ClusterFactory& apply(F fn) { fn(*this); return *this; } /** * Use tcp ports instead of unix domain sockets. This can be used for tests * that verify behaviors specific to TCP. */ ClusterFactory& useTcp() { use_tcp_ = true; return *this; } /** * Sets the default log attributes to use for logs when using the simple * factory create(nnodes). If this is never called, a default log config will * be used with reasonable replication parameters depending on nnodes. */ ClusterFactory& setLogAttributes(logsconfig::LogAttributes log_attributes) { log_attributes_ = log_attributes; return *this; } /** * Set the attributes for the internal config log. */ ClusterFactory& setConfigLogAttributes(logsconfig::LogAttributes attrs); /** * Set the attributes for the internal event log. 
*/ ClusterFactory& setEventLogAttributes(logsconfig::LogAttributes attrs); /** * Set the attributes for the internal event log delta. * NOTE: unlike setEventLogAttributes() above, does not set attributes * for the "event_log_snapshots" log. */ ClusterFactory& setEventLogDeltaAttributes(logsconfig::LogAttributes attrs); /** * Set the attributes for the internal maintenance log */ ClusterFactory& setMaintenanceLogAttributes(logsconfig::LogAttributes attrs); /** * Enables LogsConfigManager for clusters. Strongly recommend also calling * useHashBasedSequencerAssignment(), especially if creating log groups or * directories after startup, since that will: * a) enable lazy sequencer activation (since static activation won't work) * b) enable gossip, for failure detector, required for lazy activation * c) wait for all nodes to be marked as ALIVE via gossip */ ClusterFactory& enableLogsConfigManager(); /** * Sets the metadata log config to use for logs when using the simple factory * create(nnodes). If this is never called, a default metadata log config * will be used with metadata log stored on all nodes and replication factor * set to be min(3, num_storage_nodes) */ ClusterFactory& setMetaDataLogsConfig(Configuration::MetaDataLogsConfig meta_config) { meta_config_ = meta_config; return *this; } /** * Sets the number of logs in the config. Logs will be numbered 1 through * `n'. Ignored when LogsConfigManager is enabled, use * setNumLogsConfigManagerLogs instead. */ ClusterFactory& setNumLogs(int n) { num_logs_ = n; return *this; } /** * Sets that number of logs that needs to be created if LogsConfigManager is * enabled. It's created by client API calls after after bootstrapping the * cluster. It's ignored when `defer_start_` is true. */ ClusterFactory& setNumLogsConfigManagerLogs(int n) { num_logs_config_manager_logs_ = n; return *this; } /** * If called, create() will use specified node configs. 
*/ ClusterFactory& setNodes(std::shared_ptr<const NodesConfiguration> nodes) { nodes_config_ = std::move(nodes); return *this; } /** * Set number of racks to spread the storage amongst. Ignored if the * node config is overridden with `setNodes()`. By default the number of racks * is 1. Nodes will be assigned to a rack in round robin fashion, ie if there * are 2 racks, nodes with nid % 2 == 0 will be in rack 1 and the others in * rack 2. */ ClusterFactory& setNumRacks(int num_racks) { num_racks_ = num_racks; return *this; } /** * Set the number of shards to use on storage nodes. * Ignored if you configure nodes using setNodes(). */ ClusterFactory& setNumDBShards(int num_db_shards) { num_db_shards_ = num_db_shards; return *this; } /** * Sets the rocksdb type for storage nodes in the cluster */ ClusterFactory& setRocksDBType(RocksDBType db_type) { rocksdb_type_ = db_type; setParam("--rocksdb-partitioned", db_type == RocksDBType::PARTITIONED ? "true" : "false"); return *this; } /** * Sets whether the standalone admin server will be running or not. */ ClusterFactory& useStandaloneAdminServer(bool enable) { use_standalone_admin_server_ = enable; return *this; } /** * If called, epoch metadata will be provisioned in epoch store and metadata * storage nodes on cluster startup. */ ClusterFactory& doPreProvisionEpochMetaData() { provision_epoch_metadata_ = true; return *this; } ClusterFactory& setNodesConfigurationSourceOfTruth(NodesConfigurationSourceOfTruth sot) { nodes_configuration_sot_ = sot; return *this; } /** * By default, epoch store metadata is provisioned and metadata logs are * written by sequencers. If this method is called, sequencers will be * precluded from writing metadata. Note that this will have no effect if * ClusterFactory::setMetaDataLogsConfig() is called with a MetaDataLogsConfig * instance as an argument. 
*/ ClusterFactory& doNotLetSequencersProvisionEpochMetaData() { let_sequencers_provision_metadata_ = false; return *this; } /** * If metadata is to be provisioned by the test cluster, and it already * exists, the default behaviour is to fail provisioning with E::EXISTS. * Call this method to silently use existing metadata instead. */ ClusterFactory& allowExistingMetaData() { allow_existing_metadata_ = true; return *this; } /** * Skips assigning SSL addresses to nodes. */ ClusterFactory& noSSLAddress() { no_ssl_address_ = true; return *this; } enum class EventLogMode { NONE, DELTA_LOG_ONLY, SNAPSHOTTED }; /** * @param mode one of: * - EventLogMode::NONE: an event log is not provisioned * in the cluster's config. TODO(#8466255): currently the event * log is not mandatory in the config file. When we make it * mandatory, all tests that use this option must be modified; * - EventLogMode::DELTA_LOG_ONLY: no snapshot log is provisioned; * - EventLogMode::SNAPSHOTTED: both the delta and snapshot logs * are provisioned. If the --event-log-snapshotting setting is * true, the content of the delta log will be periodically * snapshotted onto this log. */ ClusterFactory& eventLogMode(EventLogMode mode) { event_log_mode_ = mode; return *this; } /** * If called, epoch metadata will be provisioned using the specific nodeset * selector. Otherwise, SELECT_ALL will be used to selecto all nodes in the * cluster as the nodeset */ ClusterFactory& setProvisionNodeSetSelector(std::shared_ptr<NodeSetSelector> selector) { provision_nodeset_selector_ = std::move(selector); return *this; } /** * Sets replication factor to use for internal and metadata logs if * set*LogsConfig() wasn't called. * If not called, the default is 3 for metadata log, 2 for internal logs. */ ClusterFactory& setInternalLogsReplicationFactor(int r) { internal_logs_replication_factor_ = r; return *this; } /** * If called, create() will not immediately start all nodes after creating * a Cluster object. 
Instead, Cluster::start() should be called to run all * processes. */ ClusterFactory& deferStart() { defer_start_ = true; return *this; } /** * Sets a command line parameter for logdeviced processes. The scope parameter * can be used to specify that the parameter is only for sequencer nodes or * storage nodes. */ ClusterFactory& setParam(std::string key, std::string value, ParamScope scope = ParamScope::ALL) { return setParam(ParamSpec{key, value, scope}); } /** * Same as setParam(key, value, scope) but for parameters without values */ ClusterFactory& setParam(std::string key, ParamScope scope = ParamScope::ALL) { return setParam(ParamSpec{key, "true", scope}); } /** * Same as setParam(key, value, scope) or setParam(key, scope) as appropriate. */ ClusterFactory& setParam(ParamSpec spec) { // If the scope is ParamScope::ALL, we can safely add this to the server // config instead of command line args. // TODO: Codemod all the usages of ParamScope::ALL to use setServerSettings // directly. if (spec.scope_ == ParamScope::ALL) { // Trim the "--" prefix from the command line arg name. auto& key = spec.key_; ld_check(key.substr(0, 2) == "--"); setServerSetting(spec.key_.substr(2), spec.value_); } else { cmd_param_[spec.scope_][spec.key_] = spec.value_; } return *this; } /** * Sets a config setting for logdeviced processes. */ ClusterFactory& setServerSetting(std::string key, std::string value) { server_settings_[std::move(key)] = std::move(value); return *this; } /** * Sets a config setting for clients */ ClusterFactory& setClientSetting(std::string key, std::string value) { client_settings_[std::move(key)] = std::move(value); return *this; } /** * Sets the root directory for all the cluster's data. If never called, a * temporary directory is created. */ ClusterFactory& setRootPath(std::string path) { root_path_.assign(std::move(path)); return *this; } /** * Use a gossip-based failure detector and spread logs across all sequencer * nodes (based on a hash function). 
*/ ClusterFactory& useHashBasedSequencerAssignment(uint32_t gossip_interval_ms = 100, std::string suspect_duration = "0ms", bool use_health_based_hashing = false) { setParam("--gossip-enabled", ParamScope::ALL); setParam("--gossip-interval", std::to_string(gossip_interval_ms) + "ms", ParamScope::ALL); setParam("--suspect-duration", suspect_duration, ParamScope::ALL); // lazy sequencer bringup setParam("--sequencers", "lazy", ParamScope::SEQUENCER); hash_based_sequencer_assignment_ = true; if (!use_health_based_hashing) { setParam("--enable-health-based-sequencer-placement", "false", ParamScope::ALL); } return *this; } // Modify default HM parameters to avoid false positive detection of stalls or // unhealthy states. In many tests gossip interval is modified to be more // frequent causing delays in HM activation that is then detected and an // unhealthy status of tha node is propagated. Modifying the maximum tolerated // stalled percentages is due to the number of workers on each node being // reduced to a very small number (5). // Health based sequencer hashing is disabled by calling // useHashBasedSequencerAssignment, but in HM related tests its behaviour is // often needed, so this method handles toggling this setting too. 
ClusterFactory& setHealthMonitorParameters( uint32_t health_monitor_max_delay_ms = 240000, uint32_t watchdog_poll_interval = 50000, double worker_stall_percentage = 1.1, double queue_stall_percentage = 1.1, bool enable_health_based_sequencer_placement = true) { setParam("--health-monitor-max-delay", std::to_string(health_monitor_max_delay_ms) + "ms", ParamScope::ALL); setParam("--watchdog-poll-interval", std::to_string(watchdog_poll_interval) + "ms"); setParam("--health-monitor-max-stalled-worker-percentage", std::to_string(worker_stall_percentage), ParamScope::ALL); setParam("--health-monitor-max-overloaded-worker-percentage", std::to_string(queue_stall_percentage), ParamScope::ALL); setParam("--enable-health-based-sequencer-placement", std::to_string(enable_health_based_sequencer_placement), ParamScope::ALL); return *this; } ClusterFactory& enableSelfInitiatedRebuilding(std::string grace_period = "") { if (!grace_period.empty()) { setParam("--self-initiated-rebuilding-grace-period", grace_period); } return setParam("--enable-self-initiated-rebuilding", "true") .setParam("--disable-rebuilding", "false"); } /** */ ClusterFactory& enableMessageErrorInjection(); /** */ ClusterFactory& enableMessageErrorInjection(double chance, Status st) { setParam("--msg-error-injection-chance", std::to_string(chance), ParamScope::ALL); setParam("--msg-error-injection-status", error_name(st), ParamScope::ALL); if (chance != 0) { ld_info("Enabling message error injection with chance %.2f%% " "and status %s", chance, error_name(st)); } return *this; } /** * Sets the path to the server binary (relative to the build root) to use if * a custom one is needed. */ ClusterFactory& setServerBinary(std::string path) { server_binary_ = path; return *this; } /** * Sets the path to the admin server binary (relative to the build root) to * use if a custom one is needed. 
*/ ClusterFactory& setAdminServerBinary(std::string path) { admin_server_binary_ = path; return *this; } /** * By default, the cluster will use a traffic shaping configuration which is * designed for coverage of the traffic shaping logic in tests, but limits * throughput. This method allows traffic shaping to be turned off in cases * where performance matters. */ ClusterFactory& useDefaultTrafficShapingConfig(bool use) { use_default_traffic_shaping_config_ = use; return *this; } /** * This will be passed to logdeviced as --loglevel option. More precisely, * --loglevel will be set to the first item on this list that's defined: * 1. "--loglevel" value set with setParam(), * 2. value passed to setLogLevel(), * 3. LOGDEVICE_LOG_LEVEL environment variable, * 4. "info". */ ClusterFactory& setLogLevel(dbg::Level log_level) { default_log_level_ = log_level; return *this; } /** * Value of the cluster_name property in config. * Affects how stats are exported. */ ClusterFactory& setClusterName(std::string name) { cluster_name_ = name; return *this; } ClusterFactory& setLogGroupName(const std::string& name) { log_group_name_ = name; return *this; } /** * Generates a default log attribute (replication, extras) based on the * cluster size. This is used internally if setLogAttributes() is not called. * Exposed so that the logic can be reused. */ static logsconfig::LogAttributes createDefaultLogAttributes(int nstorage_nodes); private: folly::Optional<logsconfig::LogAttributes> log_attributes_; std::shared_ptr<const NodesConfiguration> nodes_config_; folly::Optional<Configuration::MetaDataLogsConfig> meta_config_; bool enable_logsconfig_manager_ = false; configuration::InternalLogs internal_logs_; ParamMaps cmd_param_; ServerConfig::SettingsConfig server_settings_; ServerConfig::SettingsConfig client_settings_; // If set to true, allocate tcp ports to be used by the tests for the nodes' // protocol and command ports instead of unix domain sockets. 
  bool use_tcp_ = false;

  // How many times to try the entire process of starting up the cluster (pick
  // ports, start servers, wait for them to start). Only applies when
  // `use_tcp_' is true as we don't expect flaky startup with Unix domain
  // sockets.
  int outer_tries_ = 5;
  int outerTries() const {
    return use_tcp_ ? outer_tries_ : 1;
  }

  // Provision the initial epoch metadata in epoch store and storage nodes
  // that store metadata.
  bool provision_epoch_metadata_ = false;

  // Controls whether the cluster should also update the NodesConfiguration
  // whenever the ServerConfig changes. This is there only during the
  // migration period.
  bool sync_server_config_to_nodes_configuration_ = true;

  // Whether to let sequencers provision metadata.
  bool let_sequencers_provision_metadata_ = true;

  // Allow pre-existing metadata when provisioning.
  bool allow_existing_metadata_ = false;

  // Don't set SSL addresses on nodes.
  bool no_ssl_address_ = false;

  // @see useDefaultTrafficShapingConfig()
  bool use_default_traffic_shaping_config_{true};

  // Defines how we should provision the event log.
  EventLogMode event_log_mode_{EventLogMode::DELTA_LOG_ONLY};

  // Nodeset selector used for provisioning epoch metadata.
  std::shared_ptr<NodeSetSelector> provision_nodeset_selector_;

  // Don't start all nodes when Cluster is created.
  bool defer_start_ = false;

  // How many logs in the config.
  int num_logs_ = 2;

  int num_logs_config_manager_logs_ = 0;

  // Number of shards for each storage node.
  int num_db_shards_ = 2;

  // Number of racks to spread the nodes amongst.
  int num_racks_ = 1;

  // See setInternalLogsReplicationFactor().
  int internal_logs_replication_factor_ = -1;

  // If set to true, logs are assumed to be spread across all sequencer nodes.
  // Otherwise, all appends are sent to the first node in the cluster.
  bool hash_based_sequencer_assignment_{false};

  // If unset, use a random choice between the two sources.
  folly::Optional<NodesConfigurationSourceOfTruth> nodes_configuration_sot_;

  // Whether to start the standalone admin server or not.
  bool use_standalone_admin_server_ = false;

  // Type of rocksdb local log store.
  RocksDBType rocksdb_type_ = RocksDBType::PARTITIONED;

  // Root path for all data if setRootPath() was called.
  folly::Optional<std::string> root_path_;

  // Server binary if setServerBinary() was called.
  folly::Optional<std::string> server_binary_;

  // Admin server binary if setAdminServerBinary() was called.
  folly::Optional<std::string> admin_server_binary_;

  std::string cluster_name_ = "integration_test";
  std::string log_group_name_ = "/ns/test_logs";

  // See setLogLevel().
  dbg::Level default_log_level_ =
      getLogLevelFromEnv().value_or(dbg::Level::INFO);

  // Helper method, one attempt in create(), repeated up to outer_tries_ times.
  std::unique_ptr<Cluster> createOneTry(const Configuration& config);

  static logsconfig::LogAttributes createLogAttributesStub(int nstorage_nodes);

  /**
   * Figures out the full path to the server binary, considering in order of
   * precedence:
   *
   * - the environment variable LOGDEVICE_TEST_BINARY,
   * - setServerBinary() override
   * - a default path
   */
  std::string actualServerBinary() const;

  /**
   * Figures out the full path to the admin server binary, considering in
   * order of precedence:
   *
   * - the environment variable LOGDEVICE_ADMIN_SERVER_BINARY,
   * - setAdminServerBinary() override
   * - a default path
   */
  std::string actualAdminServerBinary() const;

  // Set the attributes of an internal log.
  void setInternalLogAttributes(const std::string& name,
                                logsconfig::LogAttributes attrs);

  /**
   * Uses either the provided log_config_ or creates a new default one to
   * create a new logs config manager based log group. It requires that the
   * cluster is up and running.
   */
  std::unique_ptr<client::LogGroup>
  createLogsConfigManagerLogs(std::unique_ptr<Cluster>& cluster);

  void populateDefaultServerSettings();

  std::shared_ptr<const NodesConfiguration>
  provisionNodesConfiguration(int nnodes) const;
};

// All ports logdeviced can listen on.
struct ServerAddresses {
  // Number of port slots reserved per node. NOTE(review): withTCPPorts()
  // below uses indices 0-10 but skips 1 and 5 — presumably reserved slots;
  // confirm before reusing them.
  static constexpr size_t COUNT = 11;
  Sockaddr protocol;
  Sockaddr gossip;
  Sockaddr admin;
  Sockaddr server_to_server;
  Sockaddr protocol_ssl;
  Sockaddr server_thrift_api;
  Sockaddr client_thrift_api;
  Sockaddr data_low_priority;
  Sockaddr data_medium_priority;

  // If we're holding open sockets on the above ports, this list contains the
  // fd-s of these sockets. This list is cleared (and sockets closed) just
  // before starting the server process.
  std::vector<detail::PortOwner> owners;

  // Copies these addresses into the given service discovery entry; the SSL
  // address is only set when `ssl` is true.
  void toNodeConfig(configuration::nodes::NodeServiceDiscovery& node,
                    bool ssl) {
    using Priority =
        configuration::nodes::NodeServiceDiscovery::ClientNetworkPriority;
    node.default_client_data_address = protocol;
    node.gossip_address = gossip;
    if (ssl) {
      node.ssl_address.assign(protocol_ssl);
    }
    node.admin_address.assign(admin);
    node.server_to_server_address.assign(server_to_server);
    node.server_thrift_api_address.assign(server_thrift_api);
    node.client_thrift_api_address.assign(client_thrift_api);
    node.addresses_per_priority = {{Priority::LOW, data_low_priority},
                                   {Priority::MEDIUM, data_medium_priority}};
  }

  // Builds localhost addresses from pre-reserved TCP ports; takes ownership
  // of the port reservations (kept in `owners` until the server starts).
  static ServerAddresses withTCPPorts(std::vector<detail::PortOwner> ports) {
    std::string addr = get_localhost_address_str();
    ServerAddresses r;
    r.protocol = Sockaddr(addr, ports[0].port);
    r.gossip = Sockaddr(addr, ports[2].port);
    r.admin = Sockaddr(addr, ports[3].port);
    r.protocol_ssl = Sockaddr(addr, ports[4].port);
    r.server_to_server = Sockaddr(addr, ports[6].port);
    r.server_thrift_api = Sockaddr(addr, ports[7].port);
    r.client_thrift_api = Sockaddr(addr, ports[8].port);
    r.data_low_priority = Sockaddr(addr, ports[9].port);
    r.data_medium_priority = Sockaddr(addr, ports[10].port);
    r.owners = std::move(ports);
    return r;
  }

  // Builds unix domain socket addresses rooted at the given directory.
  static ServerAddresses withUnixSockets(const std::string& path) {
    ServerAddresses r;
    r.protocol = Sockaddr(path + "/socket_main");
    r.gossip = Sockaddr(path + "/socket_gossip");
    r.admin = Sockaddr(path + "/socket_admin");
    r.server_to_server = Sockaddr(path + "/socket_server_to_server");
    r.protocol_ssl = Sockaddr(path + "/ssl_socket_main");
    r.server_thrift_api = Sockaddr(path + "/server_thrift_api");
    r.client_thrift_api = Sockaddr(path + "/client_thrift_api");
    r.data_low_priority = Sockaddr(path + "/socket_data_low_pri");
    r.data_medium_priority = Sockaddr(path + "/socket_data_medium_pri");
    return r;
  }
};

/**
 * RAII-style container for a LogDevice cluster running on localhost.
 */
class Cluster {
 public:
  using Nodes = std::map<node_index_t, std::unique_ptr<Node>>;

  ~Cluster();

  /**
   * Used in conjunction with ClusterFactory::deferStart() to run a process for
   * each node in the cluster. Waits for all nodes to start.
   *
   * @param indices if non-empty, only a specified subset of nodes will be
   *                started
   *
   * @return 0 on success, -1 if any of the nodes fails to start
   */
  int start(std::vector<node_index_t> indices = {});

  /**
   * Kill all running nodes in the cluster.
   */
  void stop();

  /**
   * DEPRECATED, Will be removed and replaced by the implementation of
   * expandViaAdminServer in the future.
   *
   * Expand the cluster by adding nodes with the given indices.
   * @return 0 on success, -1 on error.
   */
  int expand(std::vector<node_index_t> new_indices, bool start = true);

  /**
   * Expand the cluster by adding `nnodes` with consecutive indices after the
   * highest existing one.
   *
   * The newly added nodes will be distributed among the configured number
   * of racks in this cluster.
   * @return 0 on success, -1 on error.
   */
  int expand(int nnodes, bool start = true);

  /**
   * Expand the cluster by adding nodes with the given indices.
   * @return 0 on success, -1 on error.
   */
  int expandViaAdminServer(thrift::AdminAPIAsyncClient& admin_client,
                           std::vector<node_index_t> new_indices,
                           bool start = true,
                           int num_racks = 1);

  /**
   * Expand the cluster by adding `nnodes` with consecutive indices after the
   * highest existing one.
   * @return 0 on success, -1 on error.
   */
  int expandViaAdminServer(thrift::AdminAPIAsyncClient& admin_client,
                           int nnodes,
                           bool start_nodes = true,
                           int num_racks = 1);

  /**
   * Shrink the cluster by removing the given nodes.
   * @return 0 on success, -1 on error.
   *
   * Note that this doesn't do rebuilding. If you shrink out nodes that have
   * some records (including metadata/internal logs), you'll likely see data
   * loss or underreplication.
   */
  int shrink(std::vector<node_index_t> indices);

  /**
   * Shrink the cluster by removing `nnodes` last nodes.
   * @return 0 on success, -1 on error.
   */
  int shrink(int nnodes);

  /**
   * Shrink the cluster by removing nodes with the given indices.
   * @return 0 on success, -1 on error.
   */
  int shrinkViaAdminServer(thrift::AdminAPIAsyncClient& admin_client,
                           std::vector<node_index_t> new_indices);

  /**
   * Shrink the cluster by removing `nnodes` last nodes.
   *
   * Note that these nodes must already be fully disabled/drained and stopped.
   * @return 0 on success, -1 on error.
   */
  int shrinkViaAdminServer(thrift::AdminAPIAsyncClient& admin_client,
                           int nnodes);

  std::shared_ptr<UpdateableConfig> getConfig() const {
    return config_;
  }

  std::string getConfigPath() const {
    return config_path_;
  }

  std::string getNCSPath() const {
    return ncs_path_;
  }

  /**
   * This creates a client by calling ClientFactory::create() that does not
   * share the loaded config_.
   *
   * If use_file_based_ncs is set to true, the client will use
   * FileBasedNodesConfigurationStore to fetch and update the
   * NodesConfiguration instead of the server based one. Use this *only* if you
   * want to create a client without establishing any connection to the nodes
   * to fetch the Nodes Config (e.g. when all the nodes are dead).
   */
  std::shared_ptr<Client> createClient(
      std::chrono::milliseconds timeout = getDefaultTestTimeout(),
      std::unique_ptr<ClientSettings> settings =
          std::unique_ptr<ClientSettings>(),
      std::string credentials = "",
      bool use_file_based_ncs = false);

  const Nodes& getNodes() const {
    return nodes_;
  }

  /**
   * Returns the admin server instance if started.
   */
  AdminServer* FOLLY_NULLABLE getAdminServer() {
    return admin_server_.get();
  }

  Node& getNode(node_index_t index) {
    ld_assert(nodes_.count(index));
    ld_check(nodes_[index] != nullptr);
    return *nodes_.at(index);
  }

  Node& getSequencerNode() {
    ld_check(!hash_based_sequencer_assignment_);
    // For now, the first node is always the sequencer
    return getNode(0);
  }

  // Returns the list of non-stopped storage nodes.
  std::vector<node_index_t> getRunningStorageNodes() const;

  // When using hash-based sequencer assignment, the above is not sufficient.
  // Hash-based sequencer assignment is necessary to have failover.
  // Returns -1 if there is no sequencer for the log or it is unavailable.
  int getHashAssignedSequencerNodeId(logid_t log_id, Client* client);

  // Call function for every node. Function signature is F(Node&).
  // By default processes each node in its own thread.
  // Set use_threads = false to do everything in the calling thread.
  template <typename F>
  void applyToNodes(F func, bool use_threads = true) {
    NodeSetIndices nodes;
    for (auto& node : nodes_) {
      nodes.push_back(node.first);
    }
    applyToNodes(nodes, func, use_threads);
  }

  template <typename F>
  void applyToNodes(const NodeSetIndices& nodeset,
                    F func,
                    bool use_threads = true) {
    if (use_threads) {
      std::vector<std::thread> ts;
      for (node_index_t nidx : nodeset) {
        ts.emplace_back([this, func, nidx] { func(getNode(nidx)); });
      }
      for (auto& t : ts) {
        t.join();
      }
    } else {
      for (node_index_t nidx : nodeset) {
        func(getNode(nidx));
      }
    }
  }

  /**
   * Returns an EpochStore object representing the store that a sequencer node
   * will use. Intended to be used with ClusterFactory::deferStart() to set
   * initial epochs for logs before starting nodes.
   */
  std::unique_ptr<EpochStore> createEpochStore();

  /**
   * Updates epoch store to set the next epoch for log_id.
   */
  void setStartingEpoch(logid_t log_id,
                        epoch_t epoch,
                        epoch_t last_expected_epoch = EPOCH_INVALID);

  /**
   * Provision the initial epoch metadata on metadata storage nodes,
   * must be called when the storage nodes are not started.
   *
   * @param selector                 nodeset selector for provisioning, if not
   *                                 given, SELECT_ALL is used
   * @param allow_existing_metadata  whether provisioning will succeed if a log
   *                                 is already provisioned. If this is false,
   *                                 it will fail with E::EXISTS.
   * @return 0 for success, -1 for failure
   */
  int provisionEpochMetaData(
      std::shared_ptr<NodeSetSelector> selector = nullptr,
      bool allow_existing_metadata = false);

  /**
   * Like `provisionEpochMetaData`, but asks for a specific set of shard
   * indices to be used. Also needs to be called when storage nodes are not
   * started.
   *
   * @param node_indices set of node indices to use.
   * @param allow_existing_metadata see `provisionEpochMetaData`
   */
  int provisionEpochMetadataWithShardIDs(
      std::set<node_index_t> node_indices,
      bool allow_existing_metadata = true);

  /**
   * Updates the NC on disk via FileBasedNodesConfigurationStore.
   * @param nodes_config the NC to write
   * @return 0 for success, -1 for failure
   */
  int updateNodesConfiguration(
      const configuration::nodes::NodesConfiguration& nodes_config);

  /**
   * Replaces the node at the specified index. Kills the current process if
   * still running, deletes the node's data, then starts up a new one and
   * updates the cluster config.
   *
   * @return 0 on success, -1 if node fails to start or there are no free ports
   */
  int replace(node_index_t index, bool defer_start = false);

  /**
   * Replaces the node at the specified index. Kills the current process if
   * still running, deletes the node's data, then starts up a new one and
   * updates the cluster config using the Admin API.
   *
   * @return 0 on success, -1 if node fails to start or there are no free ports
   */
  int replaceViaAdminServer(thrift::AdminAPIAsyncClient& admin_client,
                            node_index_t index,
                            bool defer_start = false);

  /**
   * Bumps up the generation using the Admin API.
   */
  int bumpGeneration(thrift::AdminAPIAsyncClient& admin_client,
                     node_index_t index);

  /**
   * Update node's attributes in config.
   */
  int updateNodeAttributes(
      node_index_t index,
      configuration::StorageState storage_state,
      int sequencer_weight,
      folly::Optional<bool> enable_sequencing = folly::none);

  // A guide to the few wait*() methods below:
  //  - When using static sequencer placement (default),
  //    waitUntilAllSequencersQuiescent() guarantees that subsequent appends
  //    won't fail without a good reason and that sequencers won't reactivate
  //    without a good reason.
  //  - When using hash-based sequencer placement
  //    (useHashBasedSequencerAssignment()),
  //    waitUntilAllStartedAndPropagatedInGossip() guarantees that subsequent
  //    appends won't fail to activate sequencer without a good reason.
  //    If you want to wait for the newly activated sequencer to finish
  //    recovery, metadata log write, unnecessary reactivation, etc, then you
  //    can also call waitUntilAllSequencersQuiescent() after the append that
  //    activated the sequencer.
  //  - waitForConfigUpdate() is for after you updated the config file
  //    (e.g. using writeConfig()).
  //  - Most of the other wait methods are either obsolete or only useful for
  //    particular test cases that want something very specific.
  //    Many call sites are using them inappropriately (e.g. waitForRecovery()
  //    instead of waitUntilAllSequencersQuiescent(), or
  //    waitUntilAllAvailable() instead of
  //    waitUntilAllStartedAndPropagatedInGossip()); feel free to fix those
  //    when they make tests flaky; I didn't dare mass-replace them.
  /**
   * Wait for all nodes to complete all sequencer activation-related activity:
   * activation, recoveries, metadata log writes, metadata log recoveries,
   * reactivations caused by metadata log writes, nodeset updates caused by
   * config changes, etc. If you're not making any changes to the cluster
   * (starting/stopping nodes, updating config/settings, etc), after this call
   * sequencers are not going to reactivate, get stuck in recovery (even if
   * there's no f-majority of available nodes), or do other unexpected things.
   * You'll get consecutive LSNs for appends.
   *
   * Note that this only applies to sequencers that have already at least
   * started activation as of the time of this call. If static sequencer
   * placement is used (i.e. useHashBasedSequencerAssignment() wasn't called),
   * that's all sequencers; otherwise, that's typically only sequencers for the
   * logs that received at least one append. Also note that, even though
   * appends done after this call should all go to the same epoch and get
   * consecutive LSNs, this may be a higher epoch than for appends done before
   * the call.
   */
  int waitUntilAllSequencersQuiescent(
      std::chrono::steady_clock::time_point deadline =
          std::chrono::steady_clock::time_point::max());

  /**
   * Wait until the given nodes see each other and themselves as alive and
   * started in gossip, and see everyone else as dead.
   * For a freshly started cluster, until this wait is done, appends may fail
   * with E::ISOLATED if sequencer node happens to see itself as alive but
   * others as dead.
   *
   * @param nodes The set of nodes that should be alive. If folly::none, all
   *              running nodes (i.e. with Node::stopped_ == false).
   */
  int waitUntilAllStartedAndPropagatedInGossip(
      folly::Optional<std::set<node_index_t>> nodes = folly::none,
      std::chrono::steady_clock::time_point deadline =
          std::chrono::steady_clock::time_point::max());

  /**
   * Waits until all live nodes and clients have processed the
   * NodesConfiguration with at least the version passed.
   */
  void waitForServersAndClientsToProcessNodesConfiguration(
      membership::MembershipVersion::Type version);

  /**
   * Waits until all live nodes have a view of the config same as getConfig().
   * This doesn't guarantee much about server behavior because the
   * config update takes some time to propagate inside the server process,
   * e.g. to all workers; this method does *not* wait for such propagation.
   *
   * This is not reliable for most purposes.
   * If you rely on it your test will probably be flaky.
   */
  void waitForServersToPartiallyProcessConfigUpdate();

  /**
   * Wait for all sequencer nodes in the cluster to finish log recovery.
   * Caller needs to ensure recovery should happen on sequencer nodes.
   *
   * Warning: currently the implementation is not fully correct and, when using
   * hash-based sequencer placement, may incorrectly get stuck in rare cases
   * if sequencers preempt each other in a somewhat unusual sequence. To avoid
   * flakiness, prefer either waitUntilAllSequencersQuiescent()
   * or waiting for recovery of a specific log on a specific node.
   *
   * @return 0 if recovery is completed, -1 if the call timed out.
   */
  int waitForRecovery(std::chrono::steady_clock::time_point deadline =
                          std::chrono::steady_clock::time_point::max());

  // Waits until all nodes are available through gossip (ALIVE).
  int waitUntilAllAvailable(std::chrono::steady_clock::time_point deadline =
                                std::chrono::steady_clock::time_point::max());

  // Waits until all nodes are healthy through gossip (HEALTHY).
  int waitUntilAllHealthy(std::chrono::steady_clock::time_point deadline =
                              std::chrono::steady_clock::time_point::max());

  /**
   * Wait for all sequencer nodes in the cluster to write metadata log records
   * for all logs. This shouldn't block if sequencers_write_metadata_logs is
   * `false` in the metadata logs config.
   * @return 0 if all metadata logs were written, -1 if the call timed out.
   */
  int waitForMetaDataLogWrites(
      std::chrono::steady_clock::time_point deadline =
          std::chrono::steady_clock::time_point::max());

  /**
   * Wait for all nodes in the cluster except the ones specified in the skip
   * list to see the specified node in a DEAD/ALIVE state (depending on what
   * is submitted as the `alive` arg).
   */
  int waitUntilGossip(bool alive, /* set to false for waiting for dead */
                      uint64_t targetNode,
                      std::set<uint64_t> nodesToSkip = {},
                      std::chrono::steady_clock::time_point deadline =
                          std::chrono::steady_clock::time_point::max());

  /**
   * Wait for all nodes in the cluster except the ones specified in the skip
   * list to see the specified node in a certain health status (depending on
   * what is submitted as the `health_status` arg).
   */
  int waitUntilGossipStatus(
      uint8_t health_status, /* set to 3 for waiting for unhealthy */
      uint64_t targetNode,
      std::set<uint64_t> nodesToSkip = {},
      std::chrono::steady_clock::time_point deadline =
          std::chrono::steady_clock::time_point::max());

  /**
   * Waits until nodes specified in the parameter `nodes` are alive and fully
   * started (i.e. not in starting state) according to gossip. If `nodes` is
   * folly::none, all nodes in the cluster will be checked.
   */
  int waitUntilNoOneIsInStartupState(
      folly::Optional<std::set<uint64_t>> nodes = folly::none,
      std::chrono::steady_clock::time_point deadline =
          std::chrono::steady_clock::time_point::max());

  int waitUntilAllClientsPickedConfig(const std::string& serialized_config);

  /**
   * Same as ClusterFactory::setParam(). Only affects future logdeviced
   * instances, like the ones created by replace().
   */
  void setParam(std::string key, ParamScope scope = ParamScope::ALL) {
    ld_check(!key.empty());
    cmd_param_[scope][key] = ParamValue();
  }
  void setParam(std::string key,
                std::string value,
                ParamScope scope = ParamScope::ALL) {
    ld_check(!key.empty());
    cmd_param_[scope][key] = value;
  }

  /**
   * Undoes what setParam() did.
   */
  void unsetParam(std::string key, ParamScope scope = ParamScope::ALL) {
    ld_check(!key.empty());
    cmd_param_[scope].erase(key);
  }

  // Returns true if gossip is enabled in the ALL scope.
  // This assumes that the default for the --gossip-enabled flag is true.
  bool isGossipEnabled() const;

  /**
   * Check that all the data in the cluster is correctly replicated.
   *
   * @return 0 if all the data is correctly replicated, -1 otherwise.
   */
  using argv_t = std::vector<std::string>;
  int checkConsistency(argv_t additional_args = argv_t());

  /**
   * Convenience function that creates a MetaDataProvisioner object for
   * provisioning epoch metadata for logs on the cluster.
   * User of the provisioner object must ensure that the object will not
   * outlive the Cluster object.
   */
  std::unique_ptr<MetaDataProvisioner> createMetaDataProvisioner();

  /**
   * Read the event log of the cluster and build a ShardAuthoritativeStatusMap.
   * @param map Filled with the state read from the event log.
   * @return 0 on success, -1 on failure and err is set to:
   *         - E::NOTFOUND if the cluster has no event log;
   *         - Any error that can be reported by Client::getTailLSNSync() if
   *           this function could not retrieve the tail LSN of the event log;
   *         - Any error that can be reported by Reader::startReading() if
   *           this function cannot start reading the event log.
   */
  int getShardAuthoritativeStatusMap(ShardAuthoritativeStatusMap& map);

  /**
   * Do Node::waitUntilRSMSynced() on all nodes in @param nodes.
   * If `nodes` is empty, all nodes.
   */
  int waitUntilRSMSynced(const char* rsm,
                         lsn_t sync_lsn,
                         std::vector<node_index_t> nodes = {},
                         std::chrono::steady_clock::time_point deadline =
                             std::chrono::steady_clock::time_point::max());

  // Convenience wrapper for waitUntilRSMSynced() on the event log RSM.
  int waitUntilEventLogSynced(
      lsn_t sync_lsn,
      const std::vector<node_index_t>& nodes = {},
      std::chrono::steady_clock::time_point deadline =
          std::chrono::steady_clock::time_point::max()) {
    return waitUntilRSMSynced("event_log", sync_lsn, nodes, deadline);
  }

  // Convenience wrapper for waitUntilRSMSynced() on the logsconfig RSM.
  int waitUntilLogsConfigSynced(
      lsn_t sync_lsn,
      const std::vector<node_index_t>& nodes = {},
      std::chrono::steady_clock::time_point deadline =
          std::chrono::steady_clock::time_point::max()) {
    return waitUntilRSMSynced("logsconfig_rsm", sync_lsn, nodes, deadline);
  }

  /**
   * Partitions cluster by overwriting individual node's config with invalid
   * address for each node belonging to a different partition. Note that, upon
   * receiving the config update, each node is going to close existing
   * connections to nodes outside of their partition.
   */
  void partition(std::vector<std::set<int>> partitions);

  /**
   * Requires maintenance manager to be enabled.
   * This will create an internal maintenance to drain a shard. The created
   * maintenance will be applied via writing directly to the internal
   * maintenance log. This might change in the future so the caller should not
   * rely on this implementation detail.
   *
   * Internal maintenances will trigger rebuilding in RESTORE mode.
   *
   * @return true if the operation succeeded. On
   * `false` the value of `err` is set accordingly.
   */
  bool applyInternalMaintenance(Client& client,
                                node_index_t node_id,
                                uint32_t shard_idx,
                                const std::string& reason);

  /**
   * A quick helper that applies a maintenance (drain by default) to a given
   * shard.
   *
   * Note: this skips safety checks (creates IMMINENT maintenance)
   *
   * @return the created maintenance ID
   */
  std::string applyMaintenance(thrift::AdminAPIAsyncClient& admin_client,
                               node_index_t node_id,
                               uint32_t shard_idx,
                               const std::string& user = "integration_test",
                               bool drain = true,
                               bool force_restore = false,
                               const std::string& reason = "testing",
                               bool disable_sequencer = false);

  /**
   * Gracefully shut down the given nodes. Faster than calling shutdown() on
   * them one by one.
   * @return 0 if all processes returned zero exit status, -1 otherwise.
   */
  int shutdownNodes(const std::vector<node_index_t>& nodes);

  /**
   * Overwrites config file. If wait_for_update is true, waits for config_ to
   * pick up the update.
   * Note that if the update is going to be rejected, e.g. because the version
   * is smaller than current, wait_for_update would make this method wait
   * forever.
   *
   * Use waitForServersToPartiallyProcessConfigUpdate() to wait for nodes to
   * pick up the update.
   */
  int writeConfig(const ServerConfig* server_cfg,
                  const LogsConfig* logs_cfg,
                  bool wait_for_update = true);
  int writeConfig(const Configuration& cfg, bool wait_for_update = true);

  // Convenience wrappers
  int writeServerConfig(const ServerConfig* server_cfg) {
    return writeConfig(server_cfg, getConfig()->getLogsConfig().get());
  }
  int writeLogsConfig(const LogsConfig* logs_cfg) {
    return writeConfig(getConfig()->getServerConfig().get(), logs_cfg);
  }

  // See node_replacement_counters_ below. Nodes that were never replaced
  // report 1.
  node_gen_t getNodeReplacementCounter(node_index_t node) const {
    return node_replacement_counters_.count(node) > 0
        ? node_replacement_counters_.at(node)
        : 1;
  }
  void setNodeReplacementCounter(node_index_t node, node_gen_t rc) {
    node_replacement_counters_[node] = rc;
  }
  void bumpNodeReplacementCounter(node_index_t node) {
    ++node_replacement_counters_[node];
  }
  void setNodeReplacementCounters(std::map<node_index_t, node_gen_t> counters) {
    node_replacement_counters_ = std::move(counters);
  }

  NodesConfigurationSourceOfTruth getNodesConfigurationSourceOfTruth() const {
    return nodes_configuration_sot_;
  }

  // Requires: @param node must exist in the cluster.
  bool hasStorageRole(node_index_t node) const;

  // Send admin command `set` to all nodes.
  void updateSetting(const std::string& name, const std::string& value);
  void unsetSetting(const std::string& name);

  // Build a NodesConfigurationStore to modify the NodesConfiguration directly.
  std::unique_ptr<configuration::nodes::NodesConfigurationStore>
  buildNodesConfigurationStore() const;

  // Reads the nodes configuration from the cluster's NodesConfigurationStore.
  std::shared_ptr<const NodesConfiguration>
  readNodesConfigurationFromStore() const;

  // Create a self registering node with a given name. Does not start the
  // process.
  std::unique_ptr<Node>
  createSelfRegisteringNode(const std::string& name) const;

 private:
  // Private constructor. Factory (friend class) is only caller.
  Cluster(std::string root_path,
          std::unique_ptr<TemporaryDirectory> root_pin,
          std::string config_path,
          std::string epoch_store_path,
          std::string ncs_path,
          std::string server_binary,
          std::string admin_server_binary,
          std::string cluster_name,
          bool enable_logsconfig_manager,
          dbg::Level default_log_level,
          NodesConfigurationSourceOfTruth nodes_configuration_sot);

  // Directory where to store the data for a node (logs, db, sockets).
static std::string getNodeDataPath(const std::string& root, node_index_t index, int replacement_counter) { return getNodeDataPath(root, "N" + std::to_string(index) + ':' + std::to_string(replacement_counter)); } // Directory where to store the data for a node (logs, db, sockets) given the // directory name of the node. static std::string getNodeDataPath(const std::string& root, const std::string& name) { return root + "/" + name; } std::string getNodeDataPath(const std::string& root, node_index_t index) const { return getNodeDataPath(root, index, getNodeReplacementCounter(index)); } // Forms Sockaddr-s for the node. If use_tcp is true, picks and reserves // the ports. Otherwise forms paths for unix fomain sockets. static int pickAddressesForServers( const std::vector<node_index_t>& indices, bool use_tcp, const std::string& root_path, const std::map<node_index_t, node_gen_t>& node_replacement_counters, std::vector<ServerAddresses>& out); // Creates a Node instance for the specified config entry and starts the // process. Does not wait for process to start; call // node->waitUntilStarted() for that. std::unique_ptr<Node> createNode(node_index_t index, ServerAddresses addrs) const; // Helper for createNode(). Figures out the initial command line args for the // specified node ParamMap commandArgsForNode(const Node& node) const; // Creates an admin server instance for this cluster. This does not wait for // the process to start. std::unique_ptr<AdminServer> createAdminServer(); // Helper for createClient() to populate client // settings. void populateClientSettings(std::unique_ptr<ClientSettings>& settings, bool use_file_based_ncs) const; // We keep track whether the cluster was created using tcp ports or unix // domain sockets so that we can use the same method for new nodes created by // the expand() method. 
bool use_tcp_{false}; // How many times to try starting a server int outer_tries_ = 2; std::string root_path_; // If root_path_ is a temporary directory, this owns it std::unique_ptr<TemporaryDirectory> root_pin_; std::string config_path_; std::string epoch_store_path_; // path for the file-based nodes configuration store std::string ncs_path_; std::string server_binary_; std::string admin_server_binary_; std::string cluster_name_; bool enable_logsconfig_manager_ = false; const NodesConfigurationSourceOfTruth nodes_configuration_sot_; std::shared_ptr<UpdateableConfig> config_; FileConfigSource* config_source_; std::unique_ptr<ClientSettings> client_settings_; std::unique_ptr<NodesConfigurationFileUpdater> nodes_configuration_updater_; // ordered map for convenience Nodes nodes_; // The admin server object if standalone admin server is enabled. std::unique_ptr<AdminServer> admin_server_; // keep track of node replacement events. for nodes with storage role, the // counter should be in sync with the `generation' in its config. For nodes // without storage role, counter is only used for tracking/directory keeping // purpose but not reflected in the config std::map<node_index_t, node_gen_t> node_replacement_counters_; // command line parameters, set by the Factory ParamMaps cmd_param_; int num_db_shards_ = 4; // type of rocksdb local log store RocksDBType rocksdb_type_ = RocksDBType::PARTITIONED; // See ClusterFactory::hash_based_sequencer_assignment_ bool hash_based_sequencer_assignment_{false}; dbg::Level default_log_level_ = dbg::Level::INFO; // Controls whether the cluster should also update the NodesConfiguration // whenver the ServerConfig change. This is there only during the migration // period. bool sync_server_config_to_nodes_configuration_{false}; bool no_ssl_address_{false}; // keep handles around until the cluster is destroyed. 
std::vector<UpdateableServerConfig::HookHandle> server_config_hook_handles_; // A vector of all the clients that are created for this cluster. std::vector<std::weak_ptr<Client>> created_clients_; friend class ClusterFactory; }; /** * RAII-style container for a LogDevice server that is part of a Cluster. */ class Node { public: std::unique_ptr<folly::Subprocess> logdeviced_; std::string data_path_; std::string config_path_; std::string server_binary_; std::string name_; node_index_t node_index_; ServerAddresses addrs_; int num_db_shards_ = 4; // how many shards storage nodes will use // Random ID generated by constructor. Passed on the command line to the // server. waitUntilStarted() looks for this to verify that we are talking // to the right process. std::string server_id_; // Stopped until start() is called, as well as between suspend() and resume(), // or shutdown() and start(). bool stopped_ = true; bool gossip_enabled_ = true; // type of rocksdb local log store RocksDBType rocksdb_type_ = RocksDBType::PARTITIONED; // override cluster params for this particular node ParamMap cmd_args_; bool is_storage_node_ = true; bool is_sequencer_node_ = true; Node(); ~Node() { kill(); } /** * Creates a local log store instance for this node. Can be used with * ClusterFactory::deferStart() to prepopulate the store before logdeviced * is started, or to inspect the store after the node is stopped. */ std::unique_ptr<ShardedLocalLogStore> createLocalLogStore(); // Corrupts rocksdb DBs for given shards. rocksdb::DB::Open() will fail with // "Corruption" status. // If you've already called createLocalLogStore(), you can pass the result // here as `store` parameter, as an optimization to avoid opening DB again; // this method will close it. 
void corruptShards(std::vector<uint32_t> shards, std::unique_ptr<ShardedLocalLogStore> sharded_store = nullptr); void updateSetting(std::string name, std::string value); void unsetSetting(std::string name); std::string getDatabasePath() const { return data_path_ + "/db"; } std::string getShardPath(shard_index_t idx) const { return getDatabasePath() + "/shard" + std::to_string(idx); } std::string getLogPath() const { return data_path_ + "/log"; } void signal(int sig) { logdeviced_->sendSignal(sig); } /** * @return true if logdeviced is running. */ bool isRunning() const; void kill(); /** * Wipe the content of a shard on this node. */ void wipeShard(uint32_t shard); /** * Pauses logdeviced by sending SIGSTOP. Waits for the process to stop * accepting connections. */ void suspend(); /** * Resume logdeviced by sending SIGCONT. Waits for the process to start * accepting connections again. */ void resume(); /** * Starts logdeviced if not started already (without waiting for it to become * ready). */ void start(); /** * Restart server process and wait for it to be available if requested */ void restart(bool graceful = true, bool wait_until_available = true); /** * Performs a graceful shutdown of logdeviced by issuing a "stop" admin * command. * * @return logdeviced exit code. */ int shutdown(); // Creates a thrift client for admin server running on this node. std::unique_ptr<thrift::AdminAPIAsyncClient> createAdminClient() const; /** * Waits until the admin API is able to answer requests that need the event * log. This also ensures that we are in the fb303 ALIVE state before * returning. * * Note: this requires that the server is started with * --disable-rebuilding=false */ int waitUntilNodeStateReady(); /** * Waits for the server to start accepting connections. * @return 0 if started, -1 if the call timed out. 
*/ int waitUntilStarted(std::chrono::steady_clock::time_point deadline = std::chrono::steady_clock::time_point::max()); /** * Waits for the server using a gossip-based failure detector to mark itself * as available (i.e. ready to process appends). * @return 0 if available, -1 if the call timed out. */ int waitUntilAvailable(std::chrono::steady_clock::time_point deadline = std::chrono::steady_clock::time_point::max()); void waitUntilKnownDead(node_index_t other_node_index); int waitUntilHealthy(std::chrono::steady_clock::time_point deadline = std::chrono::steady_clock::time_point::max()); /** * Waits for the server using a gossip-based failure detector to mark another * node as alive (if `alive` is set to `true`) or dead. * * @return 0 if succeeded, -1 if timed out while waiting */ int waitUntilKnownGossipState( node_index_t other_node_index, bool alive, std::chrono::steady_clock::time_point deadline = std::chrono::steady_clock::time_point::max()); /** * Waits for the server using a gossip-based failure detector to mark another * node as having a certain health status. * * @return 0 if succeeded, -1 if timed out while waiting */ int waitUntilKnownGossipStatus( node_index_t other_node_index, uint8_t health_status, std::chrono::steady_clock::time_point deadline = std::chrono::steady_clock::time_point::max()); /** * Waits for the node to activate a sequencer for this log and finish * recovery. */ int waitForRecovery(logid_t log, std::chrono::steady_clock::time_point deadline = std::chrono::steady_clock::time_point::max()); /** * See Cluster::waitUntilAllSequencersQuiescent(). */ int waitUntilAllSequencersQuiescent( std::chrono::steady_clock::time_point deadline = std::chrono::steady_clock::time_point::max()); /** * Waits for the node to advance its LCE of @param log to be at least * @param epoch. 
*/ int waitForPurge(logid_t log, epoch_t epoch, std::chrono::steady_clock::time_point deadline = std::chrono::steady_clock::time_point::max()); /** * Wait until the node have read the event log or config log * up to @param sync_lsn and propagated it to all workers. * @param rsm is either "event_log" or "logsconfig_rsm". It gets translated * into admin command "info <rsm> --json", which we poll until the value in * column "Propagated read ptr" becomes >= sync_lsn. * * Note that in case of event_log the propagation is delayed * by --event-log-grace-period, so if you're using this method you probably * want to decrease --event-log-grace-period. In case of logsconfig_rsm, * the delay is --logsconfig-manager-grace-period. */ int waitUntilRSMSynced(const char* rsm, lsn_t sync_lsn, std::chrono::steady_clock::time_point deadline = std::chrono::steady_clock::time_point::max()); /** * Shorthand for waitUntilRSMSynced("event_log"/"logsconfig_rsm", ...). */ int waitUntilEventLogSynced( lsn_t sync_lsn, std::chrono::steady_clock::time_point deadline = std::chrono::steady_clock::time_point::max()) { return waitUntilRSMSynced("event_log", sync_lsn, deadline); } int waitUntilLogsConfigSynced( lsn_t sync_lsn, std::chrono::steady_clock::time_point deadline = std::chrono::steady_clock::time_point::max()) { return waitUntilRSMSynced("logsconfig_rsm", sync_lsn, deadline); } /** * Wait until all shards of this node are fully authoritative in event log. * Returns the lsn of the last update. * Does NOT wait for this information to propagate to the node itself; * use waitUntilEventLogSynced() for that. */ lsn_t waitUntilAllShardsFullyAuthoritative(std::shared_ptr<Client> client); /** * Wait until all shards of this node are authoritative empty. * Returns the lsn of the last update. * Does NOT wait for this information to propagate to the node itself; * use waitUntilEventLogSynced() for that. 
*/ lsn_t waitUntilAllShardsAuthoritativeEmpty(std::shared_ptr<Client> client); /** * Waits until all internal maintenances are removed for this particular * node. */ bool waitUntilInternalMaintenances( thrift::AdminAPIAsyncClient& admin_client, folly::Function<bool(const std::vector<thrift::MaintenanceDefinition>&)> predicate, const std::string& reason, std::chrono::steady_clock::time_point deadline = std::chrono::steady_clock::time_point::max()); bool waitUntilShardState( thrift::AdminAPIAsyncClient& admin_client, shard_index_t shard, folly::Function<bool(const thrift::ShardState&)> predicate, const std::string& reason, std::chrono::steady_clock::time_point deadline = std::chrono::steady_clock::time_point::max()); /** * Sends admin command `command' to command port and returns the result. */ std::string sendCommand(const std::string& command, std::chrono::milliseconds command_timeout = std::chrono::milliseconds(30000)) const; /** * Does sendCommand() and parses the output as a json table. * If we failed to send the command, or the result is empty, or the result * looks like an error, returns empty vector. If result looks like neither * json nor error message, crashes. */ std::vector<std::map<std::string, std::string>> sendJsonCommand(const std::string& command) const; /** * Returns the admin API address for this node */ folly::SocketAddress getAdminAddress() const; /** * Connects to the admin ports and returns the running server information */ folly::Optional<test::ServerInfo> getServerInfo(std::chrono::milliseconds command_timeout = std::chrono::milliseconds(30000)) const; /** * Waits for the logdeviced process to exit. * @return logdeviced return code. */ int waitUntilExited(); /** * Issues a STATS command to the node's command port and collects all stats * into a map. * * May return an empty map if the node is not up or not ready to accept * admin commands. 
*/ std::map<std::string, int64_t> stats() const; /** * Issues a COMPACT command to the node's command port and force a compaction * on the rocksdb locallogstore shard for the given logid. Pass in * LOGID_INVALID (default) as logid will let the node perform compaction * on all rocksdb shards. */ int compact(logid_t logid = LOGID_INVALID) const; /** * Issues a LOGSTORAGESTATE command to the node's command port and collects * the result into a map. * * May return an empty map if the node is not up or not ready to accept * admin commands. */ std::map<std::string, std::string> logState(logid_t log_id) const; /** * Issues a UP DOWN command to activate a sequencer for a given log_id on a * particular node * Returns a rsponse as a string */ std::string upDown(const logid_t log_id) const; /** * Issues an INFO SEQUENCER command to the node's command port and collects * the results in a map. * Returns an empty map if there is no sequencer for the log. */ std::map<std::string, std::string> sequencerInfo(logid_t log_id) const; /** * Issues a GOSSIP BLACKLIST command, and ld_check-s that it succeeds. */ void gossipBlacklist(node_index_t node_id) const; /** * Issues a GOSSIP WHITELIST command, and ld_check-s that it succeeds. */ void gossipWhitelist(node_index_t node_id) const; /** * Issues an INJECT SHARD_FAULT command * @returns false if in non-debug mode, as the command is only supported for * DEBUG builds, true otherwise. */ bool injectShardFault(std::string shard, std::string data_type, std::string io_type, std::string code, bool single_shot = false, folly::Optional<double> chance = folly::none, folly::Optional<uint32_t> latency_ms = folly::none); /** * Issues a NEWCONNECTIONS command, and ld_check-s that it succeeds. */ void newConnections(bool accept) const; /** * Issues a STARTRECOVERY command, and ld_check-s that it succeeds. 
*/ void startRecovery(logid_t logid) const; /** * Issues an INFO LOGSCONFIG_RSM command to the node's command port and * collects the results in a map. Returns an empty map if the node is not * reading the event log. */ std::map<std::string, std::string> logsConfigInfo() const; /** * Issues an INFO EVENT_LOG command to the node's command port and collects * the results in a map. * Returns an empty map if the node is not reading the event log. */ std::map<std::string, std::string> eventLogInfo() const; /** * Issues an INFO SOCKET command to the node's command port and collects * the results in a vector of maps. */ std::vector<std::map<std::string, std::string>> socketInfo() const; /** * Issues an INFO PARTITIONS command to the node's command port and collects * the results in a vector of maps. */ std::vector<std::map<std::string, std::string>> partitionsInfo() const; /** * Issues an INFO GOSSIP command to the node's command port to collect info * about the availability of other nodes. Results are stored in the map, with * keys corresponding to nodes, and values being either "ALIVE" or "DEAD". * Cluster has to be started with the --gossip-enable option. */ std::map<std::string, std::string> gossipInfo() const; /** * Issues an INFO GOSSIP command to the node's command port to collect info * about the health status of other nodes. Results are stored in the map, with * keys corresponding to nodes, and values being "UNDEFINED", "HEALTHY", * "OVERLOADED" or "UNHEALTHY". Cluster has to be started with the * --gossip-enable option. */ std::map<std::string, std::string> gossipStatusInfo() const; /** * Issues an INFO GOSSIP command to collect information about whether the node * is in starting state and display it. */ std::map<std::string, bool> gossipStarting() const; /* * Sends "info gossip" to command port via nc. 
* Returns a map with one of the following state strings as value * "" : If node is not in config * "DEAD" : If node is DEAD * "SUSPECT" : If node is SUSPECT * "ALIVE" : If node is ALIVE */ std::map<std::string, std::string> gossipState() const; std::map<node_index_t, std::string> getRsmVersions(logid_t rsm_log, RsmVersionType rsm_type) const; std::pair<std::string, std::string> getTrimmableVersion(logid_t rsm_log) const; /* * Sends "info gossip" to command port via nc. * * Returns a map where the value is a pair of status (like gossipInfo() or * gossipState()) and the count of the number of gossip time intervals where * we haven't recevied a message. */ std::map<std::string, std::pair<std::string, uint64_t>> gossipCount() const; /** * Sends "info gossip" to command port via nc. * * Returns a map where the key is the node name and the value is true if the * node is boycotted, false otherwise */ std::map<std::string, bool> gossipBoycottState() const; void resetBoycott(node_index_t node_index) const; /** * Issues an INFO GOSSIP command to the node's command port to collect info * about the isolation status of local domains in all different node location * scopes. Results are stored in the map, with a special key-value pair of * "enabled" : "true"/"false" indicating if domain isolation dection is * enabled, and key-value pairs of "<scope name>" : "ISOLATED"/"NOT_ISOLATED". * Cluster has to be started with the --gossip-enable option. */ std::map<std::string, std::string> domainIsolationInfo() const; /** * Issues an INFO PARTITIONS command to the node's command port to collect * information about the LocalLogStore time partitions active on the given * shard. The 'level' option is passed directly to the command: '0' = terse, * '1' = detailed, '2' = detailed + expensive to collect fields. 
*/ std::vector<std::map<std::string, std::string>> partitionsInfo(shard_index_t shard, int level) const; /** * Issues a INFO SHARD command to the node's command port and compiles * a map of dirty shard to dirty time ranges. */ std::map<shard_index_t, RebuildingRangesMetadata> dirtyShardInfo() const; /** * Issues a INFO SHARD command to the node and returns the "Rebuilding state" * field per shard. */ std::map<shard_index_t, std::string> rebuildingStateInfo() const; // Issues LOGSDB CREATE command. Returns PARTITION_INVALID if it failed. partition_id_t createPartition(uint32_t shard); Node& setParam(std::string key, std::string value) { cmd_args_[key] = value; return *this; } std::vector<std::string> commandLine() const; }; /** * Write to the event log to trigger rebuilding of a shard. * * @param client Client to use to write to event log. * @param node Node for which to rebuild a shard. * @param shard Shard to rebuild. * @param flags Flags to use. * @param rrm Time ranges for requesting time-ranged rebuilding (aka * mini rebuilding) * @return LSN of the event log record or LSN_INVALID on failure. */ lsn_t requestShardRebuilding(Client& client, node_index_t node, uint32_t shard, SHARD_NEEDS_REBUILD_flags_t flags = 0, RebuildingRangesMetadata* rrm = nullptr); /** * Undrain a shard, ie allow the shard to acknowledge rebuilding. * * @param client Client to use to write to event log. * @param node Node for which a shard is undrained. * @param shard Shard that is undrained. * @return LSN of the event log record or LSN_INVALID on failure. */ lsn_t markShardUndrained(Client& client, node_index_t node, uint32_t shard); /** * Mark a shard as unrecoverable in the event log. * * @param client Client to use to write to event log. * @param node Node for which a shard is marked unrecoverable. * @param shard Shard that is marked unrecoverable. * @return LSN of the event log record or LSN_INVALID on failure. 
*/ lsn_t markShardUnrecoverable(Client& client, node_index_t node, uint32_t shard); /** * Wait until some shards have the given state according to the event log. * @param client Client to use for reading the event log. * @param shards Shards for which to check the state. * @param st Expected authoritative status of the shard. * @param wait_for_rebuilding If true, only return if rebuilding has completed * (regardless of if it was authoritative), ie all * donors completed rebuilding. * @return LSN of the last update. Might be more recent than the update that * triggered the state change we're waiting for. */ lsn_t waitUntilShardsHaveEventLogState(std::shared_ptr<Client> client, std::vector<ShardID> shards, std::set<AuthoritativeStatus> st, bool wait_for_rebuilding); lsn_t waitUntilShardsHaveEventLogState(std::shared_ptr<Client> client, std::vector<ShardID> shards, AuthoritativeStatus st, bool wait_for_rebuilding); lsn_t waitUntilShardHasEventLogState(std::shared_ptr<Client> client, ShardID shard, AuthoritativeStatus st, bool wait_for_rebuilding); struct SequencerState { NodeID node; lsn_t last_released_lsn; lsn_t next_lsn; }; /** * Executes a GetSeqStateRequest to find out which node is the sequencer * for the provided log ID. * * @param client Client to use to send messages to the cluster nodes * @param log_id ID of the lod * @param wait_for_recovery Sets eponym option for GetSeqStateRequest * @return Result of the GetSeqStateRequest */ Status getSeqState(Client* client, logid_t log_id, SequencerState& seq_state, bool wait_for_recovery); // Returns the default path for logdeviced std::string defaultLogdevicedPath(); // Returns the default path for ldquery-markdown std::string defaultMarkdownLDQueryPath(); // Attempts to find a binary, given a relative path to search for. Within FB // we just ask the build system for the path. 
// For open source, calls findFile()
std::string findBinary(const std::string& relative_path);

} // namespace IntegrationTestUtils
}} // namespace facebook::logdevice
27,167
1,570
// Copyright 2016 The SwiftShader Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef _LOCAL_INTERMEDIATE_INCLUDED_
#define _LOCAL_INTERMEDIATE_INCLUDED_

#include "intermediate.h"

// Result of resolving a vector field selector: up to four component
// offsets and the number of offsets actually selected.
// NOTE(review): semantics inferred from addSwizzle() below — confirm
// against the parser code that populates this struct.
struct TVectorFields {
    int offsets[4];
    int num;
};

//
// Set of helper functions to help parse and build the tree.
//
class TInfoSink;

// Builder for the intermediate representation (AST). Each add*/make*
// method constructs a node, attaches source-location info, and returns
// it (or a promoted/folded replacement). Nodes are pool-allocated via
// POOL_ALLOCATOR_NEW_DELETE, so none of these methods transfer
// individual ownership to the caller.
class TIntermediate {
public:
    POOL_ALLOCATOR_NEW_DELETE()

    // Keeps a reference to the info sink; `i` must outlive this object.
    TIntermediate(TInfoSink& i) : infoSink(i) { }

    // -- Leaf and expression nodes --
    TIntermSymbol* addSymbol(int Id, const TString&, const TType&, const TSourceLoc&);
    TIntermTyped* addBinaryMath(TOperator op, TIntermTyped* left, TIntermTyped* right, const TSourceLoc&);
    TIntermTyped* addAssign(TOperator op, TIntermTyped* left, TIntermTyped* right, const TSourceLoc&);
    TIntermTyped* addIndex(TOperator op, TIntermTyped* base, TIntermTyped* index, const TSourceLoc&);
    TIntermTyped* addUnaryMath(TOperator op, TIntermTyped* child, const TSourceLoc&, const TType*);

    // -- Aggregates (argument lists, statement sequences) --
    TIntermAggregate* growAggregate(TIntermNode* left, TIntermNode* right, const TSourceLoc&);
    TIntermAggregate* makeAggregate(TIntermNode* node, const TSourceLoc&);
    TIntermAggregate* setAggregateOperator(TIntermNode*, TOperator, const TSourceLoc&);

    // -- Control flow --
    TIntermNode* addSelection(TIntermTyped* cond, TIntermNodePair code, const TSourceLoc&);
    TIntermTyped* addSelection(TIntermTyped* cond, TIntermTyped* trueBlock, TIntermTyped* falseBlock, const TSourceLoc&);
    TIntermSwitch *addSwitch(TIntermTyped *init, TIntermAggregate *statementList, const TSourceLoc &line);
    TIntermCase *addCase(TIntermTyped *condition, const TSourceLoc &line);
    TIntermTyped* addComma(TIntermTyped* left, TIntermTyped* right, const TSourceLoc&);

    // -- Constants --
    TIntermConstantUnion* addConstantUnion(ConstantUnion*, const TType&, const TSourceLoc&);
    TIntermTyped* promoteConstantUnion(TBasicType, TIntermConstantUnion*);
    bool parseConstTree(const TSourceLoc&, TIntermNode*, ConstantUnion*, TOperator, TType, bool singleConstantParam = false);

    // -- Loops and branches --
    TIntermNode* addLoop(TLoopType, TIntermNode*, TIntermTyped*, TIntermTyped*, TIntermNode*, const TSourceLoc&);
    TIntermBranch* addBranch(TOperator, const TSourceLoc&);
    TIntermBranch* addBranch(TOperator, TIntermTyped*, const TSourceLoc&);
    TIntermTyped* addSwizzle(TVectorFields&, const TSourceLoc&);

    // -- Whole-tree operations --
    bool postProcess(TIntermNode*);
    void outputTree(TIntermNode*);

protected:
    TInfoSink& infoSink;

private:
    void operator=(TIntermediate&);  // prevent assignments
};

#endif // _LOCAL_INTERMEDIATE_INCLUDED_
1,018
819
/* Copyright 2017 Google Inc. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS-IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Unit tests for the Seurat <-> Ion image conversion helpers declared in
// seurat/image/image_util.h. The tests below establish that float channels
// are quantized to 8-bit values (0.0 -> 0, 0.5 -> 128, 1.0 -> 255) and that
// pixel order is row-major.

#include "seurat/image/image_util.h"

#include "ion/math/range.h"
#include "ion/math/vector.h"
#include "gtest/gtest.h"
#include "seurat/base/color.h"
#include "seurat/base/ion_util_no_gl.h"
#include "seurat/testing/ion_test_utils.h"

namespace seurat {
namespace image {
namespace {

using base::Color1f;
using base::Color3f;
using base::Color4f;
using ion::math::Point2i;
using ion::math::Range2i;
using ion::math::Vector2i;

// Tolerance for floating-point comparisons.
// NOTE(review): unused in the tests visible here — presumably shared style
// with sibling test files; confirm before removing.
constexpr float kEpsilon = 1.0e-5f;

// A 4-channel float image converts to an RGBA8888 Ion image with each
// channel quantized to 8 bits.
TEST(ImageUtilTest, ConvertSeuratImageToIonImage_Image4f) {
  Image4f simage(16, 8);
  for (int y = 0; y < 8; ++y) {
    for (int x = 0; x < 16; ++x) {
      simage.At(x, y) = {0.0f, 0.5f, 1.0f, 0.5f};
    }
  }
  const ion::gfx::ImagePtr ion_image = ConvertSeuratImageToIonImage(simage);
  EXPECT_EQ(16, ion_image->GetWidth());
  EXPECT_EQ(8, ion_image->GetHeight());
  EXPECT_EQ(ion::gfx::Image::Format::kRgba8888, ion_image->GetFormat());
  const uint8* ion_image_data = ion_image->GetData()->GetData<uint8>();
  for (int i = 0; i < 16 * 8; ++i) {
    // 0.0 -> 0, 0.5 -> 128, 1.0 -> 255.
    EXPECT_EQ(0, ion_image_data[i * 4 + 0]);
    EXPECT_EQ(128, ion_image_data[i * 4 + 1]);
    EXPECT_EQ(255, ion_image_data[i * 4 + 2]);
    EXPECT_EQ(128, ion_image_data[i * 4 + 3]);
  }
}

// A 3-channel float image converts to an RGB888 Ion image.
TEST(ImageUtilTest, ConvertSeuratImageToIonImage_Image3f) {
  Image3f simage(16, 8);
  for (int y = 0; y < 8; ++y) {
    for (int x = 0; x < 16; ++x) {
      simage.At(x, y) = {0.0f, 0.5f, 1.0f};
    }
  }
  const ion::gfx::ImagePtr ion_image = ConvertSeuratImageToIonImage(simage);
  EXPECT_EQ(16, ion_image->GetWidth());
  EXPECT_EQ(8, ion_image->GetHeight());
  EXPECT_EQ(ion::gfx::Image::Format::kRgb888, ion_image->GetFormat());
  const uint8* ion_image_data = ion_image->GetData()->GetData<uint8>();
  for (int i = 0; i < 16 * 8; ++i) {
    EXPECT_EQ(0, ion_image_data[i * 3 + 0]);
    EXPECT_EQ(128, ion_image_data[i * 3 + 1]);
    EXPECT_EQ(255, ion_image_data[i * 3 + 2]);
  }
}

// A 1-channel float image converts to a single-byte-per-pixel kAlpha image.
TEST(ImageUtilTest, ConvertSeuratImageToIonImage_Image1f) {
  Image1f simage(16, 8);
  for (int y = 0; y < 8; ++y) {
    for (int x = 0; x < 16; ++x) {
      simage.At(x, y) = Color1f(0.5f);
    }
  }
  const ion::gfx::ImagePtr ion_image = ConvertSeuratImageToIonImage(simage);
  EXPECT_EQ(16, ion_image->GetWidth());
  EXPECT_EQ(8, ion_image->GetHeight());
  EXPECT_EQ(ion::gfx::Image::Format::kAlpha, ion_image->GetFormat());
  const uint8* ion_image_data = ion_image->GetData()->GetData<uint8>();
  for (int i = 0; i < 16 * 8; ++i) {
    EXPECT_EQ(128, ion_image_data[i]);
  }
}

// An 8-bit image converts without re-quantization; bytes pass through
// unchanged, in row-major order.
TEST(ImageUtilTest, ConvertSeuratImageToIonImage_Image4ui8) {
  Image4ui8 simage(16, 8);
  for (int y = 0; y < 8; ++y) {
    for (int x = 0; x < 16; ++x) {
      simage.At(x, y) = Image4ui8::ElementType(x, y, x, y);
    }
  }
  const ion::gfx::ImagePtr ion_image = ConvertSeuratImageToIonImage(simage);
  EXPECT_EQ(16, ion_image->GetWidth());
  EXPECT_EQ(8, ion_image->GetHeight());
  EXPECT_EQ(ion::gfx::Image::Format::kRgba8888, ion_image->GetFormat());
  const uint8* ion_image_data = ion_image->GetData()->GetData<uint8>();
  for (int i = 0; i < 16 * 8; ++i) {
    // Pixel i sits at (x, y) = (i % 16, i / 16) in row-major order.
    EXPECT_EQ(i % 16, ion_image_data[i * 4 + 0]);
    EXPECT_EQ(i / 16, ion_image_data[i * 4 + 1]);
    EXPECT_EQ(i % 16, ion_image_data[i * 4 + 2]);
    EXPECT_EQ(i / 16, ion_image_data[i * 4 + 3]);
  }
}

// Round trip in the other direction: an RGBA8888 Ion image converts to an
// Image4ui8 with identical bytes.
TEST(ImageUtilTest, ConvertIonImageToWImage4b) {
  const int kWidth = 16;
  const int kHeight = 8;
  ion::gfx::ImagePtr ion_image =
      base::CreateImage(ion::gfx::Image::kRgba8888, {kWidth, kHeight});
  const auto data_container = ion_image->GetData();
  base::Color4ui8* data = data_container->GetMutableData<base::Color4ui8>();
  for (int y = 0; y < kHeight; ++y) {
    for (int x = 0; x < kWidth; ++x) {
      data[y * kWidth + x][0] = 0;
      data[y * kWidth + x][1] = 128;
      data[y * kWidth + x][2] = 255;
      data[y * kWidth + x][3] = 128;
    }
  }
  const auto simage = ConvertIonImageToSeuratImage<Image4ui8>(ion_image);
  EXPECT_EQ(kWidth, simage.Width());
  EXPECT_EQ(kHeight, simage.Height());
  for (int y = 0; y < kHeight; ++y) {
    for (int x = 0; x < kWidth; ++x) {
      EXPECT_EQ(0, simage.At(x, y)[0]);
      EXPECT_EQ(128, simage.At(x, y)[1]);
      EXPECT_EQ(255, simage.At(x, y)[2]);
      EXPECT_EQ(128, simage.At(x, y)[3]);
    }
  }
}

}  // namespace
}  // namespace image
}  // namespace seurat
2,117
9,782
/* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.log4j; import org.apache.log4j.helpers.LogLog; import org.apache.log4j.spi.LocationInfo; import org.apache.log4j.spi.LoggingEvent; import java.util.logging.LogRecord; import static java.lang.String.format; /** * We are unable to completely remove the log4j dependency due to ZooKeeper, which explicitly requires * an internal class of the log4j package and is unavailable in the slf4j-over-log4j artifact. * <p> * This JUL appender is a workaround for this issue, appending all log4j events to JUL. */ public class JulAppender extends AppenderSkeleton { /** * Append a log event at the appropriate JUL level, depending on the log4j level. */ @Override protected void append(LoggingEvent loggingEvent) { java.util.logging.Logger logger = java.util.logging.Logger.getLogger(loggingEvent.getLoggerName()); if (logger == null) { LogLog.warn(format("Cannot obtain JUL %s. 
Verify that this appender is used while an appropriate LogManager is active.", loggingEvent.getLoggerName())); return; } Level level = loggingEvent.getLevel(); java.util.logging.Level julLevel = convertLog4jLevel(level); LogRecord record = new LogRecord(julLevel, loggingEvent.getRenderedMessage()); record.setMillis(loggingEvent.getTimeStamp()); LocationInfo location = loggingEvent.getLocationInformation(); if (location != null) { record.setSourceClassName(location.getClassName()); record.setSourceMethodName(location.getMethodName()); } logger.log(record); } @Override public boolean requiresLayout() { return true; } @Override public void close() {} private static java.util.logging.Level convertLog4jLevel(Level log4jLevel) { if (log4jLevel.equals(Level.TRACE)) { return java.util.logging.Level.FINEST; } if (log4jLevel.equals(Level.DEBUG)) { return java.util.logging.Level.FINER; } if (log4jLevel.equals(Level.INFO)) { return java.util.logging.Level.INFO; } if (log4jLevel.equals(Level.WARN)) { return java.util.logging.Level.WARNING; } if (log4jLevel.equals(Level.ERROR)) { return java.util.logging.Level.SEVERE; } if (log4jLevel.equals(Level.FATAL)) { return java.util.logging.Level.SEVERE; } if (log4jLevel.equals(Level.ALL)) { return java.util.logging.Level.ALL; } if (log4jLevel.equals(Level.OFF)) { return java.util.logging.Level.OFF; } return java.util.logging.Level.FINE; } }
1,298
14,668
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "chrome/browser/extensions/api/webstore_private/extension_install_status.h"

#include "base/memory/scoped_refptr.h"
#include "base/values.h"
#include "chrome/browser/extensions/extension_service.h"
#include "chrome/browser/profiles/profile.h"
#include "chrome/common/extensions/extension_constants.h"
#include "chrome/common/pref_names.h"
#include "components/crx_file/id_util.h"
#include "components/prefs/pref_service.h"
#include "extensions/browser/extension_prefs.h"
#include "extensions/common/extension.h"
#include "extensions/common/extension_urls.h"
#include "extensions/common/manifest_constants.h"
#include "extensions/common/permissions/permission_set.h"

namespace extensions {
namespace {

// A helper function to determine if an extension from web store with given
// information should be blocked by enterprise policy. It checks extension's
// installation mode, permission and manifest type.
// Returns true if the extension |mode| is blocked, removed or allowed by
// wildcard/update_url but blocked by |manifest type| or |required permissions|.
bool IsExtensionInstallBlockedByPolicy(
    ExtensionManagement* extension_management,
    ExtensionManagement::InstallationMode mode,
    const ExtensionId& extension_id,
    const std::string& update_url,
    Manifest::Type manifest_type,
    const PermissionSet& required_permissions) {
  // Installation mode is decisive except for INSTALLATION_ALLOWED, which
  // still needs the manifest-type / permission checks below.
  switch (mode) {
    case ExtensionManagement::INSTALLATION_BLOCKED:
    case ExtensionManagement::INSTALLATION_REMOVED:
      return true;
    case ExtensionManagement::INSTALLATION_FORCED:
    case ExtensionManagement::INSTALLATION_RECOMMENDED:
      return false;
    case ExtensionManagement::INSTALLATION_ALLOWED:
      break;
  }
  // An explicit per-id allowlist entry overrides the manifest-type and
  // permission restrictions checked below.
  if (extension_management->IsInstallationExplicitlyAllowed(extension_id))
    return false;
  // Extension is allowed by wildcard or update_url, checks required permissions
  // and manifest type.
  // TODO(crbug.com/1088021): Find out the right way to handle extension policy
  // priority.
  if (manifest_type != Manifest::Type::TYPE_UNKNOWN &&
      !extension_management->IsAllowedManifestType(manifest_type,
                                                   extension_id)) {
    return true;
  }
  if (!extension_management->IsPermissionSetAllowed(extension_id, update_url,
                                                    required_permissions)) {
    return true;
  }
  return false;
}

}  // namespace

// Convenience overload: no manifest type or permission information available
// (e.g. the webstore page only knows the extension id).
ExtensionInstallStatus GetWebstoreExtensionInstallStatus(
    const ExtensionId& extension_id,
    Profile* profile) {
  return GetWebstoreExtensionInstallStatus(
      extension_id, profile, Manifest::Type::TYPE_UNKNOWN, PermissionSet());
}

ExtensionInstallStatus GetWebstoreExtensionInstallStatus(
    const ExtensionId& extension_id,
    Profile* profile,
    const Manifest::Type manifest_type,
    const PermissionSet& required_permission_set) {
  DCHECK(crx_file::id_util::IdIsValid(extension_id));

  // Supervised-user custodian approval takes precedence over every other
  // status, including policy force-install.
  if (ExtensionPrefs::Get(profile)->HasDisableReason(
          extension_id,
          disable_reason::DISABLE_CUSTODIAN_APPROVAL_REQUIRED)) {
    return kCustodianApprovalRequired;
  }

  const GURL update_url = extension_urls::GetWebstoreUpdateUrl();
  ExtensionManagement* extension_management =
      ExtensionManagementFactory::GetForBrowserContext(profile);

  // Always use webstore update url to check the installation mode because this
  // function is used by webstore private API only and there may not be any
  // |Extension| instance. Note that we don't handle the case where an offstore
  // extension with an identical ID is installed.
  ExtensionManagement::InstallationMode mode =
      extension_management->GetInstallationMode(extension_id,
                                                update_url.spec());
  if (mode == ExtensionManagement::INSTALLATION_FORCED ||
      mode == ExtensionManagement::INSTALLATION_RECOMMENDED)
    return kForceInstalled;

  // Registry states for extensions that are already installed.
  ExtensionRegistry* registry = ExtensionRegistry::Get(profile);
  if (registry->enabled_extensions().Contains(extension_id))
    return kEnabled;
  if (registry->terminated_extensions().Contains(extension_id))
    return kTerminated;
  if (registry->blocklisted_extensions().Contains(extension_id))
    return kBlocklisted;

  // If an installed extension is disabled due to policy, returns
  // kBlockedByPolicy, kCanRequest or kRequestPending instead of kDisabled.
  // By doing so, user can still request an installed and policy blocked
  // extension.
  if (!IsExtensionInstallBlockedByPolicy(
          extension_management, mode, extension_id, update_url.spec(),
          manifest_type, required_permission_set)) {
    if (registry->disabled_extensions().Contains(extension_id))
      return kDisabled;
    return kInstallable;
  }

  // The ability to request extension installs is not available if the extension
  // request policy is disabled
  if (!profile->GetPrefs()->GetBoolean(prefs::kCloudExtensionRequestEnabled))
    return kBlockedByPolicy;

  // An extension which is explicitly blocked by enterprise policy can't be
  // requested anymore.
  if (extension_management->IsInstallationExplicitlyBlocked(extension_id))
    return kBlockedByPolicy;

  // A pending entry in the request-ids pref means the user already asked an
  // admin for this extension.
  if (profile->GetPrefs()
          ->GetDictionary(prefs::kCloudExtensionRequestIds)
          ->FindKey(extension_id)) {
    return kRequestPending;
  }

  return kCanRequest;
}

}  // namespace extensions
1,804
1,063
import torch
import torch.nn as nn


class Res(nn.Module):
    """Two stacked residual MLP blocks over a hidden size H.

    forward: y <- w(y); y <- y + relu(v1(relu(u1(y)))); returns
    y + relu(v2(relu(u2(y)))). All layers are H -> H linear maps.
    """

    def __init__(self, H):
        super().__init__()
        self.u1 = nn.Linear(H, H)
        self.u2 = nn.Linear(H, H)
        self.v1 = nn.Linear(H, H)
        self.v2 = nn.Linear(H, H)
        self.w = nn.Linear(H, H)

    def forward(self, y):
        # Input projection, then two residual branches applied sequentially.
        y = self.w(y)
        y = y + torch.relu(self.v1(torch.relu(self.u1(y))))
        return y + torch.relu(self.v2(torch.relu(self.u2(y))))


class NeuralCFG(torch.nn.Module):
    """
    NeuralCFG From Kim et al

    Parameterizes a PCFG with V terminal words, T preterminals, NT
    nonterminals, and hidden size H. forward() returns the three log-potential
    tensors (terminals, rules, roots) for a batch of word-id sequences.
    """

    def __init__(self, V, T, NT, H):
        super().__init__()
        self.NT = NT
        self.V = V
        self.T = T
        # Embedding tables; nonterm_emb_c scores (left, right) child pairs.
        self.word_emb = nn.Parameter(torch.Tensor(V, H))
        self.term_emb = nn.Parameter(torch.Tensor(T, H))
        self.nonterm_emb = nn.Parameter(torch.Tensor(NT, H))
        self.nonterm_emb_c = nn.Parameter(torch.Tensor(NT + T, NT + T, H))
        self.root_emb = nn.Parameter(torch.Tensor(NT, H))
        self.s_emb = nn.Parameter(torch.Tensor(1, H))
        self.mlp1 = Res(H)
        self.mlp2 = Res(H)
        # Xavier-init all matrix-shaped parameters (vectors keep default init).
        for p in self.parameters():
            if p.dim() > 1:
                torch.nn.init.xavier_uniform_(p)

    def forward(self, input):
        # input: (batch, n) tensor of word ids — assumed long dtype for gather;
        # TODO(review): confirm with callers.
        T, NT = self.T, self.NT

        def terms(words):
            # Log p(word | preterminal), gathered at the observed word ids.
            # Result shape: (batch, n, T).
            b, n = input.shape[:2]
            term_prob = (
                torch.einsum("vh,th->tv", self.word_emb, self.mlp1(self.term_emb))
                .log_softmax(-1)
                .unsqueeze(0)
                .unsqueeze(0)
                .expand(b, n, self.T, self.V)
            )
            indices = input.unsqueeze(2).expand(b, n, self.T).unsqueeze(3)
            term_prob = torch.gather(term_prob, 3, indices).squeeze(3)
            return term_prob

        def rules(b):
            # Log p(A -> B C): normalized jointly over all (B, C) child pairs.
            # Result shape: (b, NT, NT + T, NT + T).
            return (
                torch.einsum("sh,tuh->stu", self.nonterm_emb, self.nonterm_emb_c)
                .view(NT, -1)
                .log_softmax(-1)
                .view(1, NT, NT + T, NT + T)
                .expand(b, NT, NT + T, NT + T)
            )

        def roots(b):
            # Log p(root nonterminal). Result shape: (b, NT).
            return (
                torch.einsum("ah,th->t", self.s_emb, self.mlp2(self.root_emb))
                .log_softmax(-1)
                .view(1, NT)
                .expand(b, NT)
            )

        batch = input.shape[0]
        return terms(input), rules(batch), roots(batch)
1,384
848
<reponame>PaulWang1905/tensorflow # Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Functional tests for the op to generate vocab remapping.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import numpy as np from tensorflow.contrib import framework as contrib_framework from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import partitioned_variables from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables from tensorflow.python.platform import flags from tensorflow.python.platform import test from tensorflow.python.training import saver FLAGS = flags.FLAGS _TESTDATA_PATH = 'contrib/framework/testdata' class LoadMulticlassBiasTest(test.TestCase): """Tests for the load_linear_multiclass_bias_initializer functionality.""" def setUp(self): ops.reset_default_graph() dim = 1 num = 3 with ops.name_scope('some_scope'): # Basically from 0 to dim*num-1. 
flat_data = math_ops.linspace(0.0, dim * num - 1, dim * num) bias = variables.Variable( array_ops.reshape(flat_data, (num, dim)), name='bias') save = saver.Saver([bias]) with self.cached_session() as sess: variables.global_variables_initializer().run() self.bundle_file = os.path.join(test.get_temp_dir(), 'bias_checkpoint') save.save(sess, self.bundle_file) self.new_class_vocab_file = os.path.join( test.test_src_dir_path(_TESTDATA_PATH), 'keyword_new.txt') self.old_class_vocab_file = os.path.join( test.test_src_dir_path(_TESTDATA_PATH), 'keyword.txt') self.init_val = 42 def _init_val_initializer(shape, dtype=None, partition_info=None): del dtype, partition_info # Unused by this unit-testing initializer. return array_ops.tile( constant_op.constant([[self.init_val]], dtype=dtypes.float32), shape) self.initializer = _init_val_initializer def test_load_linear_multiclass_bias_initializer(self): """Tests for the bias initializer wrapper.""" bias_loading_initializer = ( contrib_framework.load_linear_multiclass_bias_initializer( new_class_vocab_file=self.new_class_vocab_file, old_class_vocab_file=self.old_class_vocab_file, new_class_vocab_size=4, bias_tensor_name='some_scope/bias', ckpt_path=[self.bundle_file], num_class_oov_buckets=1, initializer=self.initializer)) expected_remapped_bias_vector = np.reshape( [2, 0, self.init_val, 1, self.init_val], [5, 1]) # The new bias vector is of size [4 class vocab + 1 class OOV, 1]. 
remapped_bias_vector = variable_scope.get_variable( name='bias/obtained_bias_vector', shape=[5, 1], initializer=bias_loading_initializer, partitioner=partitioned_variables.fixed_size_partitioner(3)) with self.cached_session(): variables.global_variables_initializer().run() self.assertAllClose(expected_remapped_bias_vector, remapped_bias_vector.as_tensor().eval()) class LoadVariableSlotTest(test.TestCase): """Tests for the load_variable_slot_initializer functionality.""" def setUp(self): ops.reset_default_graph() dim = 1 num = 3 with ops.name_scope('some_scope'): # Basically from 0 to dim*num-1. flat_data = math_ops.linspace(0.0, dim * num - 1, dim * num) accum = variables.Variable( array_ops.reshape(flat_data, (num, dim)), name='accum') save = saver.Saver([accum]) with self.cached_session() as sess: variables.global_variables_initializer().run() self.bundle_file = os.path.join(test.get_temp_dir(), 'accum_checkpoint') save.save(sess, self.bundle_file) self.new_class_vocab_file = os.path.join( test.test_src_dir_path(_TESTDATA_PATH), 'keyword_new.txt') self.old_class_vocab_file = os.path.join( test.test_src_dir_path(_TESTDATA_PATH), 'keyword.txt') self.init_val = 42 def _init_val_initializer(shape, dtype=None, partition_info=None): del dtype, partition_info # Unused by this unit-testing initializer. return array_ops.tile( constant_op.constant([[self.init_val]], dtype=dtypes.float32), shape) self.initializer = _init_val_initializer def test_load_variable_slot_initializer(self): """Tests for the slot initializer wrapper.""" # We have an initializer for each of two partitioned variables, which will # be [3, 1] and [2, 1]. The partitioning information is passed here in # initializer construction, as opposed to through a variable scope during # variable creation. 
variable_slot_initializer_part_0 = ( contrib_framework.load_variable_slot_initializer( new_row_vocab_file=self.new_class_vocab_file, old_row_vocab_file=self.old_class_vocab_file, new_row_vocab_size=4, new_col_vocab_size=1, primary_partition_info=variable_scope._PartitionInfo( full_shape=[5, 1], var_offset=[0, 0]), old_tensor_name='some_scope/accum', ckpt_path=[self.bundle_file], num_row_oov_buckets=1, initializer=self.initializer)) variable_slot_initializer_part_1 = ( contrib_framework.load_variable_slot_initializer( new_row_vocab_file=self.new_class_vocab_file, old_row_vocab_file=self.old_class_vocab_file, new_row_vocab_size=4, new_col_vocab_size=1, primary_partition_info=variable_scope._PartitionInfo( full_shape=[5, 1], var_offset=[3, 0]), old_tensor_name='some_scope/accum', ckpt_path=[self.bundle_file], num_row_oov_buckets=1, initializer=self.initializer)) expected_remapped_accum_vector_part_0 = np.reshape([2, 0, self.init_val], [3, 1]) expected_remapped_accum_vector_part_1 = np.reshape([1, self.init_val], [2, 1]) # Since there is no variable scope here, partition_info will be None, so # if variable_slot_initializer_part_0 and variable_slot_initializer_part_1 # were instead instances of load_and_remap_matrix_initializer, the part_0 # obtained vector would still be [2, 0, self.init_val], but the part_1 # obtained vector would be [2, 0], since the partition_info would default to # assuming a single partition. 
remapped_accum_vector_part_0 = variable_scope.get_variable( name='accum/obtained_accum_vector_part_0', shape=[3, 1], initializer=variable_slot_initializer_part_0) remapped_accum_vector_part_1 = variable_scope.get_variable( name='accum/obtained_accum_vector_part_1', shape=[2, 1], initializer=variable_slot_initializer_part_1) with self.cached_session(): variables.global_variables_initializer().run() self.assertAllClose(expected_remapped_accum_vector_part_0, remapped_accum_vector_part_0.eval()) self.assertAllClose(expected_remapped_accum_vector_part_1, remapped_accum_vector_part_1.eval()) if __name__ == '__main__': test.main()
3,324
605
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef TEST_STD_RANGES_RANGE_ADAPTORS_RANGE_COMMON_VIEW_TYPES_H
#define TEST_STD_RANGES_RANGE_ADAPTORS_RANGE_COMMON_VIEW_TYPES_H

#include <ranges>

#include "test_iterators.h"

// Test-only view types over an int array; each struct exercises one property
// combination (default-constructibility, copyability, traversal category,
// sized-ness, common-ness) used by the range.common.view tests.

// A view that is default-initializable (empty range of ints).
struct DefaultConstructibleView : std::ranges::view_base {
  int* begin_ = nullptr;
  int* end_ = nullptr;

  explicit DefaultConstructibleView() = default;

  constexpr int *begin() const { return begin_; }
  constexpr auto end() const { return sentinel_wrapper<int*>(end_); }
};
static_assert(std::ranges::view<DefaultConstructibleView>);
static_assert(std::default_initializable<DefaultConstructibleView>);

// A contiguous view that is move-only (no copy operations).
struct MoveOnlyView : std::ranges::view_base {
  int* begin_;
  int* end_;

  constexpr explicit MoveOnlyView(int* b, int* e) : begin_(b), end_(e) { }
  constexpr MoveOnlyView(MoveOnlyView&&) = default;
  constexpr MoveOnlyView& operator=(MoveOnlyView&&) = default;

  constexpr int *begin() const { return begin_; }
  constexpr auto end() const { return sentinel_wrapper<int*>(end_); }
};
static_assert( std::ranges::view<MoveOnlyView>);
static_assert( std::ranges::contiguous_range<MoveOnlyView>);
static_assert(!std::copyable<MoveOnlyView>);

// Same shape as MoveOnlyView but copyable.
struct CopyableView : std::ranges::view_base {
  int* begin_;
  int* end_;

  constexpr explicit CopyableView(int* b, int* e) : begin_(b), end_(e) { }

  constexpr int *begin() const { return begin_; }
  constexpr auto end() const { return sentinel_wrapper<int*>(end_); }
};
static_assert(std::ranges::view<CopyableView>);
static_assert(std::copyable<CopyableView>);

using ForwardIter = forward_iterator<int*>;

// Forward-only view whose sentinel supports iterator subtraction, making the
// range sized without being random-access.
struct SizedForwardView : std::ranges::view_base {
  int* begin_;
  int* end_;

  constexpr explicit SizedForwardView(int* b, int* e) : begin_(b), end_(e) { }

  constexpr auto begin() const { return forward_iterator<int*>(begin_); }
  constexpr auto end() const { return sized_sentinel<forward_iterator<int*>>(forward_iterator<int*>(end_)); }
};
static_assert(std::ranges::view<SizedForwardView>);
static_assert(std::ranges::forward_range<SizedForwardView>);
static_assert(std::ranges::sized_range<SizedForwardView>);

using RandomAccessIter = random_access_iterator<int*>;

// Random-access, sized view (iterator and sentinel differ in type, so it is
// still not a common range).
struct SizedRandomAccessView : std::ranges::view_base {
  int* begin_;
  int* end_;

  constexpr explicit SizedRandomAccessView(int* b, int* e) : begin_(b), end_(e) { }

  constexpr auto begin() const { return random_access_iterator<int*>(begin_); }
  constexpr auto end() const { return sized_sentinel<random_access_iterator<int*>>(random_access_iterator<int*>(end_)); }
};
static_assert(std::ranges::view<SizedRandomAccessView>);
static_assert(std::ranges::random_access_range<SizedRandomAccessView>);
static_assert(std::ranges::sized_range<SizedRandomAccessView>);

// begin() and end() return the same type, so the range is already common.
struct CommonView : std::ranges::view_base {
  int* begin_;
  int* end_;

  constexpr explicit CommonView(int* b, int* e) : begin_(b), end_(e) { }

  constexpr int *begin() const { return begin_; }
  constexpr int *end() const { return end_; }
};
static_assert(std::ranges::view<CommonView>);
static_assert(std::ranges::common_range<CommonView>);

// Sentinel type differs from the iterator type: not a common range.
struct NonCommonView : std::ranges::view_base {
  int* begin_;
  int* end_;

  constexpr explicit NonCommonView(int* b, int* e) : begin_(b), end_(e) { }

  constexpr int *begin() const { return begin_; }
  constexpr auto end() const { return sentinel_wrapper<int*>(end_); }
};
static_assert( std::ranges::view<NonCommonView>);
static_assert(!std::ranges::common_range<NonCommonView>);

#endif // TEST_STD_RANGES_RANGE_ADAPTORS_RANGE_COMMON_VIEW_TYPES_H
1,286
631
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.metron.indexing.dao.metaalert;

import java.util.ArrayList;
import java.util.DoubleSummaryStatistics;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.commons.math3.stat.descriptive.rank.Median;
import org.apache.metron.indexing.dao.update.Document;
import org.apache.metron.stellar.common.utils.ConversionUtils;

/**
 * Holds summary statistics (max, min, average, count, sum, median) over the
 * threat scores of a meta alert's child alerts.
 */
public class MetaScores {

  protected Map<String, Object> metaScores = new HashMap<>();

  /**
   * Computes the summary statistics for the given child-alert scores.
   *
   * @param scores the threat scores of the child alerts; may be empty.
   *               NOTE(review): with an empty list, DoubleSummaryStatistics
   *               reports max/min as -Infinity/+Infinity, which is preserved
   *               here for backward compatibility — confirm downstream
   *               consumers tolerate these sentinel values.
   */
  public MetaScores(List<Double> scores) {
    // A meta alert could be entirely alerts with no values.
    DoubleSummaryStatistics stats = scores
        .stream()
        .mapToDouble(a -> a)
        .summaryStatistics();
    metaScores.put("max", stats.getMax());
    metaScores.put("min", stats.getMin());
    metaScores.put("average", stats.getAverage());
    metaScores.put("count", stats.getCount());
    metaScores.put("sum", stats.getSum());

    // median isn't in the stats summary
    double[] arr = scores
        .stream()
        .mapToDouble(d -> d)
        .toArray();
    metaScores.put("median", new Median().evaluate(arr));
  }

  public Map<String, Object> getMetaScores() {
    return metaScores;
  }

  /**
   * Calculate the meta alert scores for a Document. The scores are placed directly in the provided
   * document.
   * @param metaAlert The Document containing scores
   */
  @SuppressWarnings("unchecked")
  public static void calculateMetaScores(Document metaAlert, String threatTriageField,
      String threatSort) {
    MetaScores metaScores = new MetaScores(new ArrayList<>());

    // Collect the parseable threat scores of all child alerts; alerts without
    // a (valid) score simply don't contribute.
    List<Object> alertsRaw = ((List<Object>) metaAlert.getDocument()
        .get(MetaAlertConstants.ALERT_FIELD));
    if (alertsRaw != null && !alertsRaw.isEmpty()) {
      ArrayList<Double> scores = new ArrayList<>();
      for (Object alertRaw : alertsRaw) {
        Map<String, Object> alert = (Map<String, Object>) alertRaw;
        Double scoreNum = parseThreatField(alert.get(threatTriageField));
        if (scoreNum != null) {
          scores.add(scoreNum);
        }
      }
      metaScores = new MetaScores(scores);
    }

    // add a summary (max, min, avg, ...) of all the threat scores from the child alerts
    metaAlert.getDocument().putAll(metaScores.getMetaScores());

    // add the overall threat score for the metaalert; one of the summary aggregations as defined
    // by `threatSort`
    Object threatScore = metaScores.getMetaScores().get(threatSort);

    // add the threat score as a float; type needs to match the threat score field from each of
    // the sensor indices
    metaAlert.getDocument()
        .put(threatTriageField, ConversionUtils.convert(threatScore, Float.class));
  }

  /**
   * Extracts a threat score from a raw field value.
   *
   * @param threatRaw a Number, a numeric String, or anything else
   * @return the score as a Double, or null if the value is absent,
   *         of an unexpected type, or a non-numeric string
   */
  protected static Double parseThreatField(Object threatRaw) {
    Double threat = null;
    if (threatRaw instanceof Number) {
      threat = ((Number) threatRaw).doubleValue();
    } else if (threatRaw instanceof String) {
      try {
        threat = Double.parseDouble((String) threatRaw);
      } catch (NumberFormatException nfe) {
        // BUGFIX: a single malformed score string previously threw an
        // unchecked exception and aborted scoring of the entire meta alert.
        // Treat it as a missing score instead.
      }
    }
    return threat;
  }
}
1,241
6,197
package com.kickstarter.ui.views;

import android.content.Context;
import android.text.Html;
import android.util.AttributeSet;

import androidx.annotation.NonNull;
import androidx.annotation.Nullable;
import androidx.appcompat.widget.AppCompatTextView;

/**
 * A TextView that renders its initial text (e.g. the android:text set in XML)
 * as HTML. The conversion happens once at construction time; text set later
 * via setText is not re-parsed.
 */
public final class HtmlTextView extends AppCompatTextView {
  public HtmlTextView(final @NonNull Context context) {
    super(context);
    init();
  }

  public HtmlTextView(final @NonNull Context context, final @Nullable AttributeSet attrs) {
    super(context, attrs);
    init();
  }

  // BUGFIX: the three-argument constructor was missing, so inflating this view
  // with a default style attribute could not dispatch to the proper super
  // constructor. Standard Android custom views implement all three.
  public HtmlTextView(final @NonNull Context context, final @Nullable AttributeSet attrs,
    final int defStyleAttr) {
    super(context, attrs, defStyleAttr);
    init();
  }

  private void init() {
    // NOTE(review): Html.fromHtml(String) is deprecated as of API 24 in favor
    // of fromHtml(String, int) — confirm the project's minSdk before migrating.
    setText(Html.fromHtml(getText().toString()));
  }
}
203
3,073
import redis
from django.conf import settings
from django.shortcuts import render
from django.views import View


class Updates(View):
    """Exposes feed-update queue depths as Prometheus counter samples."""

    def get(self, request):
        # One connection from the shared feed-update pool.
        conn = redis.Redis(connection_pool=settings.REDIS_FEED_UPDATE_POOL)

        # Gauge each queue/set/list that tracks feed-fetching work.
        metrics = {
            'update_queue': conn.scard("queued_feeds"),
            'feeds_fetched': conn.zcard("fetched_feeds_last_hour"),
            'tasked_feeds': conn.zcard("tasked_feeds"),
            'error_feeds': conn.zcard("error_feeds"),
            'celery_update_feeds': conn.llen("update_feeds"),
            'celery_new_feeds': conn.llen("new_feeds"),
            'celery_push_feeds': conn.llen("push_feeds"),
            'celery_work_queue': conn.llen("work_queue"),
            'celery_search_queue': conn.llen("search_indexer"),
        }

        chart_name = "updates"
        chart_type = "counter"

        # Render each value as a Prometheus sample line with a category label.
        formatted_data = {
            key: f'{chart_name}{{category="{key}"}} {value}'
            for key, value in metrics.items()
        }

        context = {
            "data": formatted_data,
            "chart_name": chart_name,
            "chart_type": chart_type,
        }
        return render(request, 'monitor/prometheus_data.html', context,
                      content_type="text/plain")
611
1,729
<gh_stars>1000+ /************************************************************************ * file name : descriptors_tree_widget.cpp * ----------------- : * creation time : 2016/09/17 * author : <NAME> * email : <EMAIL> * ----------------- : * description : The file contains implementation of DescriptorsTreeWidget and it's auxiliary classes * : for displyaing EasyProfiler blocks descriptors tree. * ----------------- : * change log : * 2016/09/17 <NAME>: initial commit. * : * : * * ----------------- : * license : Lightweight profiler library for c++ * : Copyright(C) 2016-2019 <NAME>, <NAME> * : * : Licensed under either of * : * MIT license (LICENSE.MIT or http://opensource.org/licenses/MIT) * : * Apache License, Version 2.0, (LICENSE.APACHE or http://www.apache.org/licenses/LICENSE-2.0) * : at your option. * : * : The MIT License * : * : Permission is hereby granted, free of charge, to any person obtaining a copy * : of this software and associated documentation files (the "Software"), to deal * : in the Software without restriction, including without limitation the rights * : to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies * : of the Software, and to permit persons to whom the Software is furnished * : to do so, subject to the following conditions: * : * : The above copyright notice and this permission notice shall be included in all * : copies or substantial portions of the Software. * : * : THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, * : INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR * : PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE * : LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, * : TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * : USE OR OTHER DEALINGS IN THE SOFTWARE. 
* : * : The Apache License, Version 2.0 (the "License") * : * : You may not use this file except in compliance with the License. * : You may obtain a copy of the License at * : * : http://www.apache.org/licenses/LICENSE-2.0 * : * : Unless required by applicable law or agreed to in writing, software * : distributed under the License is distributed on an "AS IS" BASIS, * : WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * : See the License for the specific language governing permissions and * : limitations under the License. ************************************************************************/ #include <QAbstractTextDocumentLayout> #include <QAction> #include <QActionGroup> #include <QApplication> #include <QClipboard> #include <QContextMenuEvent> #include <QHBoxLayout> #include <QHeaderView> #include <QKeyEvent> #include <QLabel> #include <QLineEdit> #include <QMenu> #include <QSettings> #include <QSignalBlocker> #include <QSplitter> #include <QString> #include <QTextDocument> #include <QTimer> #include <QToolBar> #include <QVariant> #include <QVBoxLayout> #include "descriptors_tree_widget.h" #include "globals.h" #include "arbitrary_value_inspector.h" #include "text_highlighter.h" #include "thread_pool.h" #ifdef max #undef max #endif #ifdef min #undef min #endif ////////////////////////////////////////////////////////////////////////// ::profiler::EasyBlockStatus nextStatus(::profiler::EasyBlockStatus _status) { switch (_status) { case ::profiler::OFF: return ::profiler::ON; case ::profiler::ON: return ::profiler::FORCE_ON; case ::profiler::FORCE_ON: return ::profiler::OFF_RECURSIVE; case ::profiler::OFF_RECURSIVE: return ::profiler::ON_WITHOUT_CHILDREN; case ::profiler::ON_WITHOUT_CHILDREN: return ::profiler::FORCE_ON_WITHOUT_CHILDREN; case ::profiler::FORCE_ON_WITHOUT_CHILDREN: return ::profiler::OFF; } return ::profiler::OFF; } const char* statusText(::profiler::EasyBlockStatus _status) { switch (_status) { case ::profiler::OFF: 
return "OFF"; case ::profiler::ON: return "ON"; case ::profiler::FORCE_ON: return "FORCE_ON"; case ::profiler::OFF_RECURSIVE: return "OFF_RECURSIVE"; case ::profiler::ON_WITHOUT_CHILDREN: return "ON_WITHOUT_CHILDREN"; case ::profiler::FORCE_ON_WITHOUT_CHILDREN: return "FORCE_ON_WITHOUT_CHILDREN"; } return ""; } ::profiler::color_t statusColor(::profiler::EasyBlockStatus _status) { switch (_status) { case ::profiler::OFF: return ::profiler::colors::Red900; case ::profiler::ON: return ::profiler::colors::LightGreen900; case ::profiler::FORCE_ON: return ::profiler::colors::LightGreen900; case ::profiler::OFF_RECURSIVE: return ::profiler::colors::Red900; case ::profiler::ON_WITHOUT_CHILDREN: return ::profiler::colors::Lime900; case ::profiler::FORCE_ON_WITHOUT_CHILDREN: return ::profiler::colors::Lime900; } return ::profiler::colors::Black; } ////////////////////////////////////////////////////////////////////////// DescriptorsTreeItem::DescriptorsTreeItem(::profiler::block_id_t _desc, Parent* _parent) : Parent(_parent, QTreeWidgetItem::UserType) , m_desc(_desc) , m_type(DescriptorsTreeItem::Type::File) { } DescriptorsTreeItem::~DescriptorsTreeItem() { } bool DescriptorsTreeItem::operator < (const Parent& _other) const { const auto col = treeWidget()->sortColumn(); switch (col) { case DESC_COL_FILE_LINE: { if (parent() != nullptr) return data(col, Qt::UserRole).toInt() < _other.data(col, Qt::UserRole).toInt(); } } return Parent::operator < (_other); } QVariant DescriptorsTreeItem::data(int _column, int _role) const { switch (_column) { case DESC_COL_TYPE: { if (_role == Qt::ToolTipRole) { switch (m_type) { case Type::File: return QStringLiteral("File"); case Type::Event: return QStringLiteral("Event"); case Type::Block: return QStringLiteral("Block"); case Type::Value: return QStringLiteral("Arbitrary Value"); } } else if (_role == Qt::DisplayRole) { switch (m_type) { case Type::File: return QStringLiteral("F"); case Type::Event: return QStringLiteral("E"); case 
Type::Block: return QStringLiteral("B"); case Type::Value: return QStringLiteral("V"); } } break; } case DESC_COL_FILE_LINE: { if (parent() != nullptr) { if (_role == Qt::ToolTipRole) { const int row = data(_column, Qt::UserRole).toInt(); return QString("%1:%2").arg(parent()->data(_column, Qt::UserRole).toString()).arg(row); } else if (_role == Qt::DisplayRole) { const int row = data(_column, Qt::UserRole).toInt(); return QString("%1:%2").arg(parent()->text(_column)).arg(row); } } else if (_role == Qt::ToolTipRole) { return data(_column, Qt::UserRole).toString(); } break; } default: { break; } } return Parent::data(_column, _role); } ////////////////////////////////////////////////////////////////////////// DescriptorsTreeWidget::DescriptorsTreeWidget(QWidget* _parent) : Parent(_parent) , m_lastFound(nullptr) , m_lastFoundIndex(0) , m_lastSearchColumn(-1) , m_searchColumn(DESC_COL_NAME) , m_bLocked(false) , m_bCaseSensitiveSearch(false) , m_bInitialized(false) { memset(m_columnsMinimumWidth, 0, sizeof(m_columnsMinimumWidth)); setAutoFillBackground(false); setAlternatingRowColors(true); setItemsExpandable(true); setAnimated(true); setSortingEnabled(false); setColumnCount(DESC_COL_COLUMNS_NUMBER); setSelectionBehavior(QAbstractItemView::SelectRows); auto header_item = new QTreeWidgetItem(); header_item->setText(DESC_COL_FILE_LINE, "File/Line"); header_item->setText(DESC_COL_TYPE, "Type"); header_item->setText(DESC_COL_NAME, "Name"); header_item->setText(DESC_COL_STATUS, "Status"); setHeaderItem(header_item); connect(&EASY_GLOBALS.events, &::profiler_gui::GlobalSignals::selectedBlockChanged, this, &This::onSelectedBlockChange); connect(&EASY_GLOBALS.events, &::profiler_gui::GlobalSignals::blockStatusChanged, this, &This::onBlockStatusChange); connect(this, &Parent::itemExpanded, this, &This::onItemExpand); connect(this, &Parent::itemDoubleClicked, this, &This::onDoubleClick); connect(this, &Parent::currentItemChanged, this, &This::onCurrentItemChange); 
connect(header(), &QHeaderView::sectionResized, this, &This::onHeaderSectionResized); loadSettings(); setItemDelegateForColumn(m_searchColumn, new DescWidgetItemDelegate(this)); } DescriptorsTreeWidget::~DescriptorsTreeWidget() { if (::profiler_gui::is_max(EASY_GLOBALS.selected_block) && !::profiler_gui::is_max(EASY_GLOBALS.selected_block_id)) { ::profiler_gui::set_max(EASY_GLOBALS.selected_block_id); emit EASY_GLOBALS.events.refreshRequired(); } saveSettings(); } void DescriptorsTreeWidget::showEvent(QShowEvent* event) { Parent::showEvent(event); if (!m_bInitialized) { #if !defined(_WIN32) && !defined(__APPLE__) const auto padding = px(9); #else const auto padding = px(6); #endif auto header = this->header(); auto headerItem = this->headerItem(); auto f = header->font(); #if !defined(_WIN32) && !defined(__APPLE__) f.setBold(true); #endif QFontMetrics fm(f); const auto indicatorSize = header->isSortIndicatorShown() ? px(11) : 0; for (int i = 0; i < DESC_COL_COLUMNS_NUMBER; ++i) { auto minSize = static_cast<int>(fm.width(headerItem->text(i)) * profiler_gui::FONT_METRICS_FACTOR + padding); m_columnsMinimumWidth[i] = minSize; if (header->isSortIndicatorShown() && header->sortIndicatorSection() == i) { minSize += indicatorSize; } if (header->sectionSize(i) < minSize) { header->resizeSection(i, minSize); } } m_bInitialized = true; } } ////////////////////////////////////////////////////////////////////////// void DescriptorsTreeWidget::resetSearch(bool repaint) { if (m_lastSearch.isEmpty()) { return; } m_lastSearchColumn = m_searchColumn; m_bCaseSensitiveSearch = false; m_lastSearch.clear(); m_lastFound = nullptr; m_lastFoundIndex = 0; if (repaint) { viewport()->update(); } } void DescriptorsTreeWidget::setSearchColumn(int column) { const int prevColumn = m_searchColumn; m_searchColumn = column; if (m_searchColumn != prevColumn) { auto delegate = itemDelegateForColumn(prevColumn); setItemDelegateForColumn(prevColumn, nullptr); delete delegate; 
setItemDelegateForColumn(m_searchColumn, new DescWidgetItemDelegate(this)); } emit searchColumnChanged(column); } int DescriptorsTreeWidget::searchColumn() const { return m_searchColumn; } QTreeWidgetItem* DescriptorsTreeWidget::lastFoundItem() const { return m_lastFound; } bool DescriptorsTreeWidget::caseSensitiveSearch() const { return m_bCaseSensitiveSearch; } const QString& DescriptorsTreeWidget::searchString() const { return m_lastSearch; } int DescriptorsTreeWidget::lastFoundIndex() const { return m_lastFoundIndex; } ////////////////////////////////////////////////////////////////////////// void DescriptorsTreeWidget::contextMenuEvent(QContextMenuEvent* _event) { _event->accept(); QMenu menu; menu.setToolTipsVisible(true); auto action = menu.addAction("Expand All"); action->setIcon(QIcon(imagePath("expand"))); connect(action, &QAction::triggered, this, &This::expandAll); action = menu.addAction("Collapse All"); action->setIcon(QIcon(imagePath("collapse"))); connect(action, &QAction::triggered, this, &This::collapseAll); auto item = currentItem(); if (item != nullptr && item->parent() != nullptr && currentColumn() >= DESC_COL_TYPE) { const auto& desc = easyDescriptor(static_cast<DescriptorsTreeItem*>(item)->desc()); menu.addSeparator(); auto submenu = menu.addMenu("Change Status"); submenu->setToolTipsVisible(true); #define ADD_STATUS_ACTION(NameValue, StatusValue, ToolTipValue)\ action = submenu->addAction(NameValue);\ action->setCheckable(true);\ action->setChecked(desc.status() == StatusValue);\ action->setData(static_cast<quint32>(StatusValue));\ action->setToolTip(ToolTipValue);\ connect(action, &QAction::triggered, this, &This::onBlockStatusChangeClicked) ADD_STATUS_ACTION("Off", ::profiler::OFF, "Do not profile this block."); ADD_STATUS_ACTION("On", ::profiler::ON, "Profile this block\nif parent enabled children."); ADD_STATUS_ACTION("Force-On", ::profiler::FORCE_ON, "Always profile this block even\nif it's parent disabled children."); 
ADD_STATUS_ACTION("Off-recursive", ::profiler::OFF_RECURSIVE, "Do not profile neither this block\nnor it's children."); ADD_STATUS_ACTION("On-without-children", ::profiler::ON_WITHOUT_CHILDREN, "Profile this block, but\ndo not profile it's children."); ADD_STATUS_ACTION("Force-On-without-children", ::profiler::FORCE_ON_WITHOUT_CHILDREN, "Always profile this block, but\ndo not profile it's children."); #undef ADD_STATUS_ACTION submenu->setEnabled(EASY_GLOBALS.connected); if (!EASY_GLOBALS.connected) submenu->setTitle(QString("%1 (connection needed)").arg(submenu->title())); } if (item != nullptr) { menu.addSeparator(); action = menu.addAction(QStringLiteral("Copy Full Path")); connect(action, &QAction::triggered, [this, item] (bool) { auto fileItem = item->parent() == nullptr ? item : item->parent(); auto fullName = fileItem->data(DESC_COL_FILE_LINE, Qt::UserRole).toString(); qApp->clipboard()->setText(fullName); }); } menu.exec(QCursor::pos()); } ////////////////////////////////////////////////////////////////////////// void DescriptorsTreeWidget::clearSilent(bool _global) { const QSignalBlocker b(this); setSortingEnabled(false); resetSearch(false); m_items.clear(); if (topLevelItemCount() != 0) { ::std::vector<QTreeWidgetItem*> topLevelItems; topLevelItems.reserve(static_cast<size_t>(topLevelItemCount())); for (int i = topLevelItemCount() - 1; i >= 0; --i) { const bool expanded = !_global && topLevelItem(i)->isExpanded(); auto item = takeTopLevelItem(i); if (expanded) m_expandedFilesTemp.insert(item->text(DESC_COL_FILE_LINE).toStdString()); topLevelItems.push_back(item); } #ifdef EASY_LAMBDA_MOVE_CAPTURE ThreadPool::instance().backgroundJob([items = std::move(topLevelItems)] { for (auto item : items) #else ThreadPool::instance().backgroundJob([topLevelItems] { for (auto item : topLevelItems) #endif profiler_gui::deleteTreeItem(item); }); } //clear(); } ////////////////////////////////////////////////////////////////////////// struct FileItems { using Items = 
::std::unordered_map<int, DescriptorsTreeItem*, ::estd::hash<int> >; Items children; QTreeWidgetItem* item = nullptr; }; void DescriptorsTreeWidget::build() { auto f = font(); f.setBold(true); typedef ::std::unordered_map<::std::string, FileItems> Files; Files fileItems; m_items.resize(EASY_GLOBALS.descriptors.size()); memset(m_items.data(), 0, sizeof(void*) * m_items.size()); const QSignalBlocker b(this); profiler::block_id_t id = 0, count = 0; QString commonDir; for (auto desc : EASY_GLOBALS.descriptors) { if (desc != nullptr) { ++count; auto& p = fileItems[desc->file()]; if (p.item == nullptr) { auto item = new DescriptorsTreeItem(0); auto fullName = QString(desc->file()).remove(QRegExp("^(\\.{2}\\\\+)+")); // without leading "..\" auto fileName = QString(desc->file()).remove(QRegExp("^(.+(\\\\|\\/)+)+")); auto dir = fullName.left(fullName.length() - fileName.length()); if (count == 1) { commonDir = dir; } else if (!commonDir.isEmpty()) { if (dir.length() < commonDir.length()) { commonDir.truncate(dir.length()); } int i = 0; for (; i < commonDir.length(); ++i) { if (commonDir[i] != dir[i]) { break; } } commonDir.truncate(i); } item->setData(DESC_COL_FILE_LINE, Qt::UserRole, fullName); item->setType(DescriptorsTreeItem::Type::File); p.item = item; } auto it = p.children.find(desc->line()); if (it == p.children.end()) { auto item = new DescriptorsTreeItem(desc->id(), p.item); item->setData(DESC_COL_FILE_LINE, Qt::UserRole, desc->line()); item->setText(DESC_COL_NAME, desc->name()); switch (desc->type()) { case ::profiler::BlockType::Block: item->setType(DescriptorsTreeItem::Type::Block); break; case ::profiler::BlockType::Event: item->setType(DescriptorsTreeItem::Type::Event); break; case ::profiler::BlockType::Value: item->setType(DescriptorsTreeItem::Type::Value); break; case ::profiler::BlockType::TypesCount: break; } item->setFont(DESC_COL_STATUS, f); item->setText(DESC_COL_STATUS, statusText(desc->status())); item->setForeground(DESC_COL_STATUS, 
QColor::fromRgba(statusColor(desc->status()))); m_items[id] = item; p.children.insert(::std::make_pair(desc->line(), item)); } else { m_items[id] = it->second; } } ++id; } for (auto& p : fileItems) { auto fullName = p.second.item->data(DESC_COL_FILE_LINE, Qt::UserRole).toString(); p.second.item->setText(DESC_COL_FILE_LINE, fullName.right(fullName.length() - commonDir.length())); addTopLevelItem(p.second.item); if (m_expandedFilesTemp.find(p.first) != m_expandedFilesTemp.end()) p.second.item->setExpanded(true); } m_expandedFilesTemp.clear(); setSortingEnabled(true); sortByColumn(DESC_COL_FILE_LINE, Qt::AscendingOrder); resizeColumnsToContents(); QTimer::singleShot(100, [this] { onSelectedBlockChange(EASY_GLOBALS.selected_block); }); } ////////////////////////////////////////////////////////////////////////// void DescriptorsTreeWidget::onHeaderSectionResized(int logicalIndex, int /*oldSize*/, int newSize) { const auto indicatorSize = header()->isSortIndicatorShown() && header()->sortIndicatorSection() == logicalIndex ? 
px(11) : 0; const auto minSize = m_columnsMinimumWidth[logicalIndex] + indicatorSize; if (!m_bInitialized || newSize >= minSize) { return; } header()->resizeSection(logicalIndex, minSize); } ////////////////////////////////////////////////////////////////////////// void DescriptorsTreeWidget::onItemExpand(QTreeWidgetItem*) { resizeColumnsToContents(); } ////////////////////////////////////////////////////////////////////////// void DescriptorsTreeWidget::onDoubleClick(QTreeWidgetItem* _item, int _column) { if (!EASY_GLOBALS.connected) return; if (_column >= DESC_COL_TYPE && _item->parent() != nullptr) { auto item = static_cast<DescriptorsTreeItem*>(_item); auto& desc = easyDescriptor(item->desc()); desc.setStatus(nextStatus(desc.status())); item->setText(DESC_COL_STATUS, statusText(desc.status())); item->setForeground(DESC_COL_STATUS, QColor::fromRgba(statusColor(desc.status()))); m_bLocked = true; emit EASY_GLOBALS.events.blockStatusChanged(desc.id(), desc.status()); m_bLocked = false; } } ////////////////////////////////////////////////////////////////////////// void DescriptorsTreeWidget::onCurrentItemChange(QTreeWidgetItem* _item, QTreeWidgetItem* _prev) { if (_item != nullptr) { if (::profiler_gui::is_max(EASY_GLOBALS.selected_block) && _item->parent() != nullptr) { const auto id = static_cast<DescriptorsTreeItem*>(_item)->desc(); if (EASY_GLOBALS.selected_block_id != id) { EASY_GLOBALS.selected_block_id = id; emit EASY_GLOBALS.events.selectedBlockIdChanged(id); } } } else if (::profiler_gui::is_max(EASY_GLOBALS.selected_block) && !::profiler_gui::is_max(EASY_GLOBALS.selected_block_id)) { ::profiler_gui::set_max(EASY_GLOBALS.selected_block_id); emit EASY_GLOBALS.events.selectedBlockIdChanged(EASY_GLOBALS.selected_block_id); } } ////////////////////////////////////////////////////////////////////////// void DescriptorsTreeWidget::onBlockStatusChangeClicked(bool _checked) { if (!_checked || !EASY_GLOBALS.connected) return; auto item = currentItem(); if (item == 
nullptr || item->parent() == nullptr) return; auto action = qobject_cast<QAction*>(sender()); if (action != nullptr) { auto& desc = easyDescriptor(static_cast<DescriptorsTreeItem*>(item)->desc()); desc.setStatus(static_cast<::profiler::EasyBlockStatus>(action->data().toUInt())); item->setText(DESC_COL_STATUS, statusText(desc.status())); item->setForeground(DESC_COL_STATUS, QColor::fromRgba(statusColor(desc.status()))); m_bLocked = true; emit EASY_GLOBALS.events.blockStatusChanged(desc.id(), desc.status()); m_bLocked = false; } } void DescriptorsTreeWidget::onBlockStatusChange(::profiler::block_id_t _id, ::profiler::EasyBlockStatus _status) { if (m_bLocked) return; auto item = m_items[_id]; if (item == nullptr) return; auto& desc = easyDescriptor(item->desc()); item->setText(DESC_COL_STATUS, statusText(desc.status())); item->setForeground(DESC_COL_STATUS, QColor::fromRgba(statusColor(desc.status()))); } ////////////////////////////////////////////////////////////////////////// void DescriptorsTreeWidget::resizeColumnsToContents() { for (int i = 0; i < DESC_COL_COLUMNS_NUMBER; ++i) resizeColumnToContents(i); } ////////////////////////////////////////////////////////////////////////// void DescriptorsTreeWidget::onSelectedBlockChange(uint32_t _block_index) { if (::profiler_gui::is_max(_block_index)) { setCurrentItem(nullptr); return; } auto item = m_items[easyBlocksTree(_block_index).node->id()]; if (item == nullptr) return; scrollToItem(item, QAbstractItemView::PositionAtCenter); setCurrentItem(item); } ////////////////////////////////////////////////////////////////////////// void DescriptorsTreeWidget::loadSettings() { QSettings settings(::profiler_gui::ORGANAZATION_NAME, ::profiler_gui::APPLICATION_NAME); settings.beginGroup("desc_tree_widget"); auto val = settings.value("searchColumn"); if (!val.isNull()) m_searchColumn = val.toInt(); settings.endGroup(); } void DescriptorsTreeWidget::saveSettings() { QSettings settings(::profiler_gui::ORGANAZATION_NAME, 
::profiler_gui::APPLICATION_NAME); settings.beginGroup("desc_tree_widget"); settings.setValue("searchColumn", m_searchColumn); settings.endGroup(); } ////////////////////////////////////////////////////////////////////////// int DescriptorsTreeWidget::findNext(const QString& _str, Qt::MatchFlags _flags) { if (_str.isEmpty()) { resetSearch(); return 0; } const bool isNewSearch = (m_lastSearchColumn != m_searchColumn || m_lastSearch != _str); auto itemsList = findItems(_str, Qt::MatchContains | Qt::MatchRecursive | _flags, m_searchColumn); m_bCaseSensitiveSearch = _flags.testFlag(Qt::MatchCaseSensitive); if (!isNewSearch) { if (!itemsList.empty()) { bool stop = false; int i = 0; decltype(m_lastFound) next = nullptr; for (auto item : itemsList) { if (stop) { next = item; break; } stop = item == m_lastFound; ++i; } m_lastFound = next == nullptr ? itemsList.front() : next; m_lastFoundIndex = next == nullptr ? 0 : i; } else { m_lastFound = nullptr; m_lastFoundIndex = 0; } } else { m_lastSearchColumn = m_searchColumn; m_lastSearch = _str; m_lastFound = !itemsList.empty() ? itemsList.front() : nullptr; m_lastFoundIndex = 0; } if (m_lastFound != nullptr) { scrollToItem(m_lastFound, QAbstractItemView::PositionAtCenter); setCurrentItem(m_lastFound); } viewport()->update(); return itemsList.size(); } int DescriptorsTreeWidget::findPrev(const QString& _str, Qt::MatchFlags _flags) { if (_str.isEmpty()) { resetSearch(); return 0; } const bool isNewSearch = (m_lastSearchColumn != m_searchColumn || m_lastSearch != _str); auto itemsList = findItems(_str, Qt::MatchContains | Qt::MatchRecursive | _flags, m_searchColumn); m_bCaseSensitiveSearch = _flags.testFlag(Qt::MatchCaseSensitive); if (!isNewSearch) { if (!itemsList.empty()) { int i = 0; decltype(m_lastFound) prev = nullptr; for (auto item : itemsList) { if (item == m_lastFound) { --i; break; } prev = item; ++i; } m_lastFound = prev == nullptr ? itemsList.back() : prev; m_lastFoundIndex = prev == nullptr ? 
itemsList.length() - 1 : i; } else { m_lastFound = nullptr; m_lastFoundIndex = 0; } } else { m_lastSearchColumn = m_searchColumn; m_lastSearch = _str; if (!itemsList.empty()) { m_lastFound = itemsList.back(); m_lastFoundIndex = itemsList.length() - 1; } else { m_lastFound = nullptr; m_lastFoundIndex = 0; } } if (m_lastFound != nullptr) { scrollToItem(m_lastFound, QAbstractItemView::PositionAtCenter); setCurrentItem(m_lastFound); } viewport()->update(); return itemsList.size(); } ////////////////////////////////////////////////////////////////////////// BlockDescriptorsWidget::BlockDescriptorsWidget(QWidget* _parent) : Parent(_parent) , m_splitter(new QSplitter(Qt::Vertical, this)) , m_tree(new DescriptorsTreeWidget(this)) , m_values(new ArbitraryValuesWidget(this)) , m_searchBox(new QLineEdit(this)) , m_foundNumber(new QLabel(QStringLiteral("<font color=\"red\">0</font> matches"), this)) , m_searchButton(nullptr) , m_bCaseSensitiveSearch(false) { m_splitter->setHandleWidth(1); m_splitter->setContentsMargins(0, 0, 0, 0); m_splitter->addWidget(m_tree); m_splitter->addWidget(m_values); m_splitter->setStretchFactor(0, 1); m_splitter->setStretchFactor(1, 1); m_searchBox->setContentsMargins(5, 0, 0, 0); m_searchBox->setClearButtonEnabled(true); m_searchBox->setPlaceholderText("Search"); auto tb = new QToolBar(this); tb->setIconSize(applicationIconsSize()); auto refreshButton = tb->addAction(QIcon(imagePath("reload")), tr("Refresh blocks list")); refreshButton->setEnabled(EASY_GLOBALS.connected); refreshButton->setToolTip(tr("Refresh blocks list.\nConnection needed.")); connect(refreshButton, &QAction::triggered, &EASY_GLOBALS.events, &::profiler_gui::GlobalSignals::blocksRefreshRequired); auto menu = new QMenu(this); m_searchButton = menu->menuAction(); m_searchButton->setText("Find next"); m_searchButton->setIcon(QIcon(imagePath("find-next"))); m_searchButton->setData(true); connect(m_searchButton, &QAction::triggered, this, &This::findNext); auto actionGroup = new 
QActionGroup(this); actionGroup->setExclusive(true); auto a = new QAction(tr("Find next"), actionGroup); a->setCheckable(true); a->setChecked(true); connect(a, &QAction::triggered, this, &This::findNextFromMenu); menu->addAction(a); a = new QAction(tr("Find previous"), actionGroup); a->setCheckable(true); connect(a, &QAction::triggered, this, &This::findPrevFromMenu); menu->addAction(a); a = menu->addAction("Case sensitive"); a->setCheckable(true); a->setChecked(m_bCaseSensitiveSearch); connect(a, &QAction::triggered, [this](bool _checked){ m_bCaseSensitiveSearch = _checked; }); menu->addAction(a); QAction* caseSensitiveSwitch = a; menu->addSeparator(); auto headerItem = m_tree->headerItem(); actionGroup = new QActionGroup(this); actionGroup->setExclusive(true); for (int i = 0; i < DESC_COL_STATUS; ++i) { if (i == DESC_COL_TYPE) continue; a = new QAction(QStringLiteral("Search by ") + headerItem->text(i), actionGroup); a->setData(i); a->setCheckable(true); if (i == m_tree->searchColumn()) a->setChecked(true); connect(a, &QAction::triggered, this, &This::onSearchColumnChange); menu->addAction(a); } tb->addSeparator(); tb->addAction(m_searchButton); tb->addWidget(m_searchBox); auto searchbox = new QHBoxLayout(); searchbox->setContentsMargins(0, 0, 5, 0); searchbox->addWidget(tb); searchbox->addSpacing(5); searchbox->addWidget(m_foundNumber); searchbox->addStretch(100); auto lay = new QVBoxLayout(this); lay->setContentsMargins(1, 1, 1, 1); lay->addLayout(searchbox); lay->addWidget(m_splitter); connect(m_searchBox, &QLineEdit::returnPressed, this, &This::onSeachBoxReturnPressed); connect(m_searchBox, &QLineEdit::textChanged, this, &This::onSearchBoxTextChanged); connect(m_tree, &DescriptorsTreeWidget::searchColumnChanged, this, &This::onSearchColumnChanged); connect(&EASY_GLOBALS.events, &::profiler_gui::GlobalSignals::connectionChanged, refreshButton, &QAction::setEnabled); connect(&EASY_GLOBALS.events, &profiler_gui::GlobalSignals::allDataGoingToBeDeleted, this, 
&This::clear); connect(&EASY_GLOBALS.events, &profiler_gui::GlobalSignals::fileOpened, this, &This::build); loadSettings(); caseSensitiveSwitch->setChecked(m_bCaseSensitiveSearch); onSearchColumnChanged(m_tree->searchColumn()); m_foundNumber->hide(); } BlockDescriptorsWidget::~BlockDescriptorsWidget() { saveSettings(); } void BlockDescriptorsWidget::loadSettings() { QSettings settings(::profiler_gui::ORGANAZATION_NAME, ::profiler_gui::APPLICATION_NAME); settings.beginGroup("BlockDescriptorsWidget"); auto val = settings.value("case_sensitive"); if (!val.isNull()) m_bCaseSensitiveSearch = val.toBool(); auto geometry = settings.value("vsplitter/geometry").toByteArray(); if (!geometry.isEmpty()) m_splitter->restoreGeometry(geometry); auto state = settings.value("vsplitter/state").toByteArray(); if (!state.isEmpty()) m_splitter->restoreState(state); settings.endGroup(); } void BlockDescriptorsWidget::saveSettings() { QSettings settings(::profiler_gui::ORGANAZATION_NAME, ::profiler_gui::APPLICATION_NAME); settings.beginGroup("BlockDescriptorsWidget"); settings.setValue("case_sensitive", m_bCaseSensitiveSearch); settings.setValue("vsplitter/geometry", m_splitter->saveGeometry()); settings.setValue("vsplitter/state", m_splitter->saveState()); settings.endGroup(); } void BlockDescriptorsWidget::keyPressEvent(QKeyEvent* _event) { switch (_event->key()) { case Qt::Key_F3: { if (_event->modifiers() & Qt::ShiftModifier) findPrev(true); else findNext(true); break; } case Qt::Key_Escape: { m_searchBox->clear(); break; } default: break; } _event->accept(); } void BlockDescriptorsWidget::contextMenuEvent(QContextMenuEvent* _event) { m_tree->contextMenuEvent(_event); } void BlockDescriptorsWidget::showEvent(QShowEvent* event) { Parent::showEvent(event); m_searchBox->setFixedWidth(px(300)); } void BlockDescriptorsWidget::build() { m_tree->clearSilent(false); m_foundNumber->setText(QStringLiteral("<font color=\"red\">0</font> matches")); m_foundNumber->hide(); m_tree->build(); 
m_values->rebuild(); } void BlockDescriptorsWidget::clear() { m_tree->clearSilent(true); m_foundNumber->setText(QStringLiteral("<font color=\"red\">0</font> matches")); m_foundNumber->hide(); m_values->clear(); } ArbitraryValuesWidget* BlockDescriptorsWidget::dataViewer() const { return m_values; } void BlockDescriptorsWidget::onSeachBoxReturnPressed() { if (m_searchButton->data().toBool()) findNext(true); else findPrev(true); } void BlockDescriptorsWidget::onSearchBoxTextChanged(const QString& _text) { if (_text.isEmpty()) { m_foundNumber->hide(); m_tree->resetSearch(); } } void BlockDescriptorsWidget::onSearchColumnChanged(int column) { switch (column) { case DESC_COL_NAME: m_searchBox->setPlaceholderText("Search by name"); break; case DESC_COL_FILE_LINE: m_searchBox->setPlaceholderText("Search by filename"); break; default: m_searchBox->setPlaceholderText("Search"); break; } onSeachBoxReturnPressed(); } void BlockDescriptorsWidget::onSearchColumnChange(bool) { auto action = qobject_cast<QAction*>(sender()); if (action != nullptr) m_tree->setSearchColumn(action->data().toInt()); } void BlockDescriptorsWidget::findNext(bool) { auto text = m_searchBox->text(); if (text.isEmpty()) { if (m_foundNumber->isVisible()) m_foundNumber->hide(); m_tree->resetSearch(); return; } auto matches = m_tree->findNext(text, m_bCaseSensitiveSearch ? 
Qt::MatchCaseSensitive : Qt::MatchFlags()); if (matches == 0) { m_foundNumber->setText(QStringLiteral("<font color=\"red\">0</font> matches")); } else if (matches == 1) { m_foundNumber->setText(QStringLiteral("<font color=\"#f5f5f5\" style=\"background:#e040fb\">&nbsp;1&nbsp;</font> match")); } else { auto i = m_tree->lastFoundIndex() + 1; m_foundNumber->setText(QString("<font color=\"#f5f5f5\" style=\"background:#e040fb\">&nbsp;%1&nbsp;</font> of " "<font style=\"background:#ffeb3b\">&nbsp;%2&nbsp;</font> matches") .arg(i).arg(matches)); } if (!m_foundNumber->isVisible()) m_foundNumber->show(); } void BlockDescriptorsWidget::findPrev(bool) { auto text = m_searchBox->text(); if (text.isEmpty()) { if (m_foundNumber->isVisible()) m_foundNumber->hide(); m_tree->resetSearch(); return; } auto matches = m_tree->findPrev(text, m_bCaseSensitiveSearch ? Qt::MatchCaseSensitive : Qt::MatchFlags()); if (matches == 0) { m_foundNumber->setText(QStringLiteral("<font color=\"red\">0</font> matches")); } else if (matches == 1) { m_foundNumber->setText(QStringLiteral("<font color=\"#f5f5f5\" style=\"background:#e040fb\">&nbsp;1&nbsp;</font> match")); } else { auto i = m_tree->lastFoundIndex() + 1; m_foundNumber->setText(QString("<font color=\"#f5f5f5\" style=\"background:#e040fb\">&nbsp;%1&nbsp;</font> of " "<font style=\"background:#ffeb3b\">&nbsp;%2&nbsp;</font> matches") .arg(i).arg(matches)); } if (!m_foundNumber->isVisible()) m_foundNumber->show(); } void BlockDescriptorsWidget::findNextFromMenu(bool _checked) { if (!_checked) return; if (!m_searchButton->data().toBool()) { m_searchButton->setData(true); m_searchButton->setText(tr("Find next")); m_searchButton->setIcon(QIcon(imagePath("find-next"))); disconnect(m_searchButton, &QAction::triggered, this, &This::findPrev); connect(m_searchButton, &QAction::triggered, this, &This::findNext); } findNext(true); } void BlockDescriptorsWidget::findPrevFromMenu(bool _checked) { if (!_checked) return; if 
(m_searchButton->data().toBool()) { m_searchButton->setData(false); m_searchButton->setText(tr("Find prev")); m_searchButton->setIcon(QIcon(imagePath("find-prev"))); disconnect(m_searchButton, &QAction::triggered, this, &This::findNext); connect(m_searchButton, &QAction::triggered, this, &This::findPrev); } findPrev(true); } ////////////////////////////////////////////////////////////////////////// DescWidgetItemDelegate::DescWidgetItemDelegate(DescriptorsTreeWidget* parent) : QStyledItemDelegate(parent) , m_treeWidget(parent) { } DescWidgetItemDelegate::~DescWidgetItemDelegate() { } void DescWidgetItemDelegate::paint(QPainter* painter, const QStyleOptionViewItem& option, const QModelIndex& index) const { QStyledItemDelegate::paint(painter, option, index); highlightMatchingText(painter, option, index); } void DescWidgetItemDelegate::highlightMatchingText( QPainter* painter, const QStyleOptionViewItem& option, const QModelIndex& index ) const { if (m_treeWidget->lastFoundItem() != nullptr && !m_treeWidget->searchString().isEmpty()) { // Highlight matching word auto displayData = m_treeWidget->model()->data(index); if (displayData.canConvert<QString>()) { const auto text = displayData.toString(); const auto caseSensitivity = m_treeWidget->caseSensitiveSearch() ? 
Qt::CaseSensitive : Qt::CaseInsensitive; if (text.contains(m_treeWidget->searchString(), caseSensitivity)) { auto lastFoundIndex = m_treeWidget->indexFromItem(m_treeWidget->lastFoundItem(), index.column()); highlightMatchingText( painter, option, text, m_treeWidget->searchString(), caseSensitivity, lastFoundIndex == index ); } } } } void DescWidgetItemDelegate::highlightMatchingText( QPainter* painter, const QStyleOptionViewItem& option, const QString& text, const QString& pattern, Qt::CaseSensitivity caseSensitivity, bool current ) const { const auto str = pattern.toStdString(); (void)str; QTextDocument doc; doc.setDefaultFont(painter->font()); auto textOption = doc.defaultTextOption(); textOption.setWrapMode(QTextOption::NoWrap); doc.setDefaultTextOption(textOption); doc.setTextWidth(option.rect.width()); const auto elidedText = painter->fontMetrics().elidedText(text, Qt::ElideRight, option.rect.width()); doc.setHtml(elidedText); TextHighlighter highlighter( &doc, painter->pen().color(), QColor::fromRgb(profiler::colors::Grey100), pattern, caseSensitivity, current ); painter->save(); #ifdef _WIN32 EASY_CONSTEXPR int fixed_padding_x = -1; EASY_CONSTEXPR int fixed_padding_y = 0; #else EASY_CONSTEXPR int fixed_padding_x = -1; EASY_CONSTEXPR int fixed_padding_y = -1; #endif auto dh = std::max((option.rect.height() - doc.size().height()) * 0.5, 0.); painter->translate(option.rect.left() + fixed_padding_x, option.rect.top() + dh + fixed_padding_y); QRect clip(0, 0, option.rect.width(), option.rect.height()); painter->setClipRect(clip); QAbstractTextDocumentLayout::PaintContext ctx; ctx.clip = clip; ctx.palette.setColor(QPalette::Text, Qt::transparent); doc.documentLayout()->draw(painter, ctx); painter->restore(); } //////////////////////////////////////////////////////////////////////////
18,723
4,756
// Copyright 2020 The MACE Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef MICRO_BASE_LOGGER_H_
#define MICRO_BASE_LOGGER_H_

#include <stdint.h>

namespace micro {

// Log severity levels, ordered by increasing severity.
// CLEAN emits the message with no severity decoration; INVALID_MAX is a
// sentinel marking the end of the valid range (not a real level).
enum LogLevel {
  CLEAN = 0,
  INFO = 1,
  WARNING = 2,
  ERROR = 3,
  FATAL = 4,
  INVALID_MAX,
};

namespace base {

// Stream-style logger for the micro runtime: one Logger instance represents a
// single log statement. The constructor records the source file, line and
// severity; values are appended with the chained operator<< overloads.
// NOTE(review): the message is presumably flushed (and FATAL presumably
// terminates) in the destructor, i.e. at the end of the full statement --
// confirm against the implementation in logger.cc.
class Logger {
 public:
  // fname/line identify the call site (e.g. via __FILE__/__LINE__).
  Logger(const char *fname, uint32_t line, LogLevel severity);
  ~Logger();

  // One overload per supported value type; each appends a textual
  // representation of the value and returns *this for chaining.
  const Logger &operator<<(const char *str) const;
  const Logger &operator<<(const char c) const;
  const Logger &operator<<(const float value) const;
  const Logger &operator<<(const int64_t value) const;
  const Logger &operator<<(const int32_t value) const;
  const Logger &operator<<(const uint32_t value) const;
  const Logger &operator<<(const int16_t value) const;
  const Logger &operator<<(const uint16_t value) const;
  const Logger &operator<<(const int8_t value) const;
  const Logger &operator<<(const uint8_t value) const;
  const Logger &operator<<(const bool value) const;

 private:
  LogLevel severity_;  // severity of this log statement
};

}  // namespace base
}  // namespace micro

#endif  // MICRO_BASE_LOGGER_H_
535
4,216
/**
 * @file core/util/binding_details.hpp
 * @author <NAME>
 *
 * This defines the structure that holds documentation details for bindings.
 *
 * mlpack is free software; you may redistribute it and/or modify it under the
 * terms of the 3-clause BSD license. You should have received a copy of the
 * 3-clause BSD license along with mlpack. If not, see
 * http://www.opensource.org/licenses/BSD-3-Clause for more information.
 */
#ifndef MLPACK_CORE_UTIL_BINDING_DETAILS_HPP
#define MLPACK_CORE_UTIL_BINDING_DETAILS_HPP

#include <mlpack/prereqs.hpp>
#include "program_doc.hpp"

namespace mlpack {
namespace util {

/**
 * This structure holds all of the information about bindings documentation.
 * It is a plain aggregate: every member is filled in by the binding macros
 * and later read when generating user-facing documentation.
 */
struct BindingDetails {
  //! User-friendly name of the binding.
  std::string name;
  //! A short two-sentence description of the binding, what it does, and what
  //! it is useful for.
  std::string shortDescription;
  //! Long string containing documentation on what it is. No newline characters
  //! are necessary; this is taken care of by IO later. Stored as a callable so
  //! the (potentially large) text is only built when actually requested.
  std::function<std::string()> longDescription;
  //! Documentation on how to use the binding. Each entry is one lazily-built
  //! example snippet.
  std::vector<std::function<std::string()>> example;
  //! A set of pairs of strings with useful "see also" information; each pair
  //! is <description, url>.
  std::vector<std::pair<std::string, std::string>> seeAlso;
};

} // namespace util
} // namespace mlpack

#endif
440
1,388
// Emacs style mode select -*- C++ -*- //----------------------------------------------------------------------------- // // $Id:$ // // Copyright (C) 1993-1996 by id Software, Inc. // // This source is available for distribution and/or modification // only under the terms of the DOOM Source Code License as // published by id Software. All rights reserved. // // The source is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // FITNESS FOR A PARTICULAR PURPOSE. See the DOOM Source Code License // for more details. // // DESCRIPTION: // Setup a game, startup stuff. // //----------------------------------------------------------------------------- #ifndef __P_SETUP__ #define __P_SETUP__ #ifdef __GNUG__ #pragma interface #endif // NOT called by W_Ticker. Fixme. void P_SetupLevel ( int episode, int map, int playermask, skill_t skill); // Called by startup code. void P_Init (void); #endif //----------------------------------------------------------------------------- // // $Log:$ // //-----------------------------------------------------------------------------
295
4,538
/*
 * Copyright (C) 2015-2020 Alibaba Group Holding Limited
 */

#ifndef AMP_TASK_H
#define AMP_TASK_H

#include <aos/kernel.h>
#include <stdint.h>

/* Callback type scheduled onto the JSEngine task; data is an opaque user arg. */
typedef void (*amp_engine_call_t)(void *data);

/* Message types understood by the JSEngine task's message queue. */
typedef enum {
    AMP_TASK_MSG_CALLBACK = 0, /* default JSE callback */
    AMP_TASK_MSG_EXIT = 1,
    AMP_TASK_MSG_TYPE_MAX
} amp_task_msg_type_t;

/* Timer firing modes for amp_task_timer_action(). */
typedef enum {
    AMP_TIMER_ONCE = 0, /* one shot timer */
    AMP_TIMER_REPEAT    /* repeat timer */
} amp_timer_type_t;

/* One queued message: the type selects how callback/param are interpreted. */
typedef struct {
    amp_task_msg_type_t type;
    amp_engine_call_t callback;
    void *param;
} amp_task_msg_t;

/**
 * Schedule a callback to run on the JSEngine task.
 * Returns 0 on success, negative on failure (convention assumed -- confirm).
 */
int32_t amp_task_schedule_call(amp_engine_call_t call, void *arg);

/**
 *
 * JSEngine task initialization
 *
 */
int32_t amp_task_init(void);

/**
 * JSEngine yield task, for asynchronous event process
 *
 * @param timeout  maximum time to wait, in milliseconds (assumed -- confirm)
 */
int32_t amp_task_yield(uint32_t timeout);

/**
 * Create a timer that posts `action(arg)` to the JSEngine task every/after
 * `ms` milliseconds, depending on `type`. The allocated internal message is
 * returned via `timer_msg` so the caller can free it when the timer is gone.
 */
aos_timer_t *amp_task_timer_action(uint32_t ms, amp_engine_call_t action,
                                   void *arg, amp_timer_type_t type,
                                   void **timer_msg);

/* Register a callback to run when the JSEngine task exits. */
int32_t amp_task_exit_call(amp_engine_call_t call, void *arg);

/* Invoke all registered module-cleanup callbacks (see register below). */
void amp_module_free(void);

/* Register a per-module cleanup callback invoked by amp_module_free(). */
int32_t amp_module_free_register(void (*callback)(void));

/* Main loop of the JSEngine task; blocks processing queued messages. */
void amp_task_main();

/* Tear down the JSEngine task and its resources. */
int32_t amp_task_deinit();

#endif /* AMP_TASK_H */
545
1,068
/* Lightweight Win32 replacement for <assert.h>.
 * NOTE(review): the guard __ASSERT_H__ uses a reserved identifier (leading
 * double underscore) and collides with common system guards -- consider
 * renaming if this header is ever used more widely. */
#ifndef __ASSERT_H__
#define __ASSERT_H__

/* Two-level stringize: str(x) expands macros in x before stringizing. */
#define _str(x) #x
#define str(x) _str(x)

#ifndef NDEBUG
/* Failure handler (defined elsewhere); receives file, line and the failed
 * expression text, and does not return. */
extern VOID NORETURN _assert(LPCTSTR, UINT, LPCTSTR);
/* On success, records a benign last-error state (SLE_WARNING) instead of
 * doing nothing; on failure, reports and aborts via _assert(). */
#define assert(x) ((x) ? SetLastErrorEx(0, SLE_WARNING) : _assert(TEXT(__FILE__), __LINE__, TEXT(#x)))
#else
/* NOTE(review): unlike the standard assert, this release-mode variant still
 * EVALUATES x (side effects persist); standard assert expands to ((void)0).
 * Confirm this is intentional before relying on it. */
#define assert(x) ((void) (x))
#endif

#endif
188
918
<gh_stars>100-1000 /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.converter.objectstore; import org.apache.avro.Schema; import org.apache.avro.generic.GenericRecord; import org.apache.avro.util.Utf8; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Charsets; import com.google.common.base.Optional; import com.google.common.base.Preconditions; import com.google.common.primitives.Ints; import com.google.common.primitives.Longs; import org.apache.gobblin.annotation.Alpha; import org.apache.gobblin.configuration.WorkUnitState; import org.apache.gobblin.converter.DataConversionException; import org.apache.gobblin.converter.SingleRecordIterable; import org.apache.gobblin.util.AvroUtils; import org.apache.gobblin.writer.objectstore.ObjectStoreDeleteOperation; import org.apache.gobblin.writer.objectstore.ObjectStoreOperationBuilder; /** * A converter to build {@link ObjectStoreDeleteOperation}s using an Avro {@link GenericRecord}. The object id field in * input avro record can be set using {@link #OBJECT_ID_FIELD}. The field name is a required property. * * Supports objectIdField schema types string, int, long and bytes. 
* */ @Alpha public class ObjectStoreDeleteConverter extends ObjectStoreConverter<Schema, GenericRecord, ObjectStoreDeleteOperation> { @VisibleForTesting public static final String OBJECT_ID_FIELD = "gobblin.converter.objectstore.delete.objectIdField"; private String objectIdField; @Override public ObjectStoreDeleteConverter init(WorkUnitState workUnit) { Preconditions.checkArgument(workUnit.contains(OBJECT_ID_FIELD), String.format("%s is a required property. ", OBJECT_ID_FIELD)); this.objectIdField = workUnit.getProp(OBJECT_ID_FIELD); return this; } @Override public Iterable<ObjectStoreDeleteOperation> convertRecord(Class<?> outputSchema, GenericRecord inputRecord, WorkUnitState workUnit) throws DataConversionException { Optional<Object> fieldValue = AvroUtils.getFieldValue(inputRecord, this.objectIdField); byte[] objectId; if (fieldValue.isPresent()) { if (fieldValue.get() instanceof Utf8) { objectId = ((Utf8) fieldValue.get()).getBytes(); } else if (fieldValue.get() instanceof String) { objectId = ((String) fieldValue.get()).getBytes(Charsets.UTF_8); } else if (fieldValue.get() instanceof Long) { objectId = Longs.toByteArray((Long) fieldValue.get()); } else if (fieldValue.get() instanceof Integer) { objectId = Ints.toByteArray((Integer) fieldValue.get()); } else { objectId = (byte[]) fieldValue.get(); } return new SingleRecordIterable<ObjectStoreDeleteOperation>(ObjectStoreOperationBuilder.deleteBuilder() .withObjectId(objectId).build()); } else { throw new DataConversionException(String.format("Object Id field %s not found in record %s", this.objectIdField, inputRecord)); } } }
1,172
320
// Copyright 2020 <NAME> // // The contents of this file may be used under the terms of // the Apache License v2.0 with LLVM Exceptions. // // (See accompanying file LICENSE-Apache or copy at // https://llvm.org/foundation/relicensing/LICENSE.txt) // // Alternatively, the contents of this file may be used under the terms of // the Boost Software License, Version 1.0. // (See accompanying file LICENSE-Boost or copy at // https://www.boost.org/LICENSE_1_0.txt) // // Unless required by applicable law or agreed to in writing, this software // is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. #include "dragonbox/dragonbox_to_chars.h" #include "random_float.h" #include "ryu/ryu.h" #include <iostream> #include <string_view> template <class Float, class TypenameString> static bool uniform_random_test(std::size_t number_of_tests, TypenameString&& type_name_string) { char buffer1[64]; char buffer2[64]; auto rg = generate_correctly_seeded_mt19937_64(); bool success = true; for (std::size_t test_idx = 0; test_idx < number_of_tests; ++test_idx) { auto x = uniformly_randomly_generate_general_float<Float>(rg); // Check if the output is identical to that of Ryu jkj::dragonbox::to_chars(x, buffer1); if constexpr (std::is_same_v<Float, float>) { f2s_buffered(x, buffer2); } else { d2s_buffered(x, buffer2); } std::string_view view1(buffer1); std::string_view view2(buffer2); if (view1 != view2) { std::cout << "Error detected! 
[Ryu = " << buffer2 << ", Dragonbox = " << buffer1 << "]\n"; success = false; } } if (success) { std::cout << "Uniform random test for " << type_name_string << " with " << number_of_tests << " examples succeeded.\n"; } return success; } int main() { constexpr bool run_float = true; constexpr std::size_t number_of_uniform_random_tests_float = 10000000; constexpr bool run_double = true; constexpr std::size_t number_of_uniform_random_tests_double = 10000000; bool success = true; if constexpr (run_float) { std::cout << "[Testing uniformly randomly generated float inputs...]\n"; success &= uniform_random_test<float>(number_of_uniform_random_tests_float, "float"); std::cout << "Done.\n\n\n"; } if constexpr (run_double) { std::cout << "[Testing uniformly randomly generated double inputs...]\n"; success &= uniform_random_test<double>(number_of_uniform_random_tests_double, "double"); std::cout << "Done.\n\n\n"; } if (!success) { return -1; } }
920
412
/* Copyright (c) 2011, pGina Team All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the pGina Team nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #pragma once #include <Windows.h> #include <pGinaNativeLib.h> #include "DialogBase.h" #include "resource.h" namespace pGina { namespace GINA { class DialogLoggedOutSAS : public DialogBase { public: typedef enum DialogResult { SAS_ACTION_LOGON = WLX_SAS_ACTION_LOGON, SAS_ACTION_NONE = WLX_SAS_ACTION_NONE, SAS_ACTION_LOCK_WKSTA = WLX_SAS_ACTION_LOCK_WKSTA, SAS_ACTION_LOGOFF = WLX_SAS_ACTION_LOGOFF, SAS_ACTION_SHUTDOWN = WLX_SAS_ACTION_SHUTDOWN, SAS_ACTION_PWD_CHANGED = WLX_SAS_ACTION_PWD_CHANGED, SAS_ACTION_TASKLIST = WLX_SAS_ACTION_TASKLIST, SAS_ACTION_UNLOCK_WKSTA = WLX_SAS_ACTION_UNLOCK_WKSTA, SAS_ACTION_FORCE_LOGOFF = WLX_SAS_ACTION_FORCE_LOGOFF, SAS_ACTION_SHUTDOWN_POWER_OFF = WLX_SAS_ACTION_SHUTDOWN_POWER_OFF, SAS_ACTION_SHUTDOWN_REBOOT = WLX_SAS_ACTION_SHUTDOWN_REBOOT, SAS_ACTION_SHUTDOWN_SLEEP = WLX_SAS_ACTION_SHUTDOWN_SLEEP, SAS_ACTION_SHUTDOWN_SLEEP2 = WLX_SAS_ACTION_SHUTDOWN_SLEEP2, SAS_ACTION_SHUTDOWN_HIBERNATE = WLX_SAS_ACTION_SHUTDOWN_HIBERNATE, SAS_ACTION_RECONNECTED = WLX_SAS_ACTION_RECONNECTED, SAS_ACTION_DELAYED_FORCE_LOGOFF = WLX_SAS_ACTION_DELAYED_FORCE_LOGOFF, SAS_ACTION_SWITCH_CONSOLE = WLX_SAS_ACTION_SWITCH_CONSOLE, SAS_ACTION_MIN = SAS_ACTION_LOGON, SAS_ACTION_MAX = SAS_ACTION_SWITCH_CONSOLE }; public: DialogLoggedOutSAS(WinlogonInterface *iface) : DialogBase(iface, IDD_LOGGEDOUT_SAS), m_bitmap(NULL), m_statusTimerId(0) { } virtual void DialogInit(); virtual bool Command(int itemId); virtual bool Timer(int timerId); virtual INT_PTR DialogProcImpl(UINT msg, WPARAM wparam, LPARAM lparam); std::wstring Username() { return m_username; } void Username(std::wstring const& v) { m_username = v; } std::wstring Password() { return m_password; } void Password(std::wstring const& v) { m_password = v; } private: void ApplyLogoImage(); void SetServiceStatus(); private: std::wstring m_username; std::wstring m_password; HBITMAP m_bitmap; int m_statusTimerId; }; } }
1,645
2,151
// Copyright 2015 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/page_load_metrics/metrics_web_contents_observer.h" #include <algorithm> #include <ostream> #include <string> #include <utility> #include "base/location.h" #include "base/memory/ptr_util.h" #include "base/metrics/histogram_macros.h" #include "chrome/browser/page_load_metrics/browser_page_track_decider.h" #include "chrome/browser/page_load_metrics/page_load_metrics_embedder_interface.h" #include "chrome/browser/page_load_metrics/page_load_metrics_update_dispatcher.h" #include "chrome/browser/page_load_metrics/page_load_metrics_util.h" #include "chrome/browser/page_load_metrics/page_load_tracker.h" #include "chrome/browser/prerender/prerender_contents.h" #include "chrome/common/page_load_metrics/page_load_timing.h" #include "content/public/browser/browser_thread.h" #include "content/public/browser/global_request_id.h" #include "content/public/browser/navigation_details.h" #include "content/public/browser/navigation_handle.h" #include "content/public/browser/render_frame_host.h" #include "content/public/browser/render_view_host.h" #include "content/public/browser/web_contents.h" #include "content/public/browser/web_contents_observer.h" #include "content/public/browser/web_contents_user_data.h" #include "net/base/net_errors.h" #include "ui/base/page_transition_types.h" DEFINE_WEB_CONTENTS_USER_DATA_KEY( page_load_metrics::MetricsWebContentsObserver); namespace page_load_metrics { namespace { content::RenderFrameHost* GetMainFrame(content::RenderFrameHost* rfh) { // Don't use rfh->GetRenderViewHost()->GetMainFrame() here because // RenderViewHost is being deprecated and because in OOPIF, // RenderViewHost::GetMainFrame() returns nullptr for child frames hosted in a // different process from the main frame. 
while (rfh->GetParent() != nullptr) rfh = rfh->GetParent(); return rfh; } UserInitiatedInfo CreateUserInitiatedInfo( content::NavigationHandle* navigation_handle, PageLoadTracker* committed_load) { if (!navigation_handle->IsRendererInitiated()) return UserInitiatedInfo::BrowserInitiated(); return UserInitiatedInfo::RenderInitiated( navigation_handle->HasUserGesture(), committed_load && committed_load->input_tracker()->FindAndConsumeInputEventsBefore( navigation_handle->NavigationStart())); } } // namespace MetricsWebContentsObserver::MetricsWebContentsObserver( content::WebContents* web_contents, std::unique_ptr<PageLoadMetricsEmbedderInterface> embedder_interface) : content::WebContentsObserver(web_contents), in_foreground_(web_contents->GetVisibility() != content::Visibility::HIDDEN), embedder_interface_(std::move(embedder_interface)), has_navigated_(false), page_load_metrics_binding_(web_contents, this) { // Prerenders erroneously report that they are initially visible, so we // manually override visibility state for prerender. 
const bool is_prerender = prerender::PrerenderContents::FromWebContents(web_contents) != nullptr; if (is_prerender) in_foreground_ = false; RegisterInputEventObserver(web_contents->GetRenderViewHost()); } // static MetricsWebContentsObserver* MetricsWebContentsObserver::CreateForWebContents( content::WebContents* web_contents, std::unique_ptr<PageLoadMetricsEmbedderInterface> embedder_interface) { DCHECK(web_contents); MetricsWebContentsObserver* metrics = FromWebContents(web_contents); if (!metrics) { metrics = new MetricsWebContentsObserver(web_contents, std::move(embedder_interface)); web_contents->SetUserData(UserDataKey(), base::WrapUnique(metrics)); } return metrics; } MetricsWebContentsObserver::~MetricsWebContentsObserver() {} void MetricsWebContentsObserver::WebContentsWillSoonBeDestroyed() { web_contents_will_soon_be_destroyed_ = true; } void MetricsWebContentsObserver::WebContentsDestroyed() { // TODO(csharrison): Use a more user-initiated signal for CLOSE. NotifyPageEndAllLoads(END_CLOSE, UserInitiatedInfo::NotUserInitiated()); // We tear down PageLoadTrackers in WebContentsDestroyed, rather than in the // destructor, since |web_contents()| returns nullptr in the destructor, and // PageLoadMetricsObservers can cause code to execute that wants to be able to // access the current WebContents. 
committed_load_ = nullptr; provisional_loads_.clear(); aborted_provisional_loads_.clear(); for (auto& observer : testing_observers_) observer.OnGoingAway(); } void MetricsWebContentsObserver::RegisterInputEventObserver( content::RenderViewHost* host) { if (host != nullptr) host->GetWidget()->AddInputEventObserver(this); } void MetricsWebContentsObserver::UnregisterInputEventObserver( content::RenderViewHost* host) { if (host != nullptr) host->GetWidget()->RemoveInputEventObserver(this); } void MetricsWebContentsObserver::RenderViewHostChanged( content::RenderViewHost* old_host, content::RenderViewHost* new_host) { UnregisterInputEventObserver(old_host); RegisterInputEventObserver(new_host); } void MetricsWebContentsObserver::MediaStartedPlaying( const content::WebContentsObserver::MediaPlayerInfo& video_type, const content::WebContentsObserver::MediaPlayerId& id) { content::RenderFrameHost* render_frame_host = id.first; if (GetMainFrame(render_frame_host) != web_contents()->GetMainFrame()) { // Ignore media that starts playing in a document that was navigated away // from. return; } if (committed_load_) committed_load_->MediaStartedPlaying( video_type, render_frame_host == web_contents()->GetMainFrame()); } void MetricsWebContentsObserver::WillStartNavigationRequest( content::NavigationHandle* navigation_handle) { // Same-document navigations should never go through // WillStartNavigationRequest. 
DCHECK(!navigation_handle->IsSameDocument()); if (!navigation_handle->IsInMainFrame()) return; UserInitiatedInfo user_initiated_info( CreateUserInitiatedInfo(navigation_handle, committed_load_.get())); std::unique_ptr<PageLoadTracker> last_aborted = NotifyAbortedProvisionalLoadsNewNavigation(navigation_handle, user_initiated_info); int chain_size_same_url = 0; int chain_size = 0; if (last_aborted) { if (last_aborted->MatchesOriginalNavigation(navigation_handle)) { chain_size_same_url = last_aborted->aborted_chain_size_same_url() + 1; } else if (last_aborted->aborted_chain_size_same_url() > 0) { LogAbortChainSameURLHistogram( last_aborted->aborted_chain_size_same_url()); } chain_size = last_aborted->aborted_chain_size() + 1; } if (!ShouldTrackNavigation(navigation_handle)) return; // Pass in the last committed url to the PageLoadTracker. If the MWCO has // never observed a committed load, use the last committed url from this // WebContent's opener. This is more accurate than using referrers due to // referrer sanitizing and origin referrers. Note that this could potentially // be inaccurate if the opener has since navigated. content::RenderFrameHost* opener = web_contents()->GetOpener(); const GURL& opener_url = !has_navigated_ && opener ? opener->GetLastCommittedURL() : GURL::EmptyGURL(); const GURL& currently_committed_url = committed_load_ ? committed_load_->url() : opener_url; has_navigated_ = true; // We can have two provisional loads in some cases. E.g. a same-site // navigation can have a concurrent cross-process navigation started // from the omnibox. DCHECK_GT(2ul, provisional_loads_.size()); // Passing raw pointers to observers_ and embedder_interface_ is safe because // the MetricsWebContentsObserver owns them both list and they are torn down // after the PageLoadTracker. The PageLoadTracker does not hold on to // committed_load_ or navigation_handle beyond the scope of the constructor. 
auto insertion_result = provisional_loads_.insert(std::make_pair( navigation_handle, std::make_unique<PageLoadTracker>( in_foreground_, embedder_interface_.get(), currently_committed_url, navigation_handle, user_initiated_info, chain_size, chain_size_same_url))); DCHECK(insertion_result.second) << "provisional_loads_ already contains NavigationHandle."; for (auto& observer : testing_observers_) observer.OnTrackerCreated(insertion_result.first->second.get()); } void MetricsWebContentsObserver::WillProcessNavigationResponse( content::NavigationHandle* navigation_handle) { auto it = provisional_loads_.find(navigation_handle); if (it == provisional_loads_.end()) return; it->second->WillProcessNavigationResponse(navigation_handle); } PageLoadTracker* MetricsWebContentsObserver::GetTrackerOrNullForRequest( const content::GlobalRequestID& request_id, content::RenderFrameHost* render_frame_host_or_null, content::ResourceType resource_type, base::TimeTicks creation_time) { if (resource_type == content::RESOURCE_TYPE_MAIN_FRAME) { DCHECK(request_id != content::GlobalRequestID()); // The main frame request can complete either before or after commit, so we // look at both provisional loads and the committed load to find a // PageLoadTracker with a matching request id. See https://goo.gl/6TzCYN for // more details. for (const auto& kv : provisional_loads_) { PageLoadTracker* candidate = kv.second.get(); if (candidate->HasMatchingNavigationRequestID(request_id)) { return candidate; } } if (committed_load_ && committed_load_->HasMatchingNavigationRequestID(request_id)) { return committed_load_.get(); } } else { // Non main frame resources are always associated with the currently // committed load. If the resource request was started before this // navigation then it should be ignored. 
if (!committed_load_ || creation_time < committed_load_->navigation_start()) return nullptr; // Sub-frame resources have a null RFH when browser-side navigation is // enabled, so we can't perform the RFH check below for them. // // TODO(bmcquade): consider tracking GlobalRequestIDs for sub-frame // navigations in each PageLoadTracker, and performing a lookup for // sub-frames similar to the main-frame lookup above. if (resource_type == content::RESOURCE_TYPE_SUB_FRAME) return committed_load_.get(); // There is a race here: a completed resource for the previously committed // page can arrive after the new page has committed. In this case, we may // attribute the resource to the wrong page load. We do our best to guard // against this by verifying that the RFH for the resource matches the RFH // for the currently committed load, however there are cases where the same // RFH is used across page loads (same origin navigations, as well as some // cross-origin render-initiated navigations). // // TODO(crbug.com/738577): use a DocumentId here instead, to eliminate this // race. DCHECK(render_frame_host_or_null != nullptr); content::RenderFrameHost* main_frame_for_resource = GetMainFrame(render_frame_host_or_null); if (main_frame_for_resource == web_contents()->GetMainFrame()) return committed_load_.get(); } return nullptr; } void MetricsWebContentsObserver::OnRequestComplete( const GURL& url, const net::HostPortPair& host_port_pair, int frame_tree_node_id, const content::GlobalRequestID& request_id, content::RenderFrameHost* render_frame_host_or_null, content::ResourceType resource_type, bool was_cached, std::unique_ptr<data_reduction_proxy::DataReductionProxyData> data_reduction_proxy_data, int64_t raw_body_bytes, int64_t original_content_length, base::TimeTicks creation_time, int net_error, std::unique_ptr<net::LoadTimingInfo> load_timing_info) { // Ignore non-HTTP(S) resources (blobs, data uris, etc). 
if (!url.SchemeIsHTTPOrHTTPS()) return; PageLoadTracker* tracker = GetTrackerOrNullForRequest( request_id, render_frame_host_or_null, resource_type, creation_time); if (tracker) { ExtraRequestCompleteInfo extra_request_complete_info( url, host_port_pair, frame_tree_node_id, was_cached, raw_body_bytes, was_cached ? 0 : original_content_length, std::move(data_reduction_proxy_data), resource_type, net_error, std::move(load_timing_info)); tracker->OnLoadedResource(extra_request_complete_info); } } const PageLoadExtraInfo MetricsWebContentsObserver::GetPageLoadExtraInfoForCommittedLoad() { DCHECK(committed_load_); return committed_load_->ComputePageLoadExtraInfo(); } void MetricsWebContentsObserver::DidFinishNavigation( content::NavigationHandle* navigation_handle) { if (!navigation_handle->IsInMainFrame()) { if (committed_load_ && navigation_handle->GetParentFrame() && GetMainFrame(navigation_handle->GetParentFrame()) == web_contents()->GetMainFrame()) { committed_load_->DidFinishSubFrameNavigation(navigation_handle); committed_load_->metrics_update_dispatcher()->DidFinishSubFrameNavigation( navigation_handle); } return; } std::unique_ptr<PageLoadTracker> finished_nav( std::move(provisional_loads_[navigation_handle])); provisional_loads_.erase(navigation_handle); // Ignore same-document navigations. if (navigation_handle->HasCommitted() && navigation_handle->IsSameDocument()) { if (finished_nav) finished_nav->StopTracking(); if (committed_load_) committed_load_->DidCommitSameDocumentNavigation(navigation_handle); return; } // Ignore internally generated aborts for navigations with HTTP responses that // don't commit, such as HTTP 204 responses and downloads. 
if (!navigation_handle->HasCommitted() && navigation_handle->GetNetErrorCode() == net::ERR_ABORTED && navigation_handle->GetResponseHeaders()) { if (finished_nav) finished_nav->StopTracking(); return; } const bool should_track = finished_nav && ShouldTrackNavigation(navigation_handle); if (finished_nav && !should_track) finished_nav->StopTracking(); if (navigation_handle->HasCommitted()) { UserInitiatedInfo user_initiated_info = finished_nav ? finished_nav->user_initiated_info() : CreateUserInitiatedInfo(navigation_handle, committed_load_.get()); // Notify other loads that they may have been aborted by this committed // load. is_certainly_browser_timestamp is set to false because // NavigationStart() could be set in either the renderer or browser process. NotifyPageEndAllLoadsWithTimestamp( EndReasonForPageTransition(navigation_handle->GetPageTransition()), user_initiated_info, navigation_handle->NavigationStart(), false); if (should_track) { HandleCommittedNavigationForTrackedLoad(navigation_handle, std::move(finished_nav)); } else { committed_load_.reset(); } } else if (should_track) { HandleFailedNavigationForTrackedLoad(navigation_handle, std::move(finished_nav)); } } // Handle a pre-commit error. Navigations that result in an error page will be // ignored. void MetricsWebContentsObserver::HandleFailedNavigationForTrackedLoad( content::NavigationHandle* navigation_handle, std::unique_ptr<PageLoadTracker> tracker) { const base::TimeTicks now = base::TimeTicks::Now(); tracker->FailedProvisionalLoad(navigation_handle, now); const net::Error error = navigation_handle->GetNetErrorCode(); // net::OK: This case occurs when the NavigationHandle finishes and reports // !HasCommitted(), but reports no net::Error. This should not occur // pre-PlzNavigate, but afterwards it should represent the navigation stopped // by the user before it was ready to commit. // net::ERR_ABORTED: An aborted provisional load has error net::ERR_ABORTED. 
const bool is_aborted_provisional_load = error == net::OK || error == net::ERR_ABORTED; // If is_aborted_provisional_load, the page end reason is not yet known, and // will be updated as additional information is available from subsequent // navigations. tracker->NotifyPageEnd( is_aborted_provisional_load ? END_OTHER : END_PROVISIONAL_LOAD_FAILED, UserInitiatedInfo::NotUserInitiated(), now, true); if (is_aborted_provisional_load) aborted_provisional_loads_.push_back(std::move(tracker)); } void MetricsWebContentsObserver::HandleCommittedNavigationForTrackedLoad( content::NavigationHandle* navigation_handle, std::unique_ptr<PageLoadTracker> tracker) { if (!IsNavigationUserInitiated(navigation_handle) && (navigation_handle->GetPageTransition() & ui::PAGE_TRANSITION_CLIENT_REDIRECT) != 0 && committed_load_) { // TODO(bmcquade): consider carrying the user_gesture bit forward to the // redirected navigation. committed_load_->NotifyClientRedirectTo(*tracker); } committed_load_ = std::move(tracker); committed_load_->Commit(navigation_handle); DCHECK(committed_load_->did_commit()); for (auto& observer : testing_observers_) observer.OnCommit(committed_load_.get()); } void MetricsWebContentsObserver::NavigationStopped() { // TODO(csharrison): Use a more user-initiated signal for STOP. NotifyPageEndAllLoads(END_STOP, UserInitiatedInfo::NotUserInitiated()); } void MetricsWebContentsObserver::OnInputEvent( const blink::WebInputEvent& event) { // Ignore browser navigation or reload which comes with type Undefined. if (event.GetType() == blink::WebInputEvent::Type::kUndefined) return; if (committed_load_) committed_load_->OnInputEvent(event); } void MetricsWebContentsObserver::FlushMetricsOnAppEnterBackground() { // Note that, while a call to FlushMetricsOnAppEnterBackground usually // indicates that the app is about to be backgrounded, there are cases where // the app may not end up getting backgrounded. 
Thus, we should not assume // anything about foreground / background state of the associated tab as part // of this method call. if (committed_load_) committed_load_->FlushMetricsOnAppEnterBackground(); for (const auto& kv : provisional_loads_) { kv.second->FlushMetricsOnAppEnterBackground(); } for (const auto& tracker : aborted_provisional_loads_) { tracker->FlushMetricsOnAppEnterBackground(); } } void MetricsWebContentsObserver::DidRedirectNavigation( content::NavigationHandle* navigation_handle) { if (!navigation_handle->IsInMainFrame()) return; auto it = provisional_loads_.find(navigation_handle); if (it == provisional_loads_.end()) return; it->second->Redirect(navigation_handle); } void MetricsWebContentsObserver::OnVisibilityChanged( content::Visibility visibility) { if (web_contents_will_soon_be_destroyed_) return; // TODO(bmcquade): Consider handling an OCCLUDED tab as not in foreground. bool was_in_foreground = in_foreground_; in_foreground_ = visibility != content::Visibility::HIDDEN; if (in_foreground_ == was_in_foreground) return; if (in_foreground_) { if (committed_load_) committed_load_->WebContentsShown(); for (const auto& kv : provisional_loads_) { kv.second->WebContentsShown(); } } else { if (committed_load_) committed_load_->WebContentsHidden(); for (const auto& kv : provisional_loads_) { kv.second->WebContentsHidden(); } } } // This will occur when the process for the main RenderFrameHost exits, either // normally or from a crash. We eagerly log data from the last committed load if // we have one. void MetricsWebContentsObserver::RenderProcessGone( base::TerminationStatus status) { // Other code paths will be run for normal renderer shutdown. Note that we // sometimes get the STILL_RUNNING value on fast shutdown. if (status == base::TERMINATION_STATUS_NORMAL_TERMINATION || status == base::TERMINATION_STATUS_STILL_RUNNING) { return; } // RenderProcessGone is associated with the render frame host for the // currently committed load. 
We don't know if the pending navs or aborted // pending navs are associated w/ the render process that died, so we can't be // sure the info should propagate to them. if (committed_load_) { committed_load_->NotifyPageEnd(END_RENDER_PROCESS_GONE, UserInitiatedInfo::NotUserInitiated(), base::TimeTicks::Now(), true); } // If this is a crash, eagerly log the aborted provisional loads and the // committed load. |provisional_loads_| don't need to be destroyed here // because their lifetime is tied to the NavigationHandle. committed_load_.reset(); aborted_provisional_loads_.clear(); } void MetricsWebContentsObserver::NotifyPageEndAllLoads( PageEndReason page_end_reason, UserInitiatedInfo user_initiated_info) { NotifyPageEndAllLoadsWithTimestamp(page_end_reason, user_initiated_info, base::TimeTicks::Now(), true); } void MetricsWebContentsObserver::NotifyPageEndAllLoadsWithTimestamp( PageEndReason page_end_reason, UserInitiatedInfo user_initiated_info, base::TimeTicks timestamp, bool is_certainly_browser_timestamp) { if (committed_load_) { committed_load_->NotifyPageEnd(page_end_reason, user_initiated_info, timestamp, is_certainly_browser_timestamp); } for (const auto& kv : provisional_loads_) { kv.second->NotifyPageEnd(page_end_reason, user_initiated_info, timestamp, is_certainly_browser_timestamp); } for (const auto& tracker : aborted_provisional_loads_) { if (tracker->IsLikelyProvisionalAbort(timestamp)) { tracker->UpdatePageEnd(page_end_reason, user_initiated_info, timestamp, is_certainly_browser_timestamp); } } aborted_provisional_loads_.clear(); } std::unique_ptr<PageLoadTracker> MetricsWebContentsObserver::NotifyAbortedProvisionalLoadsNewNavigation( content::NavigationHandle* new_navigation, UserInitiatedInfo user_initiated_info) { // If there are multiple aborted loads that can be attributed to this one, // just count the latest one for simplicity. Other loads will fall into the // OTHER bucket, though there shouldn't be very many. 
if (aborted_provisional_loads_.size() == 0) return nullptr; if (aborted_provisional_loads_.size() > 1) RecordInternalError(ERR_NAVIGATION_SIGNALS_MULIPLE_ABORTED_LOADS); std::unique_ptr<PageLoadTracker> last_aborted_load = std::move(aborted_provisional_loads_.back()); aborted_provisional_loads_.pop_back(); base::TimeTicks timestamp = new_navigation->NavigationStart(); if (last_aborted_load->IsLikelyProvisionalAbort(timestamp)) { last_aborted_load->UpdatePageEnd( EndReasonForPageTransition(new_navigation->GetPageTransition()), user_initiated_info, timestamp, false); } aborted_provisional_loads_.clear(); return last_aborted_load; } void MetricsWebContentsObserver::OnTimingUpdated( content::RenderFrameHost* render_frame_host, const mojom::PageLoadTiming& timing, const mojom::PageLoadMetadata& metadata, const mojom::PageLoadFeatures& new_features) { // We may receive notifications from frames that have been navigated away // from. We simply ignore them. if (GetMainFrame(render_frame_host) != web_contents()->GetMainFrame()) { RecordInternalError(ERR_IPC_FROM_WRONG_FRAME); return; } const bool is_main_frame = (render_frame_host->GetParent() == nullptr); if (is_main_frame) { // While timings arriving for the wrong frame are expected, we do not expect // any of the errors below for main frames. Thus, we track occurrences of // all errors below, rather than returning early after encountering an // error. 
bool error = false; if (!committed_load_) { RecordInternalError(ERR_IPC_WITH_NO_RELEVANT_LOAD); error = true; } if (!web_contents()->GetLastCommittedURL().SchemeIsHTTPOrHTTPS()) { RecordInternalError(ERR_IPC_FROM_BAD_URL_SCHEME); error = true; } if (error) return; } else if (!committed_load_) { RecordInternalError(ERR_SUBFRAME_IPC_WITH_NO_RELEVANT_LOAD); } if (committed_load_) { committed_load_->metrics_update_dispatcher()->UpdateMetrics( render_frame_host, timing, metadata, new_features); } } void MetricsWebContentsObserver::UpdateTiming( const mojom::PageLoadTimingPtr timing, const mojom::PageLoadMetadataPtr metadata, const mojom::PageLoadFeaturesPtr new_features) { content::RenderFrameHost* render_frame_host = page_load_metrics_binding_.GetCurrentTargetFrame(); OnTimingUpdated(render_frame_host, *timing, *metadata, *new_features); } bool MetricsWebContentsObserver::ShouldTrackNavigation( content::NavigationHandle* navigation_handle) const { DCHECK(navigation_handle->IsInMainFrame()); DCHECK(!navigation_handle->HasCommitted() || !navigation_handle->IsSameDocument()); return BrowserPageTrackDecider(embedder_interface_.get(), navigation_handle) .ShouldTrack(); } void MetricsWebContentsObserver::AddTestingObserver(TestingObserver* observer) { if (!testing_observers_.HasObserver(observer)) testing_observers_.AddObserver(observer); } void MetricsWebContentsObserver::RemoveTestingObserver( TestingObserver* observer) { testing_observers_.RemoveObserver(observer); } MetricsWebContentsObserver::TestingObserver::TestingObserver( content::WebContents* web_contents) : observer_(page_load_metrics::MetricsWebContentsObserver::FromWebContents( web_contents)) { observer_->AddTestingObserver(this); } MetricsWebContentsObserver::TestingObserver::~TestingObserver() { if (observer_) { observer_->RemoveTestingObserver(this); observer_ = nullptr; } } void MetricsWebContentsObserver::TestingObserver::OnGoingAway() { observer_ = nullptr; } void 
MetricsWebContentsObserver::BroadcastEventToObservers( const void* const event_key) { if (committed_load_) committed_load_->BroadcastEventToObservers(event_key); } } // namespace page_load_metrics
9,209
3,301
<filename>core/src/main/java/com/alibaba/alink/common/sql/builtin/agg/StddevSampUdaf.java<gh_stars>1000+ package com.alibaba.alink.common.sql.builtin.agg; public class StddevSampUdaf extends BaseSummaryUdaf { public StddevSampUdaf() { super(); } public StddevSampUdaf(boolean dropLast) { super(dropLast); } @Override public Number getValue(SummaryData accumulator) { return accumulator.getStdSamp(); } }
198
2,151
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef MEDIA_FILTERS_VIDEO_CADENCE_ESTIMATOR_H_
#define MEDIA_FILTERS_VIDEO_CADENCE_ESTIMATOR_H_

#include <stddef.h>
#include <stdint.h>

#include <vector>

#include "base/macros.h"
#include "base/time/time.h"
#include "media/base/media_export.h"

namespace media {

// Estimates whether a given frame duration and render interval length have a
// render cadence which would allow for optimal uniformity of displayed frame
// durations over time.
//
// Cadence is the ideal repeating frame pattern for a group of frames; currently
// VideoCadenceEstimator supports N-frame ([a1:a2:..:aN]) cadences where N <= 5.
// Details on what this means are below.
//
// The perfect cadence of a set of frames is the ratio of the frame duration to
// render interval length. I.e. for 30fps in 60Hz the cadence would be (1/30) /
// (1/60) = 60 / 30 = 2. It's common that this is not an exact integer, e.g.,
// 29.974fps in 60Hz which would have a cadence of (1/29.974) / (1/60) =
// ~2.0029.
//
// The perfect cadence is always a real number. All N-cadences [a1:a2:..:aN]
// where aK is an integer are an approximation of the perfect cadence; i.e. the
// average of [a1:..:aN] will approximately equal the perfect cadence. When N=1
// we have a 1-frame cadence, when N=2, we have a 2-frame cadence, etc.
//
// For single frame cadence we just round the perfect cadence (~2.0029 in the
// previous example) to the nearest integer value (2 in this case; which is
// denoted as a cadence of [2]). If the delta between those values is small we
// can choose to render frames for the integer number of render intervals;
// shortening or lengthening the actual rendered frame duration. Doing so
// ensures each frame gets an optimal amount of display time.
//
// For N-frame cadence, the idea is similar, we just round the perfect cadence
// to some K/N, where K is an integer, and distribute [floor(K/N), floor(K/N)+1]
// into the cadence vector as evenly as possible. For example, 23.97fps in
// 60Hz, the perfect cadence is 2.50313, we can round it to 2.5 = 5/2, and we
// can then construct the cadence vector as [2:3].
//
// The delta between the perfect cadence and the rounded cadence leads to drift
// over time of the actual VideoFrame timestamp relative to its rendered time,
// so we perform some calculations to ensure we only use a cadence when it will
// take some time to drift an undesirable amount; see CalculateCadence() for
// details on how this calculation is made.
//
// In practice this works out to the following for common setups if we use
// cadence based selection:
//
//    29.5fps in 60Hz,  ~17ms max drift => exhausted in ~1 second.
//    29.9fps in 60Hz,  ~17ms max drift => exhausted in ~16.4 seconds.
//    24fps   in 59.9Hz, ~21ms max drift => exhausted in ~12.6 seconds.
//    24.9fps in 60Hz,  ~20ms max drift => exhausted in ~4.0 seconds.
//    59.9fps in 60Hz,  ~8.3ms max drift => exhausted in ~8.2 seconds.
//    24.9fps in 50Hz,  ~20ms max drift => exhausted in ~20.5 seconds.
//    120fps  in 59.9Hz, ~8.3ms max drift => exhausted in ~8.2 seconds.
//
class MEDIA_EXPORT VideoCadenceEstimator {
 public:
  using Cadence = std::vector<int>;

  // As mentioned in the introduction, the determination of whether to clamp to
  // a given cadence is based on how long it takes before a frame would have to
  // be dropped or repeated to compensate for reaching the maximum acceptable
  // drift; this time length is controlled by |minimum_time_until_max_drift|.
  explicit VideoCadenceEstimator(base::TimeDelta minimum_time_until_max_drift);
  ~VideoCadenceEstimator();

  // Clears stored cadence information.
  void Reset();

  // Updates the estimates for |cadence_| based on the given values as described
  // in the introduction above.
  //
  // Clients should call this and then update the cadence for all frames via the
  // GetCadenceForFrame() method if the cadence changes.
  //
  // Cadence changes will not take effect until enough render intervals have
  // elapsed. For the purposes of hysteresis, each UpdateCadenceEstimate() call
  // is assumed to elapse one |render_interval| worth of time.
  //
  // Returns true if the cadence has changed since the last call.
  bool UpdateCadenceEstimate(base::TimeDelta render_interval,
                             base::TimeDelta frame_duration,
                             base::TimeDelta frame_duration_deviation,
                             base::TimeDelta max_acceptable_drift);

  // Returns true if a useful cadence was found.
  bool has_cadence() const { return !cadence_.empty(); }

  // Given a |frame_number|, where zero is the most recently rendered frame,
  // returns the ideal cadence for that frame.
  //
  // Note: Callers must track the base |frame_number| relative to all frames
  // rendered or removed after the first frame for which cadence is detected.
  // The first frame after cadence is detected has a |frame_number| of 0.
  //
  // Frames which come in before the last rendered frame should be ignored in
  // terms of impact to the base |frame_number|.
  int GetCadenceForFrame(uint64_t frame_number) const;

  void set_cadence_hysteresis_threshold_for_testing(base::TimeDelta threshold) {
    cadence_hysteresis_threshold_ = threshold;
  }

  size_t cadence_size_for_testing() const { return cadence_.size(); }
  std::string GetCadenceForTesting() const { return CadenceToString(cadence_); }

 private:
  // Attempts to find an N-frame cadence. Returns the cadence vector if cadence
  // is found and sets |time_until_max_drift| for the computed cadence. If
  // multiple cadences satisfying the max drift constraint exist, we are going
  // to return the one with largest |time_until_max_drift|.
  // For details on the math and algorithm, see https://goo.gl/QK0vbz
  Cadence CalculateCadence(base::TimeDelta render_interval,
                           base::TimeDelta frame_duration,
                           base::TimeDelta max_acceptable_drift,
                           base::TimeDelta* time_until_max_drift) const;

  // Converts a cadence vector into a human readable string of the form
  // "[a: b: ...: z]".
  std::string CadenceToString(const Cadence& cadence) const;

  // The approximate best N-frame cadence for all frames seen thus far; updated
  // by UpdateCadenceEstimate(). Empty when no cadence has been detected.
  Cadence cadence_;

  // Used as hysteresis to prevent oscillation between cadence approximations
  // for spurious blips in the render interval or frame duration.
  //
  // Once a new cadence is detected, |render_intervals_cadence_held_| is
  // incremented for each UpdateCadenceEstimate() call where |cadence_| matches
  // |pending_cadence_|. |render_intervals_cadence_held_| is cleared when a
  // "new" cadence matches |cadence_| or |pending_cadence_|.
  //
  // Once |kMinimumCadenceDurationMs| is exceeded in render intervals, the
  // detected cadence is set in |cadence_|.
  Cadence pending_cadence_;
  int render_intervals_cadence_held_;
  base::TimeDelta cadence_hysteresis_threshold_;

  // Tracks how many times cadence has switched during a given playback, used to
  // histogram the number of cadence changes in a playback.
  bool first_update_call_;
  int cadence_changes_;

  // The minimum amount of time allowed before a glitch occurs before confirming
  // cadence for a given render interval and frame duration.
  const base::TimeDelta minimum_time_until_max_drift_;

  bool is_variable_frame_rate_;

  DISALLOW_COPY_AND_ASSIGN(VideoCadenceEstimator);
};

}  // namespace media

#endif  // MEDIA_FILTERS_VIDEO_CADENCE_ESTIMATOR_H_
2,495
938
package slimeknights.tconstruct.library.tools.capability;

import net.minecraft.entity.Entity;
import net.minecraft.entity.player.PlayerEntity;
import net.minecraft.nbt.CompoundNBT;
import net.minecraft.nbt.INBT;
import net.minecraft.util.Direction;
import net.minecraft.util.ResourceLocation;
import net.minecraftforge.common.MinecraftForge;
import net.minecraftforge.common.capabilities.Capability;
import net.minecraftforge.common.capabilities.CapabilityInject;
import net.minecraftforge.common.capabilities.CapabilityManager;
import net.minecraftforge.common.capabilities.ICapabilitySerializable;
import net.minecraftforge.common.util.Lazy;
import net.minecraftforge.common.util.LazyOptional;
import net.minecraftforge.event.AttachCapabilitiesEvent;
import net.minecraftforge.event.entity.player.PlayerEvent;
import net.minecraftforge.eventbus.api.EventPriority;
import slimeknights.tconstruct.TConstruct;
import slimeknights.tconstruct.common.network.SyncPersistentDataPacket;
import slimeknights.tconstruct.common.network.TinkerNetwork;
import slimeknights.tconstruct.library.tools.nbt.NamespacedNBT;

import javax.annotation.Nonnull;
import javax.annotation.Nullable;

/**
 * Capability to store persistent NBT data on an entity. For players, this is automatically synced to the client on load, but not during gameplay.
 * Persists after death, will reassess if we need some data to not persist death.
 */
public class PersistentDataCapability implements Capability.IStorage<NamespacedNBT> {
  private PersistentDataCapability() {}

  /** Capability ID */
  private static final ResourceLocation ID = TConstruct.getResource("persistent_data");
  /** Instance of the capability storage because forge requires it */
  private static final PersistentDataCapability INSTANCE = new PersistentDataCapability();
  /** Capability type, injected by Forge after {@link #register()} runs */
  @CapabilityInject(NamespacedNBT.class)
  public static Capability<NamespacedNBT> CAPABILITY = null;

  /** Registers this capability and all the event listeners that keep it attached and synced */
  public static void register() {
    CapabilityManager.INSTANCE.register(NamespacedNBT.class, INSTANCE, NamespacedNBT::new);
    MinecraftForge.EVENT_BUS.addGenericListener(Entity.class, PersistentDataCapability::attachCapability);
    MinecraftForge.EVENT_BUS.addListener(EventPriority.NORMAL, false, PlayerEvent.Clone.class, PersistentDataCapability::playerClone);
    MinecraftForge.EVENT_BUS.addListener(EventPriority.NORMAL, false, PlayerEvent.PlayerRespawnEvent.class, PersistentDataCapability::playerRespawn);
    MinecraftForge.EVENT_BUS.addListener(EventPriority.NORMAL, false, PlayerEvent.PlayerChangedDimensionEvent.class, PersistentDataCapability::playerChangeDimension);
    MinecraftForge.EVENT_BUS.addListener(EventPriority.NORMAL, false, PlayerEvent.PlayerLoggedInEvent.class, PersistentDataCapability::playerLoggedIn);
  }

  /** Event listener to attach the capability; only players get the capability here */
  private static void attachCapability(AttachCapabilitiesEvent<Entity> event) {
    if (event.getObject() instanceof PlayerEntity) {
      Provider provider = new Provider();
      event.addCapability(ID, provider);
      // listener invalidates the provider's LazyOptional when capabilities are invalidated
      event.addListener(provider);
    }
  }

  /** Syncs the data to the given player */
  private static void sync(PlayerEntity player) {
    player.getCapability(CAPABILITY).ifPresent(data -> TinkerNetwork.getInstance().sendTo(new SyncPersistentDataPacket(data.getCopy()), player));
  }

  /** copy caps when the player respawns/returns from the end */
  private static void playerClone(PlayerEvent.Clone event) {
    event.getOriginal().getCapability(CAPABILITY).ifPresent(oldData -> {
      CompoundNBT nbt = oldData.getCopy();
      if (!nbt.isEmpty()) {
        event.getPlayer().getCapability(CAPABILITY).ifPresent(newData -> newData.copyFrom(nbt));
      }
    });
  }

  /** sync caps when the player respawns/returns from the end */
  private static void playerRespawn(PlayerEvent.PlayerRespawnEvent event) {
    sync(event.getPlayer());
  }

  /** sync caps when the player changes dimensions */
  private static void playerChangeDimension(PlayerEvent.PlayerChangedDimensionEvent event) {
    sync(event.getPlayer());
  }

  /** sync caps when the player logs in */
  private static void playerLoggedIn(PlayerEvent.PlayerLoggedInEvent event) {
    sync(event.getPlayer());
  }


  /* Required methods - unused, serialization is handled by the Provider below */

  @Nullable
  @Override
  public INBT writeNBT(Capability<NamespacedNBT> capability, NamespacedNBT instance, Direction side) {
    return null;
  }

  @Override
  public void readNBT(Capability<NamespacedNBT> capability, NamespacedNBT instance, Direction side, INBT nbt) {}

  /** Capability provider instance */
  private static class Provider implements ICapabilitySerializable<CompoundNBT>, Runnable {
    // backing NBT tag, lazily created; the capability view wraps this tag
    private Lazy<CompoundNBT> nbt;
    private LazyOptional<NamespacedNBT> capability;
    private Provider() {
      this.nbt = Lazy.of(CompoundNBT::new);
      this.capability = LazyOptional.of(() -> NamespacedNBT.readFromNBT(nbt.get()));
    }

    @Nonnull
    @Override
    public <T> LazyOptional<T> getCapability(Capability<T> cap, @Nullable Direction side) {
      return CAPABILITY.orEmpty(cap, capability);
    }

    @Override
    public void run() {
      // called when capabilities invalidate, create a new cap just in case they are revived later
      capability.invalidate();
      capability = LazyOptional.of(() -> NamespacedNBT.readFromNBT(nbt.get()));
    }

    @Override
    public CompoundNBT serializeNBT() {
      return nbt.get().copy();
    }

    @Override
    public void deserializeNBT(CompoundNBT nbt) {
      this.nbt = Lazy.of(() -> nbt);
      // recreate the capability view so it wraps the freshly loaded tag
      run();
    }
  }
}
1,720
376
# Generated by Django 3.1 on 2019-09-22 20:53

from django.db import migrations, models


class Migration(migrations.Migration):
    """Adds a partial index on ``testtable.test_field_int``.

    The index only covers rows where ``test_field_int`` is not NULL.
    """

    dependencies = [
        ('good_flow_app', '0023_drop_index'),
    ]

    operations = [
        migrations.AddIndex(
            model_name='testtable',
            index=models.Index(
                fields=['test_field_int'],
                name='test_index',
                condition=models.Q(test_field_int__isnull=False),
            ),
        ),
    ]
243
675
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <stddef.h>
#include <string.h>

#include <sys/system_properties.h>

#include <linux/api.h>
#include <arm/android/api.h>
#include <arm/linux/api.h>
#include <cpuinfo/log.h>


#if CPUINFO_MOCK
	#include <cpuinfo-mock.h>

	/* Table of mocked Android properties; NULL until mocking is enabled. */
	static struct cpuinfo_mock_property* cpuinfo_mock_properties = NULL;

	/* Enables Android property mocking by installing a key/value table
	 * terminated by an entry with a NULL key. */
	void CPUINFO_ABI cpuinfo_mock_android_properties(struct cpuinfo_mock_property* properties) {
		cpuinfo_log_info("Android properties mocking enabled");
		cpuinfo_mock_properties = properties;
	}

	/* Mocked replacement for __system_property_get: looks the key up in the
	 * mock table. Returns the value length, or 0 (empty value) on a miss. */
	static int cpuinfo_android_property_get(const char* key, char* value) {
		if (cpuinfo_mock_properties != NULL) {
			for (const struct cpuinfo_mock_property* prop = cpuinfo_mock_properties; prop->key != NULL; prop++) {
				if (strncmp(key, prop->key, CPUINFO_BUILD_PROP_NAME_MAX) == 0) {
					strncpy(value, prop->value, CPUINFO_BUILD_PROP_VALUE_MAX);
					return (int) strnlen(prop->value, CPUINFO_BUILD_PROP_VALUE_MAX);
				}
			}
		}
		*value = '\0';
		return 0;
	}
#else
	/* Real implementation: thin wrapper over the Android system property API. */
	static inline int cpuinfo_android_property_get(const char* key, char* value) {
		return __system_property_get(key, value);
	}
#endif

/* Reads the Android build properties that are relevant for chipset detection
 * into the provided structure, logging each value at debug level. */
void cpuinfo_arm_android_parse_properties(struct cpuinfo_android_properties properties[restrict static 1]) {
	const int ro_product_board_length =
		cpuinfo_android_property_get("ro.product.board", properties->ro_product_board);
	cpuinfo_log_debug("read ro.product.board = \"%.*s\"", ro_product_board_length, properties->ro_product_board);

	const int ro_board_platform_length =
		cpuinfo_android_property_get("ro.board.platform", properties->ro_board_platform);
	cpuinfo_log_debug("read ro.board.platform = \"%.*s\"", ro_board_platform_length, properties->ro_board_platform);

	const int ro_mediatek_platform_length =
		cpuinfo_android_property_get("ro.mediatek.platform", properties->ro_mediatek_platform);
	cpuinfo_log_debug("read ro.mediatek.platform = \"%.*s\"", ro_mediatek_platform_length, properties->ro_mediatek_platform);

	const int ro_arch_length =
		cpuinfo_android_property_get("ro.arch", properties->ro_arch);
	cpuinfo_log_debug("read ro.arch = \"%.*s\"", ro_arch_length, properties->ro_arch);

	const int ro_chipname_length =
		cpuinfo_android_property_get("ro.chipname", properties->ro_chipname);
	cpuinfo_log_debug("read ro.chipname = \"%.*s\"", ro_chipname_length, properties->ro_chipname);

	const int ro_hardware_chipname_length =
		cpuinfo_android_property_get("ro.hardware.chipname", properties->ro_hardware_chipname);
	cpuinfo_log_debug("read ro.hardware.chipname = \"%.*s\"", ro_hardware_chipname_length, properties->ro_hardware_chipname);
}
961
2,151
// Copyright 2016 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "content/public/browser/web_contents_binding_set.h" #include <utility> #include "base/logging.h" #include "content/browser/web_contents/web_contents_impl.h" namespace content { void WebContentsBindingSet::Binder::OnRequestForFrame( RenderFrameHost* render_frame_host, mojo::ScopedInterfaceEndpointHandle handle) { NOTREACHED(); } WebContentsBindingSet::WebContentsBindingSet(WebContents* web_contents, const std::string& interface_name, std::unique_ptr<Binder> binder) : remove_callback_(static_cast<WebContentsImpl*>(web_contents) ->AddBindingSet(interface_name, this)), binder_(std::move(binder)) {} WebContentsBindingSet::~WebContentsBindingSet() { remove_callback_.Run(); } // static WebContentsBindingSet* WebContentsBindingSet::GetForWebContents( WebContents* web_contents, const char* interface_name) { return static_cast<WebContentsImpl*>(web_contents) ->GetBindingSet(interface_name); } void WebContentsBindingSet::CloseAllBindings() { binder_for_testing_.reset(); binder_.reset(); } void WebContentsBindingSet::OnRequestForFrame( RenderFrameHost* render_frame_host, mojo::ScopedInterfaceEndpointHandle handle) { if (binder_for_testing_) { binder_for_testing_->OnRequestForFrame(render_frame_host, std::move(handle)); return; } DCHECK(binder_); binder_->OnRequestForFrame(render_frame_host, std::move(handle)); } } // namespace content
700
515
import qt

# Smoke-test a wrapped object exposed to Python by the embedding application:
# it must start at zero, and a value set through the wrapper must read back.
# Exit code 1 signals failure, 0 signals success.
if _testWrappedQInvokableInstance.value() != 0:
    qt.QApplication.exit(1)

_testWrappedQInvokableInstance.setValue(74)
if _testWrappedQInvokableInstance.value() != 74:
    qt.QApplication.exit(1)

qt.QApplication.exit(0)
104
307
"""Geometry class for PlotOptiX raytracer. Basic geometry properties and interface to underlaying data buffers. https://github.com/rnd-team-dev/plotoptix/blob/master/LICENSE.txt Have a look at examples on GitHub: https://github.com/rnd-team-dev/plotoptix. """ from typing import Optional, Union from ctypes import byref, c_ubyte, c_float, c_uint, c_int, c_longlong import numpy as np from plotoptix.enums import GeomBuffer from plotoptix._load_lib import load_optix class GeometryMeta: _name = None """Unique name for the geometry object. """ _handle = None """Unique int handle for the geometry object. """ _size = 0 """Number of primitives or data points. """ def __init__(self, name: str, handle: int, size: int) -> None: """Geometry metadata for all mesh-less mesh based objects in the scene. Basic geometry properties and an interface to underlaying data buffers. """ self._optix = load_optix() self._name = name self._handle = handle self._size = size def _pin_buffer(self, buffer: Union[GeomBuffer, str]) -> Optional[np.ndarray]: if isinstance(buffer, str): buffer = GeomBuffer[buffer] c_buffer = c_longlong() c_shape = c_longlong() c_size = c_int() c_type = c_uint() if self._optix.pin_geometry_buffer( self._name, buffer.value, byref(c_buffer), byref(c_shape), byref(c_size), byref(c_type)): if c_type.value == 4: elem = c_float elif c_type.value == 3: elem = c_uint elif c_type.value == 2: elem = c_int elif c_type.value == 1: elem = c_ubyte else: msg = "Data type not supported." self._logger.error(msg) if self._raise_on_error: raise RuntimeError(msg) shape_buf = (c_int * c_size.value).from_address(c_shape.value) shape = np.ctypeslib.as_array(shape_buf) for s in shape: elem = elem * s return elem.from_address(c_buffer.value) else: msg = "Buffer not pinned." 
raise RuntimeError(msg) return None def _release_buffer(self, buffer: GeomBuffer) -> None: if isinstance(buffer, str): buffer = GeomBuffer[buffer] if not self._optix.unpin_geometry_buffer(self._name, buffer.value): msg = "Buffer not released." raise RuntimeError(msg) class PinnedBuffer: """Pins an internal buffer memory and exposes it as an ``np.ndarray``. Use only within the ``with`` block as in the provided example. The exposed array is not going out of scope nor is anyhow protected outside that expression due to the current limitations of the array interface; be careful and do not use the array outside ``with`` as memory can be reallocated. Parameters ---------- geom : GeometryMeta Geometry metadata for the object, available in :attr:`plotoptix.NpOptiX.geometry_data` dictionary. buffer : GeomBuffer or string Buffer kind to pin. Returns ------- out : ndarray Buffer data wrapped in ``np.ndarray``. Examples -------- >>> rt = NpOptiX() >>> rt.set_data("plot", xyz=np.random.random((100, 3)), r=0.05) >>> >>> with PinnedBuffer(rt.geometry_data["plot"], "Positions") as b: >>> print("internal data:", b.shape) >>> print("b[:3]) >>> >>> b *= 1.5 >>> rt.update_geom_buffers("plot", "Positions", forced=True) """ _buffer = None """Buffer kind. """ _data = None """Buffer array. """ _geometry = None """Geometry metadata. """ def __init__(self, geom: GeometryMeta, buffer: GeomBuffer) -> None: """Constructor. """ self._geometry = geom self._buffer = buffer def __enter__(self) -> Optional[np.ndarray]: """Pin memory, wrap it in ``np.ndarray``. """ buf = self._geometry._pin_buffer(self._buffer) if buf is not None: self._data = np.ctypeslib.as_array(buf) return self._data def __exit__(self, exc_type, exc_val, exc_tb) -> None: """Free pinned memory. Note: array syrvives on the Python side, do not use it. """ self._geometry._release_buffer(self._buffer) #print(self._data.__array_interface__)
2,021
2,151
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef CHROME_BROWSER_UI_VIEWS_TABS_TAB_ICON_H_
#define CHROME_BROWSER_UI_VIEWS_TABS_TAB_ICON_H_

#include "base/macros.h"
#include "base/time/time.h"
#include "chrome/browser/ui/tabs/tab_network_state.h"
#include "ui/gfx/image/image_skia.h"
#include "ui/gfx/paint_throbber.h"
#include "ui/views/view.h"

class GURL;

// View that displays the favicon, sad tab, throbber, and attention indicator
// in a tab.
//
// The icon will be drawn in the upper left (upper right for RTL). Normally you
// would lay this out so the top is where you want the icon to be positioned,
// the width is TabIcon::GetIdealWidth(), and the height goes down to the
// bottom of the enclosing view (this is so the crashed tab can animate out of
// the bottom).
class TabIcon : public views::View {
 public:
  // Attention indicator types (use as a bitmask). There is only one visual
  // representation, but the state of each of these is tracked separately and
  // the indicator is shown as long as one is enabled.
  enum class AttentionType {
    kBlockedWebContents = 1 << 0,      // The WebContents is marked as blocked.
    kTabWantsAttentionStatus = 1 << 1, // Tab::SetTabNeedsAttention() called.
  };

  TabIcon();
  ~TabIcon() override;

  // NOTE The state setting functions below do not automatically schedule a
  // repaint. They are normally updated all at once, and if each scheduled
  // a paint it would be extra work. The caller should SchedulePaint() when the
  // state changes.

  // Sets the icon. Depending on the URL the icon may be automatically themed.
  void SetIcon(const GURL& url, const gfx::ImageSkia& favicon);

  // For certain types of tabs the loading animation is not desired so the
  // caller can set inhibit_loading_animation to true. When false, the loading
  // animation state will be derived from the network state.
  void SetNetworkState(TabNetworkState network_state,
                       bool inhibit_loading_animation);

  void SetIsCrashed(bool is_crashed);

  // Enables or disables the given attention type. The attention indicator
  // will be shown as long as any of the types are enabled.
  void SetAttention(AttentionType type, bool enabled);

  bool ShowingLoadingAnimation() const;
  bool ShowingAttentionIndicator() const;

  // Sets whether this object can paint to a layer. When the loading animation
  // is running, painting to a layer saves painting overhead. But if the tab is
  // being painted to some other context than the window, the layered painting
  // won't work.
  void SetCanPaintToLayer(bool can_paint_to_layer);

  // The loading animation only steps when this function is called. This
  // ensures that all loading animations step at the same time and it keeps the
  // number of timers down when lots of tabs are loading.
  void StepLoadingAnimation();

 private:
  class CrashAnimation;
  friend CrashAnimation;

  // views::View:
  void OnPaint(gfx::Canvas* canvas) override;
  void OnThemeChanged() override;

  // Paints the attention indicator and |favicon_| at the given location.
  void PaintAttentionIndicatorAndIcon(gfx::Canvas* canvas,
                                      const gfx::ImageSkia& icon,
                                      const gfx::Rect& bounds);

  void PaintLoadingAnimation(gfx::Canvas* canvas, const gfx::Rect& bounds);

  // Creates or destroys the layer according to the current animation state and
  // whether a layer can be used.
  void RefreshLayer();

  void UpdateThemedFavicon();
  gfx::ImageSkia ThemeImage(const gfx::ImageSkia& source);

  gfx::ImageSkia favicon_;
  TabNetworkState network_state_ = TabNetworkState::kNone;
  bool is_crashed_ = false;
  int attention_types_ = 0;  // Bitmask of AttentionType.

  // Value from last call to SetNetworkState. When true, the network loading
  // animation will not be shown.
  bool inhibit_loading_animation_ = false;

  // The point in time when the tab icon was first painted in the waiting state.
  base::TimeTicks waiting_start_time_;

  // The point in time when the tab icon was first painted in the loading state.
  base::TimeTicks loading_start_time_;

  // Paint state for the loading animation after the most recent waiting paint.
  gfx::ThrobberWaitingState waiting_state_;

  // When the favicon_ has theming applied to it, the themed version will be
  // cached here. If this isNull(), then there is no theming and favicon_
  // should be used.
  gfx::ImageSkia themed_favicon_;

  // May be different than is_crashed when the crashed icon is animating in.
  bool should_display_crashed_favicon_ = false;

  // Drawn when should_display_crashed_favicon_ is set. Created lazily.
  gfx::ImageSkia crashed_icon_;

  // The fraction the icon is hidden by for the crashed tab animation.
  // When this is 0 it will be drawn at the normal location, and when this is 1
  // it will be drawn off the bottom.
  double hiding_fraction_ = 0.0;

  // Crash animation (in place of favicon). Lazily created since most of the
  // time it will be unneeded.
  std::unique_ptr<CrashAnimation> crash_animation_;

  bool can_paint_to_layer_ = false;

  DISALLOW_COPY_AND_ASSIGN(TabIcon);
};

#endif  // CHROME_BROWSER_UI_VIEWS_TABS_TAB_ICON_H_
1,626
372
/*
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
 * in compliance with the License. You may obtain a copy of the License at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the License
 * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
 * or implied. See the License for the specific language governing permissions and limitations under
 * the License.
 */
/*
 * This code was generated by https://github.com/googleapis/google-api-java-client-services/
 * Modify at your own risk.
 */

package com.google.api.services.ml.v1.model;

/**
 * The request message for the SuggestTrial service method.
 *
 * <p> This is the Java data model class that specifies how to parse/serialize into the JSON that is
 * transmitted over HTTP when working with the AI Platform Training & Prediction API. For a detailed
 * explanation see:
 * <a href="https://developers.google.com/api-client-library/java/google-http-java-client/json">https://developers.google.com/api-client-library/java/google-http-java-client/json</a>
 * </p>
 *
 * @author Google, Inc.
 */
@SuppressWarnings("javadoc")
public final class GoogleCloudMlV1SuggestTrialsRequest extends com.google.api.client.json.GenericJson {

  /**
   * Required. The identifier of the client that is requesting the suggestion. If multiple
   * SuggestTrialsRequests have the same `client_id`, the service will return the identical
   * suggested trial if the trial is pending, and provide a new trial if the last suggested trial
   * was completed.
   * The value may be {@code null}.
   */
  @com.google.api.client.util.Key
  private java.lang.String clientId;

  /**
   * Required. The number of suggestions requested.
   * The value may be {@code null}.
   */
  @com.google.api.client.util.Key
  private java.lang.Integer suggestionCount;

  /**
   * Required. The identifier of the client that is requesting the suggestion. If multiple
   * SuggestTrialsRequests have the same `client_id`, the service will return the identical
   * suggested trial if the trial is pending, and provide a new trial if the last suggested trial
   * was completed.
   * @return value or {@code null} for none
   */
  public java.lang.String getClientId() {
    return clientId;
  }

  /**
   * Required. The identifier of the client that is requesting the suggestion. If multiple
   * SuggestTrialsRequests have the same `client_id`, the service will return the identical
   * suggested trial if the trial is pending, and provide a new trial if the last suggested trial
   * was completed.
   * @param clientId clientId or {@code null} for none
   */
  public GoogleCloudMlV1SuggestTrialsRequest setClientId(java.lang.String clientId) {
    this.clientId = clientId;
    return this;
  }

  /**
   * Required. The number of suggestions requested.
   * @return value or {@code null} for none
   */
  public java.lang.Integer getSuggestionCount() {
    return suggestionCount;
  }

  /**
   * Required. The number of suggestions requested.
   * @param suggestionCount suggestionCount or {@code null} for none
   */
  public GoogleCloudMlV1SuggestTrialsRequest setSuggestionCount(java.lang.Integer suggestionCount) {
    this.suggestionCount = suggestionCount;
    return this;
  }

  @Override
  public GoogleCloudMlV1SuggestTrialsRequest set(String fieldName, Object value) {
    return (GoogleCloudMlV1SuggestTrialsRequest) super.set(fieldName, value);
  }

  @Override
  public GoogleCloudMlV1SuggestTrialsRequest clone() {
    return (GoogleCloudMlV1SuggestTrialsRequest) super.clone();
  }
}
1,074
474
/* Copyright 2018 Samsung Electronics Co., LTD
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.gearvrf.physics;

import org.gearvrf.GVRContext;
import org.gearvrf.GVRScene;
import org.gearvrf.animation.GVRAvatar;
import org.gearvrf.animation.GVRSkeleton;

import java.io.IOException;

/**
 * Avatar that participates in physics simulation: after each physics step the
 * skeleton pose is updated from the physics-driven bones.
 */
public class GVRPhysicsAvatar extends GVRAvatar {
    // Listener hooked into the physics world; only the step callback does work.
    protected GVRWorld.IPhysicsEvents mPhysicsListener = new GVRWorld.IPhysicsEvents() {
        @Override
        public void onAddRigidBody(GVRWorld world, GVRRigidBody body) { }

        @Override
        public void onRemoveRigidBody(GVRWorld world, GVRRigidBody body) { }

        @Override
        public void onStepPhysics(GVRWorld world) {
            // Pull the avatar's skeleton pose from the physics bones after
            // every simulation step.
            if (mSkeleton != null) {
                mSkeleton.poseFromBones(GVRSkeleton.BONE_PHYSICS);
            }
        }
    };

    public GVRPhysicsAvatar(GVRContext ctx, String name) {
        super(ctx, name);
    }

    /**
     * Load physics information for the current avatar
     * @param filename name of physics file
     * @param scene    scene the avatar is part of
     * @throws IOException if physics file cannot be parsed
     */
    public void loadPhysics(String filename, GVRScene scene) throws IOException {
        GVRPhysicsLoader.loadPhysicsFile(getGVRContext(), filename, true, scene);
    }
};
724
309
#include <vtkSmartPointer.h>
#include <vtkRenderWindowInteractor.h>
#include <vtkRenderWindow.h>
#include <vtkRenderer.h>
#include <vtkActor.h>
#include <vtkProperty.h>
#include <vtkPolyDataMapper.h>
#include <vtkStripper.h>
#include <vtkCutter.h>
#include <vtkPlane.h>
#include <vtkSphereSource.h>
#include <vtkPoints.h>
#include <vtkCellArray.h>
#include <vtkNamedColors.h>
#include <vtkColor.h>

#ifdef VTK_CELL_ARRAY_V2
#include <vtkCellArrayIterator.h>
#endif // VTK_CELL_ARRAY_V2

// Cuts a sphere with a stack of parallel planes, renders the resulting
// contour lines on top of the sphere, and (after the interactive window is
// closed) prints the contour lines' points to stdout.
int main (int, char *[])
{
  // Define colors for example
  auto colors = vtkSmartPointer<vtkNamedColors>::New();
  vtkColor3d lineColor = colors->GetColor3d("peacock");
  vtkColor3d modelColor = colors->GetColor3d("silver");
  vtkColor3d backgroundColor = colors->GetColor3d("wheat");

  auto modelSource = vtkSmartPointer<vtkSphereSource>::New();

  // Cut the sphere with 10 planes parallel to the (default XY) plane,
  // evenly spaced in [-0.5, 0.5]; each cut yields a polyline contour.
  auto plane = vtkSmartPointer<vtkPlane>::New();

  auto cutter = vtkSmartPointer<vtkCutter>::New();
  cutter->SetInputConnection(modelSource->GetOutputPort());
  cutter->SetCutFunction(plane);
  cutter->GenerateValues(10, -.5, .5);

  auto modelMapper = vtkSmartPointer<vtkPolyDataMapper>::New();
  modelMapper->SetInputConnection(modelSource->GetOutputPort());

  auto model = vtkSmartPointer<vtkActor>::New();
  model->SetMapper(modelMapper);
  model->GetProperty()->SetDiffuseColor(modelColor.GetData());
  model->GetProperty()->SetInterpolationToFlat();

  // The stripper joins the cutter's individual line segments into longer
  // contiguous polylines, so each printed "line" is a whole contour.
  auto stripper = vtkSmartPointer<vtkStripper>::New();
  stripper->SetInputConnection(cutter->GetOutputPort());
  stripper->JoinContiguousSegmentsOn();

  auto linesMapper = vtkSmartPointer<vtkPolyDataMapper>::New();
  linesMapper->SetInputConnection(stripper->GetOutputPort());

  auto lines = vtkSmartPointer<vtkActor>::New();
  lines->SetMapper(linesMapper);
  lines->GetProperty()->SetDiffuseColor(lineColor.GetData());
  lines->GetProperty()->SetLineWidth(3.0);

  auto renderer = vtkSmartPointer<vtkRenderer>::New();
  auto renderWindow = vtkSmartPointer<vtkRenderWindow>::New();
  renderWindow->AddRenderer(renderer);
  renderWindow->SetSize(640, 480);

  auto interactor = vtkSmartPointer<vtkRenderWindowInteractor>::New();
  interactor->SetRenderWindow(renderWindow);

  // Add the actors to the renderer
  renderer->AddActor(model);
  renderer->AddActor(lines);
  renderer->SetBackground(backgroundColor.GetData());

  // This starts the event loop and as a side effect causes an initial
  // render. The Render() call also pulls the pipeline, so by the time the
  // window is closed the cutter/stripper outputs below are up to date.
  renderWindow->Render();
  interactor->Start();

  // Extract the lines from the polydata. NOTE(review): this only executes
  // after the interactive window is closed, because Start() blocks.
  vtkIdType numberOfLines = cutter->GetOutput()->GetNumberOfLines();

  std::cout << "-----------Lines without using vtkStripper" << std::endl;
  std::cout << "There are " << numberOfLines << " lines in the polydata" << std::endl;

  numberOfLines = stripper->GetOutput()->GetNumberOfLines();
  vtkPoints *points = stripper->GetOutput()->GetPoints();
  vtkCellArray *cells = stripper->GetOutput()->GetLines();

  std::cout << "-----------Lines using vtkStripper" << std::endl;
  std::cout << "There are " << numberOfLines << " lines in the polydata" << std::endl;

#ifdef VTK_CELL_ARRAY_V2

  // Newer versions of vtkCellArray prefer local iterators:
  auto cellIter = vtk::TakeSmartPointer(cells->NewIterator());
  for (cellIter->GoToFirstCell(); !cellIter->IsDoneWithTraversal();
       cellIter->GoToNextCell())
  {
    std::cout << "Line " << cellIter->GetCurrentCellId() << ":\n";

    // Each cell is a list of point ids into the shared points array.
    vtkIdList *cell = cellIter->GetCurrentCell();
    for (vtkIdType i = 0; i < cell->GetNumberOfIds(); ++i)
    {
      double point[3];
      points->GetPoint(cell->GetId(i), point);
      std::cout << "\t(" << point[0] << ", " << point[1] << ", " << point[2]
                << ")" << std::endl;
    }
  }

#else // VTK_CELL_ARRAY_V2

  // Older implementations of vtkCellArray use internal iterator APIs (not
  // thread safe):
  vtkIdType *indices;
  vtkIdType numberOfPoints;
  unsigned int lineCount = 0;
  for (cells->InitTraversal(); cells->GetNextCell(numberOfPoints, indices);
       lineCount++)
  {
    std::cout << "Line " << lineCount << ": " << std::endl;
    for (vtkIdType i = 0; i < numberOfPoints; i++)
    {
      double point[3];
      points->GetPoint(indices[i], point);
      std::cout << "\t(" << point[0] << ", " << point[1] << ", " << point[2]
                << ")" << std::endl;
    }
  }

#endif // VTK_CELL_ARRAY_V2

  return EXIT_SUCCESS;
}
1,777
1,217
<filename>Modules/object_detection/py_nodes/stats/depth_single_est_stat.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import rospy
from sensor_msgs.msg import Image
from geometry_msgs.msg import Pose, Point, Quaternion
from cv_bridge import CvBridge
from std_msgs.msg import String
from std_msgs.msg import Bool
import numpy as np
import cv2
import os
import yaml
import math
from prometheus_msgs.msg import DetectionInfo, MultiDetectionInfo
import time

# ROS node that accumulates depth estimates from landing-pad detections and
# periodically (every ~3 s) prints their mean / standard deviation.
rospy.init_node('depth_single_est_stat', anonymous=True)

# depths_list: depth samples collected since the last report.
# time_s: wall-clock timestamp of the last report (used for the 3 s window).
global depths_list, time_s
depths_list = []
time_s = time.time()


def depth_callback(detmsg):
    # Collect the depth component of each positive detection; once more than
    # 3 seconds have elapsed, print statistics and reset the window.
    #
    # detmsg: prometheus_msgs/DetectionInfo. position[2] is presumably the
    # depth (camera-frame Z) in meters -- TODO confirm against the detector.
    global depths_list, time_s
    if detmsg.detected > 0:
        depth = detmsg.position[2]
        depths_list.append(depth)
        d_s = time.time() - time_s
        if d_s > 3.:
            time_s = time.time()
            # depths_list holds at least the sample appended above, so the
            # mean/std below never run on an empty array.
            n_dat = len(depths_list)
            dat = np.array(depths_list)
            dat_mean = np.mean(dat)
            dat_std = np.std(dat)
            print("mean: {:.3f}, std: {:.3f}, n_points: {}".format(dat_mean, dat_std, n_dat))
            depths_list = []


if __name__ == '__main__':
    rospy.Subscriber('/prometheus/object_detection/landpad_det', DetectionInfo, depth_callback)
    rospy.spin()
535
339
import math import os import re import json from convlab2.nlu.svm import sutils class tuples(object): def __init__(self, config): self.acts = json.loads(config.get("grammar", "acts")) # self.nonempty_acts = json.loads(config.get("grammar", "nonempty_acts")) # self.nonfull_acts = [act for act in self.acts if act not in self.nonempty_acts] rootpath=os.path.dirname(os.path.abspath(__file__)) # if "semi" not in rootpath: # rootpath+="/semi/CNetTrain/" # else: # rootpath+="/CNetTrain/" self.ontology = json.load( open(rootpath+'/'+config.get("grammar", "ontology")) ) self.slots_informable = self.ontology["informable"] self.slots = self.ontology["requestable"] self.slots_enumerated = json.loads(config.get("grammar", "slots_enumerated")) self.config = config self.all_tuples = self._getAllTuples() self.max_active = 10 if config.has_option("decode","max_active_tuples") : self.max_active = int(config.get("decode","max_active_tuples")) self.tail_cutoff = 0.001 if config.has_option("decode","tail_cutoff") : self.tail_cutoff = float(config.get("decode","tail_cutoff")) self.log_tail_cutoff = math.log(self.tail_cutoff) def uactsToTuples(self, uacts): out = [] for uact in uacts: act =uact["act"] if uact["slots"] == [] : out.append((act,)) for slot,value in uact["slots"]: if act == "request" : out.append(("request", value)) elif slot in self.slots_informable or slot == "this": if slot in self.slots_enumerated or slot == "this": out.append((act,slot,value)) else : out.append((act,slot, genericValue(slot, value))) return out def _getAllTuples(self): out = [] for slot in self.slots: out.append(("request", slot)) for x in self.ontology["all_tuples"]: slot = x[1] if slot in self.slots_enumerated: out.append(tuple(x)) else: out.append((x[0], slot, genericValue(slot))) out.append((x[0], slot, "do n't care")) return list(set(out)) def activeTuples(self, log_turn): asr_hyps = log_turn["input"]["live"]["asr-hyps"] out = [] asr_hyps_conc = ", ".join([asr_hyp['asr-hyp'].lower() for asr_hyp in 
asr_hyps]) for this_tuple in self.all_tuples: if is_generic(this_tuple[-1]) : # this is a generic value act, slot, gvalue = this_tuple for value in self.ontology["informable"][this_tuple[-2]]: if value.lower() in asr_hyps_conc : out.append((act, slot, genericValue(slot, value))) if slot == 'Phone': matchObj = re.search(r'\d{11}',asr_hyps_conc) if matchObj: out.append((act, slot, genericValue(slot, matchObj.group()))) elif slot == 'Ticket': matchObj = re.search(r'([0-9.]*?) (GBP|gbp)', asr_hyps_conc) if matchObj: out.append((act, slot, genericValue(slot, matchObj.group()))) elif slot == 'Ref': matchObj = re.search(r'reference number is(\s*?)([a-zA-Z0-9]+)', asr_hyps_conc) if matchObj: out.append((act, slot, genericValue(slot, matchObj.group(2)))) elif slot == 'Time' or slot == 'Arrive' or slot == 'Leave': matchObj = re.search(r'\d+?:\d\d', asr_hyps_conc) if matchObj: out.append((act, slot, genericValue(slot, matchObj.group(0)))) else : out.append(this_tuple) return out def activeTuples_sent(self, log_turn): asr_hyps = log_turn["asr-hyps"] out = [] asr_hyps_conc = ", ".join([asr_hyp['asr-hyp'].lower() for asr_hyp in asr_hyps]) for this_tuple in self.all_tuples: if is_generic(this_tuple[-1]) : # this is a generic value act, slot, gvalue = this_tuple if slot not in self.ontology["informable"]: continue for value in self.ontology["informable"][this_tuple[-2]]: if value.lower() in asr_hyps_conc : out.append((act, slot, genericValue(slot, value))) if slot == 'Phone': matchObj = re.search(r'\d{11}',asr_hyps_conc) if matchObj: out.append((act, slot, genericValue(slot, matchObj.group()))) elif slot == 'Ticket': matchObj = re.search(r'([0-9.]*?) 
(GBP|gbp)', asr_hyps_conc) if matchObj: out.append((act, slot, genericValue(slot, matchObj.group()))) elif slot == 'Ref': matchObj = re.search(r'reference number is(\s*?)([a-zA-Z0-9]+)', asr_hyps_conc) if matchObj: out.append((act, slot, genericValue(slot, matchObj.group(2)))) elif slot == 'Time' or slot == 'Arrive' or slot == 'Leave': matchObj = re.search(r'\d+?:\d\d', asr_hyps_conc) if matchObj: out.append((act, slot, genericValue(slot, matchObj.group(0)))) else : out.append(this_tuple) return out def distributionToNbest(self, tuple_distribution): # convert a tuple distribution to an nbest list tuple_distribution = tuple_distribution.items() output = [] ps = [p for _t,p in tuple_distribution] eps = 0.00001 tuple_distribution = [(t, math.log(max(eps,p)), math.log(max(eps, 1-p))) for t,p in tuple_distribution if p > 0] tuple_distribution = sorted(tuple_distribution,key=lambda x:-x[1]) # prune tuple_distribution = tuple_distribution[:self.max_active] n = len(tuple_distribution) powerset = sutils.powerset(range(n)) acts = [] for subset in powerset: act = [] score = 0 for i in range(n): this_tuple, logp, log1_p = tuple_distribution[i] if i in subset : act.append(this_tuple) score += logp else : score += log1_p if (score> self.log_tail_cutoff or not act) and makes_valid_act(act) : acts.append((act,score)) if not act: null_score = score acts = sorted(acts,key=lambda x:-x[1]) acts = acts[:10] found_null = False for act,score in acts: if not act: found_null = True break if not found_null : acts.append(([], null_score)) #normalise acts = [(act,math.exp(logp)) for act,logp in acts] totalp = sum([p for act,p in acts]) acts = [{"slu-hyp":[tuple_to_act(a) for a in act],"score":p/totalp} for act,p in acts] return acts def tuple_to_act(t) : if len(t) == 1 : return {"act":t[0],"slots":[]} elif len(t) == 2: assert t[0] == "request" return {"act": "request", "slots": [["slot", t[1]]]} return {"act": t[0], "slots": [[t[1], t[2]]]} def makes_valid_act(tuples): # check if uacts is a 
valid list of tuples # - can't affirm and negate # - can't deny and inform same thing # - can't inform(a=x) inform(a=y) if x!=u singles = [t for t in tuples if len(t)==1] if ("affirm",) in tuples and ("negate",) in tuples : return False triples = [t for t in tuples if len(t)==3] informed = [(slot, value) for act,slot,value in triples if act=="inform"] denied = [(slot, value) for act,slot,value in triples if act=="deny" ] for s,v in informed: if (s,v) in denied: return False informed_slots = [slot for slot, _value in informed] if len(informed_slots) != len(set(informed_slots)) : return False return True def actual_value(value): try: return value.value except AttributeError: return value class genericValue(object): # useful class to use to represent a generic value # x = genericValue("food") # y = genericValue("food","chinese") # z = genericValue("food","indian") # x == y # y in [x] # y.value != z.value def __init__(self, slot, value=None): self.slot = slot self.value = value def __str__(self): paren = "" if self.value is not None : paren = " (%s)" % self.value return ("(generic value for %s"% self.slot) + paren + ")" def __repr__(self): return self.__str__() def __eq__(self, other): try: return self.slot == other.slot except AttributeError : return False def __hash__(self): return self.slot.__hash__() def is_generic(value): return not isinstance(value, str) def generic_to_specific(tup) : if len(tup) == 3 : act,slot,value = tup value = actual_value(value) return (act,slot,value) return tup
5,056
5,169
<filename>Specs/4/7/b/ConfettiView/0.1.5/ConfettiView.podspec.json
{
  "name": "ConfettiView",
  "version": "0.1.5",
  "summary": "Add a magnificent Confetti to any view in your app",
  "description": "This pod allows you to add Confetti to any view you'd like in a very clean and easy way. It was inspired by the HouseParty app",
  "homepage": "https://github.com/orron/ConfettiView",
  "license": {
    "type": "MIT",
    "file": "LICENSE"
  },
  "authors": {
    "<NAME>": "<EMAIL>"
  },
  "source": {
    "git": "https://github.com/orron/ConfettiView.git",
    "tag": "0.1.5"
  },
  "social_media_url": "https://twitter.com/or_ron",
  "platforms": {
    "ios": "8.1"
  },
  "source_files": "ConfettiView/Classes/**/*"
}
288
709
<reponame>security-geeks/jackhammer
package com.olacabs.jackhammer.db;

import com.olacabs.jackhammer.models.Filter;
import com.olacabs.jackhammer.models.Finding;
import com.olacabs.jackhammer.models.VulnerableType;
import com.olacabs.jackhammer.models.mapper.FindingMapper;
import com.olacabs.jackhammer.models.mapper.VulnerableTypeMapper;
import org.skife.jdbi.v2.sqlobject.BindBean;
import org.skife.jdbi.v2.sqlobject.SqlQuery;
import org.skife.jdbi.v2.sqlobject.customizers.Define;
import org.skife.jdbi.v2.sqlobject.customizers.RegisterMapper;
import org.skife.jdbi.v2.sqlobject.stringtemplate.UseStringTemplate3StatementLocator;

import java.util.List;

/**
 * JDBI DAO for filtered queries over the {@code findings} table.
 *
 * NOTE(review): the {@code <where>}, {@code <sortColumn>} and {@code <order>}
 * StringTemplate attributes are spliced into the SQL verbatim (not bound as
 * parameters). They must never be built from raw user input -- verify every
 * caller sanitizes/whitelists these values. {@code :searchTerm} and the
 * paging values are proper bound parameters and are safe.
 */
@UseStringTemplate3StatementLocator
public interface FilterDAO {

    /** Distinct finding names for a scan type / owner type, excluding soft-deleted rows. */
    @SqlQuery("select distinct(name) from findings where scanTypeId=:scanTypeId and ownerTypeId=:ownerTypeId and isDeleted=false")
    @RegisterMapper(VulnerableTypeMapper.class)
    List<VulnerableType> getVulnerableTypes(@BindBean Filter filter);

    /** One page of findings matching the templated where-clause, sorted as requested. */
    @SqlQuery("select * from findings where <where> order by <sortColumn> <order> LIMIT :limit OFFSET :offset")
    @RegisterMapper(FindingMapper.class)
    List<Finding> getFilterResults(@BindBean Filter filter,@Define("where") String where,@Define("sortColumn") String sortColumn, @Define("order") String order);

    /** Total row count for the same where-clause (for pagination). */
    @SqlQuery("select count(*) from findings where <where>")
    long totalFilterCount(@BindBean Filter filter,@Define("where") String where);

    /**
     * One page of findings matching the where-clause AND a free-text search
     * over name / severity / toolName (bound via :searchTerm).
     */
    @SqlQuery("select * from findings where <where> " +
            " and (name like concat('%', :searchTerm,'%') " +
            "or severity like concat('%', :searchTerm,'%') " +
            "or toolName like concat('%', :searchTerm,'%')) " +
            "order by <sortColumn> <order> LIMIT :limit OFFSET :offset")
    @RegisterMapper(FindingMapper.class)
    List<Finding> getFilterSearchResults(@BindBean Filter filter, @Define("where") String where,@Define("sortColumn") String sortColumn, @Define("order") String order);

    /** Total row count for the search variant above (for pagination). */
    @SqlQuery("select count(*) from findings where <where> " +
            " and (name like concat('%', :searchTerm,'%') " +
            "or severity like concat('%', :searchTerm,'%') " +
            "or toolName like concat('%', :searchTerm,'%')) ")
    long totalFilterSearchCount(@BindBean Filter filter,@Define("where") String where);
}
835
534
{ "parent": "mekanism:block/factory/combining/advanced", "textures": { "front": "mekanism:block/factory/advanced/combining/front_active", "south": "mekanism:block/factory/advanced/combining/back_active", "west": "mekanism:block/factory/advanced/combining/right_active", "east": "mekanism:block/factory/advanced/combining/left_active", "up": "mekanism:block/factory/advanced/combining/top_active" } }
169
637
//
// Copyright 2016 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

#import <EarlGrey/GREYProvider.h>

/**
 *  A provider for UIApplication windows. By default, all application windows are returned unless
 *  this provider is initialized with custom windows.
 */
@interface GREYUIWindowProvider : NSObject<GREYProvider>

/**
 *  Class method to get a provider with the specified @c windows.
 *
 *  @param windows An array of UIApplication windows to populate the provider.
 *
 *  @return A GREYUIWindowProvider instance populated with the UIApplication windows in
 *          @c windows.
 */
+ (instancetype)providerWithWindows:(NSArray *)windows;

/**
 *  Class method to get a provider with all the windows currently registered with the app.
 *
 *  @return A GREYUIWindowProvider instance populated by all windows currently
 *          registered with the app.
 */
+ (instancetype)providerWithAllWindows;

/**
 *  @remark init is not an available initializer. Use the other initializers.
 */
- (instancetype)init NS_UNAVAILABLE;

/**
 *  Designated Initializer.
 *
 *  @param windows UIApplication windows to populate the provider with. If @c windows is @c nil,
 *                 it will initialize this provider with all windows currently registered with
 *                 the app. Use the initWithAllWindows initializer instead to make that intention
 *                 explicit.
 *
 *  @return A GREYUIWindowProvider instance, populated with the specified windows.
 */
- (instancetype)initWithWindows:(NSArray *)windows NS_DESIGNATED_INITIALIZER;

/**
 *  Initializes this provider with all application windows.
 *
 *  @return A GREYUIWindowProvider instance populated by all windows currently
 *          registered with the app.
 */
- (instancetype)initWithAllWindows;

/**
 *  @return A set of all application windows ordered by window-level from back to front.
 *          (NOTE(review): the declared return type is NSArray, not NSSet -- the "set of"
 *          wording presumably means "collection"; confirm against the implementation.)
 */
+ (NSArray *)allWindows;

#pragma mark - GREYProvider

/**
 *  @return An enumerator for @c windows populating the window provider.
 */
- (NSEnumerator *)dataEnumerator;

@end
735
4,140
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hive.metastore;

import java.io.IOException;
import java.util.concurrent.TimeUnit;

import javax.jdo.JDOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.mockito.Mockito;

/**
 * Verifies the retry behavior of {@link RetryingHMSHandler} around the
 * underlying handler's {@code init()} method: transient failures are retried
 * up to {@code ConfVars.HMS_HANDLER_ATTEMPTS} times with the configured
 * interval between attempts.
 */
@Category(MetastoreCheckinTest.class)
public class TestRetriesInRetryingHMSHandler {

  private static Configuration conf;
  // Retry budget configured for every test; also the expected number of
  // init() invocations when init always fails.
  private static final int RETRY_ATTEMPTS = 3;

  @BeforeClass
  public static void setup() throws IOException {
    conf = MetastoreConf.newMetastoreConf();
    MetastoreConf.setLongVar(conf, ConfVars.HMS_HANDLER_ATTEMPTS, RETRY_ATTEMPTS);
    // Keep the retry interval tiny so the failing-retry tests stay fast.
    MetastoreConf.setTimeVar(conf, ConfVars.HMS_HANDLER_INTERVAL, 10, TimeUnit.MILLISECONDS);
    MetastoreConf.setBoolVar(conf, ConfVars.HMS_HANDLER_FORCE_RELOAD_CONF, false);
  }

  /*
   * If the init method of HMSHandler throws exception for the first time
   * while creating RetryingHMSHandler it should be retried
   */
  @Test
  public void testRetryInit() throws MetaException {
    IHMSHandler mockBaseHandler = Mockito.mock(IHMSHandler.class);
    Mockito.when(mockBaseHandler.getConf()).thenReturn(conf);
    // Fail once, then succeed: exactly two init() calls are expected.
    Mockito
    .doThrow(JDOException.class)
    .doNothing()
    .when(mockBaseHandler).init();
    RetryingHMSHandler.getProxy(conf, mockBaseHandler, false);
    Mockito.verify(mockBaseHandler, Mockito.times(2)).init();
  }

  /*
   * init method in HMSHandler should not be retried if there are no exceptions
   */
  @Test
  public void testNoRetryInit() throws MetaException {
    IHMSHandler mockBaseHandler = Mockito.mock(IHMSHandler.class);
    Mockito.when(mockBaseHandler.getConf()).thenReturn(conf);
    Mockito.doNothing().when(mockBaseHandler).init();
    RetryingHMSHandler.getProxy(conf, mockBaseHandler, false);
    Mockito.verify(mockBaseHandler, Mockito.times(1)).init();
  }

  /*
   * If the init method in HMSHandler throws exception all the times it should be retried until
   * HiveConf.ConfVars.HMSHANDLERATTEMPTS is reached before giving up
   */
  @Test(expected = MetaException.class)
  public void testRetriesLimit() throws MetaException {
    IHMSHandler mockBaseHandler = Mockito.mock(IHMSHandler.class);
    Mockito.when(mockBaseHandler.getConf()).thenReturn(conf);
    Mockito.doThrow(JDOException.class).when(mockBaseHandler).init();
    RetryingHMSHandler.getProxy(conf, mockBaseHandler, false);
    Mockito.verify(mockBaseHandler, Mockito.times(RETRY_ATTEMPTS)).init();
  }

  /*
   * Test retries when a JDOException wrapped in a MetaException is thrown:
   * the retry logic must look through the MetaException's cause chain and
   * treat the underlying JDOException as retryable.
   */
  @Test
  public void testWrappedMetaExceptionRetry() throws MetaException {
    IHMSHandler mockBaseHandler = Mockito.mock(IHMSHandler.class);
    Mockito.when(mockBaseHandler.getConf()).thenReturn(conf);
    // MetaException whose cause is a (retryable) JDOException.
    // FIX(review): the original also built an unused InvocationTargetException
    // around this MetaException; the mock always threw `me`, so that local
    // was dead code and has been removed.
    MetaException me = new MetaException("Dummy exception");
    me.initCause(new JDOException());
    Mockito
    .doThrow(me)
    .doNothing()
    .when(mockBaseHandler).init();
    RetryingHMSHandler.getProxy(conf, mockBaseHandler, false);
    Mockito.verify(mockBaseHandler, Mockito.times(2)).init();
  }
}
1,437
2,236
<filename>TABAnimatedDemo/TABAnimated/Decorate/DarkMode/TABAnimatedDarkModeImpl.h
//
//  TABAnimatedDarkModeSwitchImpl.h
//  AnimatedDemo
//
//  Created by tigerAndBull on 2020/5/5.
//  Copyright © 2020 tigerAndBull. All rights reserved.
//

#import <Foundation/Foundation.h>

NS_ASSUME_NONNULL_BEGIN

// Forward declaration only; the protocol's full definition is imported by
// the implementation file (required before this conformance is checked).
@protocol TABAnimatedDarkModeInterface;

/// Default implementation of the dark-mode behavior declared by
/// TABAnimatedDarkModeInterface. No additional public API is exposed here;
/// all behavior comes from the protocol conformance.
@interface TABAnimatedDarkModeImpl : NSObject<TABAnimatedDarkModeInterface>

@end

NS_ASSUME_NONNULL_END
163
1,144
package de.metas.rest_api.v1.ordercandidates.impl;

import com.fasterxml.jackson.core.JsonParseException;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonMappingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.SerializationFeature;
import de.metas.JsonObjectMapperHolder;
import de.metas.common.ordercandidates.v1.request.JsonOLCandCreateBulkRequest;
import de.metas.common.ordercandidates.v1.request.JsonOLCandCreateRequest;
import de.metas.util.Check;
import lombok.NonNull;
import lombok.experimental.UtilityClass;

import java.io.IOException;
import java.io.InputStream;

/*
 * #%L
 * de.metas.ordercandidate.rest-api
 * %%
 * Copyright (C) 2018 metas GmbH
 * %%
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program. If not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>.
 * #L%
 */

/**
 * Test/debug helpers for loading {@link JsonOLCandCreateBulkRequest} /
 * {@link JsonOLCandCreateRequest} instances from classpath resources and for
 * dumping them as JSON. All Jackson failures are rethrown as the unchecked
 * {@link JsonOLCandUtilException}.
 */
@UtilityClass
class JsonOLCandUtil
{
	/**
	 * Sends the given request's JSON to std-out in a pretty-printed way
	 */
	public static void printJsonString(@NonNull final JsonOLCandCreateBulkRequest object)
	{
		System.out.println(writeValueAsString(object));
	}

	// Serializes with a fresh mapper so INDENT_OUTPUT does not leak into the
	// shared mapper's configuration.
	private static String writeValueAsString(@NonNull final JsonOLCandCreateBulkRequest object)
	{
		final ObjectMapper jsonObjectMapper = JsonObjectMapperHolder.newJsonObjectMapper()
				.enable(SerializationFeature.INDENT_OUTPUT);
		try
		{
			final String json = jsonObjectMapper.writeValueAsString(object);
			return json;
		}
		catch (final JsonProcessingException e)
		{
			throw new JsonOLCandUtilException("JsonProcessingException", e);
		}
	}

	/** Unchecked wrapper for the Jackson/IO exceptions thrown by this utility. */
	public static class JsonOLCandUtilException extends RuntimeException
	{
		private static final long serialVersionUID = -626001461757553239L;

		public JsonOLCandUtilException(final String msg, final Throwable cause)
		{
			super(msg, cause);
		}
	}

	/** Loads a bulk request from the given classpath resource. */
	public static JsonOLCandCreateBulkRequest loadJsonOLCandCreateBulkRequest(@NonNull final String resourceName)
	{
		return fromRessource(resourceName, JsonOLCandCreateBulkRequest.class);
	}

	/** Loads a single request from the given classpath resource. */
	public static JsonOLCandCreateRequest loadJsonOLCandCreateRequest(@NonNull final String resourceName)
	{
		return fromRessource(resourceName, JsonOLCandCreateRequest.class);
	}

	// Deserializes a classpath resource into the given type. The stream is
	// handed to Jackson's readValue, which by default closes sources it did
	// not create (AUTO_CLOSE_SOURCE) -- presumably left enabled on the shared
	// mapper; confirm, otherwise the stream would leak here.
	private static <T> T fromRessource(@NonNull final String resourceName, @NonNull final Class<T> clazz)
	{
		final InputStream inputStream = Check.assumeNotNull(
				JsonOLCandUtil.class.getResourceAsStream(resourceName),
				"There needs to be a loadable resource with name={}", resourceName);

		final ObjectMapper jsonObjectMapper = JsonObjectMapperHolder.sharedJsonObjectMapper();
		try
		{
			return jsonObjectMapper.readValue(inputStream, clazz);
		}
		catch (final JsonParseException e)
		{
			throw new JsonOLCandUtilException("JsonParseException", e);
		}
		catch (final JsonMappingException e)
		{
			throw new JsonOLCandUtilException("JsonMappingException", e);
		}
		catch (final IOException e)
		{
			throw new JsonOLCandUtilException("IOException", e);
		}
	}
}
1,177
357
/*
 * Copyright © 2012-2015 VMware, Inc.  All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the “License”); you may not
 * use this file except in compliance with the License.  You may obtain a copy
 * of the License at http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an “AS IS” BASIS, without
 * warranties or conditions of any kind, EITHER EXPRESS OR IMPLIED.  See the
 * License for the specific language governing permissions and limitations
 * under the License.
 */

/* Releases a checksum previously produced by VmKdcMakeChecksum.
 * Safe calling convention elsewhere in this codebase suggests a NULL
 * argument is tolerated -- TODO confirm against the implementation. */
VOID
VmKdcFreeChecksum(
    PVMKDC_CHECKSUM pChecksum);

/* Computes a checksum of the given type over `contents` (`length` bytes).
 * On success *ppRetChecksum receives a newly allocated checksum that the
 * caller must release with VmKdcFreeChecksum. Returns a DWORD status code
 * (0 presumably meaning success -- confirm with callers). */
DWORD
VmKdcMakeChecksum(
    VMKDC_CKSUMTYPE type,
    PUCHAR contents,
    DWORD length,
    PVMKDC_CHECKSUM *ppRetChecksum);
257
430
<gh_stars>100-1000 #ifndef _PERLIOL_H #define _PERLIOL_H typedef struct { PerlIO_funcs *funcs; SV *arg; } PerlIO_pair_t; struct PerlIO_list_s { IV refcnt; IV cur; IV len; PerlIO_pair_t *array; }; struct _PerlIO_funcs { Size_t fsize; char *name; Size_t size; U32 kind; IV (*Pushed) (pTHX_ PerlIO *f, const char *mode, SV *arg, PerlIO_funcs *tab); IV (*Popped) (pTHX_ PerlIO *f); PerlIO *(*Open) (pTHX_ PerlIO_funcs *tab, PerlIO_list_t *layers, IV n, const char *mode, int fd, int imode, int perm, PerlIO *old, int narg, SV **args); IV (*Binmode)(pTHX_ PerlIO *f); SV *(*Getarg) (pTHX_ PerlIO *f, CLONE_PARAMS *param, int flags); IV (*Fileno) (pTHX_ PerlIO *f); PerlIO *(*Dup) (pTHX_ PerlIO *f, PerlIO *o, CLONE_PARAMS *param, int flags); /* Unix-like functions - cf sfio line disciplines */ SSize_t(*Read) (pTHX_ PerlIO *f, void *vbuf, Size_t count); SSize_t(*Unread) (pTHX_ PerlIO *f, const void *vbuf, Size_t count); SSize_t(*Write) (pTHX_ PerlIO *f, const void *vbuf, Size_t count); IV (*Seek) (pTHX_ PerlIO *f, Off_t offset, int whence); Off_t(*Tell) (pTHX_ PerlIO *f); IV (*Close) (pTHX_ PerlIO *f); /* Stdio-like buffered IO functions */ IV (*Flush) (pTHX_ PerlIO *f); IV (*Fill) (pTHX_ PerlIO *f); IV (*Eof) (pTHX_ PerlIO *f); IV (*Error) (pTHX_ PerlIO *f); void (*Clearerr) (pTHX_ PerlIO *f); void (*Setlinebuf) (pTHX_ PerlIO *f); /* Perl's snooping functions */ STDCHAR *(*Get_base) (pTHX_ PerlIO *f); Size_t(*Get_bufsiz) (pTHX_ PerlIO *f); STDCHAR *(*Get_ptr) (pTHX_ PerlIO *f); SSize_t(*Get_cnt) (pTHX_ PerlIO *f); void (*Set_ptrcnt) (pTHX_ PerlIO *f, STDCHAR * ptr, SSize_t cnt); }; /*--------------------------------------------------------------------------------------*/ /* Kind values */ #define PERLIO_K_RAW 0x00000001 #define PERLIO_K_BUFFERED 0x00000002 #define PERLIO_K_CANCRLF 0x00000004 #define PERLIO_K_FASTGETS 0x00000008 #define PERLIO_K_DUMMY 0x00000010 #define PERLIO_K_UTF8 0x00008000 #define PERLIO_K_DESTRUCT 0x00010000 #define PERLIO_K_MULTIARG 0x00020000 
/*--------------------------------------------------------------------------------------*/ struct _PerlIO { PerlIOl *next; /* Lower layer */ PerlIO_funcs *tab; /* Functions for this layer */ U32 flags; /* Various flags for state */ }; /*--------------------------------------------------------------------------------------*/ /* Flag values */ #define PERLIO_F_EOF 0x00000100 #define PERLIO_F_CANWRITE 0x00000200 #define PERLIO_F_CANREAD 0x00000400 #define PERLIO_F_ERROR 0x00000800 #define PERLIO_F_TRUNCATE 0x00001000 #define PERLIO_F_APPEND 0x00002000 #define PERLIO_F_CRLF 0x00004000 #define PERLIO_F_UTF8 0x00008000 #define PERLIO_F_UNBUF 0x00010000 #define PERLIO_F_WRBUF 0x00020000 #define PERLIO_F_RDBUF 0x00040000 #define PERLIO_F_LINEBUF 0x00080000 #define PERLIO_F_TEMP 0x00100000 #define PERLIO_F_OPEN 0x00200000 #define PERLIO_F_FASTGETS 0x00400000 #define PERLIO_F_TTY 0x00800000 #define PERLIO_F_NOTREG 0x01000000 #define PerlIOBase(f) (*(f)) #define PerlIOSelf(f,type) ((type *)PerlIOBase(f)) #define PerlIONext(f) (&(PerlIOBase(f)->next)) #define PerlIOValid(f) ((f) && *(f)) /*--------------------------------------------------------------------------------------*/ /* Data exports - EXT rather than extern is needed for Cygwin */ EXT PerlIO_funcs PerlIO_unix; EXT PerlIO_funcs PerlIO_perlio; EXT PerlIO_funcs PerlIO_stdio; EXT PerlIO_funcs PerlIO_crlf; EXT PerlIO_funcs PerlIO_utf8; EXT PerlIO_funcs PerlIO_byte; EXT PerlIO_funcs PerlIO_raw; EXT PerlIO_funcs PerlIO_pending; #ifdef HAS_MMAP EXT PerlIO_funcs PerlIO_mmap; #endif #ifdef WIN32 EXT PerlIO_funcs PerlIO_win32; #endif extern PerlIO *PerlIO_allocate(pTHX); extern SV *PerlIO_arg_fetch(PerlIO_list_t *av, IV n); #define PerlIOArg PerlIO_arg_fetch(layers,n) #ifdef PERLIO_USING_CRLF #define PERLIO_STDTEXT "t" #else #define PERLIO_STDTEXT "" #endif /*--------------------------------------------------------------------------------------*/ /* Generic, or stub layer functions */ extern IV PerlIOBase_fileno(pTHX_ PerlIO 
*f); extern PerlIO *PerlIOBase_dup(pTHX_ PerlIO *f, PerlIO *o, CLONE_PARAMS *param, int flags); extern IV PerlIOBase_pushed(pTHX_ PerlIO *f, const char *mode, SV *arg, PerlIO_funcs *tab); extern IV PerlIOBase_popped(pTHX_ PerlIO *f); extern IV PerlIOBase_binmode(pTHX_ PerlIO *f); extern SSize_t PerlIOBase_read(pTHX_ PerlIO *f, void *vbuf, Size_t count); extern SSize_t PerlIOBase_unread(pTHX_ PerlIO *f, const void *vbuf, Size_t count); extern IV PerlIOBase_eof(pTHX_ PerlIO *f); extern IV PerlIOBase_error(pTHX_ PerlIO *f); extern void PerlIOBase_clearerr(pTHX_ PerlIO *f); extern IV PerlIOBase_close(pTHX_ PerlIO *f); extern void PerlIOBase_setlinebuf(pTHX_ PerlIO *f); extern void PerlIOBase_flush_linebuf(pTHX); extern IV PerlIOBase_noop_ok(pTHX_ PerlIO *f); extern IV PerlIOBase_noop_fail(pTHX_ PerlIO *f); /*--------------------------------------------------------------------------------------*/ /* perlio buffer layer As this is reasonably generic its struct and "methods" are declared here so they can be used to "inherit" from it. 
*/ typedef struct { struct _PerlIO base; /* Base "class" info */ STDCHAR *buf; /* Start of buffer */ STDCHAR *end; /* End of valid part of buffer */ STDCHAR *ptr; /* Current position in buffer */ Off_t posn; /* Offset of buf into the file */ Size_t bufsiz; /* Real size of buffer */ IV oneword; /* Emergency buffer */ } PerlIOBuf; extern int PerlIO_apply_layera(pTHX_ PerlIO *f, const char *mode, PerlIO_list_t *layers, IV n, IV max); extern int PerlIO_parse_layers(pTHX_ PerlIO_list_t *av, const char *names); extern void PerlIO_list_free(pTHX_ PerlIO_list_t *list); extern PerlIO_funcs *PerlIO_layer_fetch(pTHX_ PerlIO_list_t *av, IV n, PerlIO_funcs *def); extern SV *PerlIO_sv_dup(pTHX_ SV *arg, CLONE_PARAMS *param); extern PerlIO *PerlIOBuf_open(pTHX_ PerlIO_funcs *self, PerlIO_list_t *layers, IV n, const char *mode, int fd, int imode, int perm, PerlIO *old, int narg, SV **args); extern IV PerlIOBuf_pushed(pTHX_ PerlIO *f, const char *mode, SV *arg, PerlIO_funcs *tab); extern IV PerlIOBuf_popped(pTHX_ PerlIO *f); extern PerlIO *PerlIOBuf_dup(pTHX_ PerlIO *f, PerlIO *o, CLONE_PARAMS *param, int flags); extern SSize_t PerlIOBuf_read(pTHX_ PerlIO *f, void *vbuf, Size_t count); extern SSize_t PerlIOBuf_unread(pTHX_ PerlIO *f, const void *vbuf, Size_t count); extern SSize_t PerlIOBuf_write(pTHX_ PerlIO *f, const void *vbuf, Size_t count); extern IV PerlIOBuf_seek(pTHX_ PerlIO *f, Off_t offset, int whence); extern Off_t PerlIOBuf_tell(pTHX_ PerlIO *f); extern IV PerlIOBuf_close(pTHX_ PerlIO *f); extern IV PerlIOBuf_flush(pTHX_ PerlIO *f); extern IV PerlIOBuf_fill(pTHX_ PerlIO *f); extern STDCHAR *PerlIOBuf_get_base(pTHX_ PerlIO *f); extern Size_t PerlIOBuf_bufsiz(pTHX_ PerlIO *f); extern STDCHAR *PerlIOBuf_get_ptr(pTHX_ PerlIO *f); extern SSize_t PerlIOBuf_get_cnt(pTHX_ PerlIO *f); extern void PerlIOBuf_set_ptrcnt(pTHX_ PerlIO *f, STDCHAR * ptr, SSize_t cnt); extern int PerlIOUnix_oflags(const char *mode); 
/*--------------------------------------------------------------------------------------*/ #endif /* _PERLIOL_H */
3,116
2,372
<gh_stars>1000+ // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2018 NVIDIA Corporation. All rights reserved. 
#ifndef CONVEX_RENDERER
#define CONVEX_RENDERER

#include "Convex.h"
#include <vector>

using namespace physx;

class Shader;

// ----------------------------------------------------------------------------
// Batches convexes that share a shader into vertex-buffer groups and renders
// them. Groups are capped at maxVertsPerGroup vertices; buffers are rebuilt
// lazily via updateRenderBuffers() / updateTransformations().
class ConvexRenderer
{
public:
    ConvexRenderer();
    ~ConvexRenderer();
    void init();
    // Upper bound on vertices batched into a single ConvexGroup.
    const static int maxVertsPerGroup = 100000;

    // When inactive, rendering is presumably skipped — confirm in render() impl.
    void setActive(bool active) { mActive = active; };

    // Register/unregister a convex for batched rendering.
    void add(const base::Convex* convex, Shader* shader);
    void remove(const base::Convex* convex);
    void render();
    void setShaderMaterial(Shader* shader, const ShaderMaterial& mat) {this->mShader = shader; this->mShaderMat = mat;}

    // Texture-array handles (diffuse / bump / specular / emissive+spec-power).
    void setTexArrays(unsigned int diffuse, unsigned int bump, unsigned int specular, unsigned int specPower) {
        mDiffuseTexArray = diffuse;
        mBumpTexArray = bump;
        mSpecularTexArray = specular;
        mEmissiveReflectSpecPowerTexArray = specPower;
    }
    // Volume texture handle.
    void setVolTex(unsigned int volTexi) { volTex = volTexi;}

private:
    void updateRenderBuffers();
    void updateTransformations();
    Shader* mShader;
    ShaderMaterial mShaderMat;

    // One GPU batch: all convexes sharing a shader, flattened into a single
    // VBO/IBO pair plus a per-convex material texture.
    struct ConvexGroup {
        void init() { numVertices = 0; numIndices = 0; VBO = 0; IBO = 0; matTex = 0; texSize = 0; }
        bool dirty;                              // buffers need rebuilding
        std::vector<const Convex*> convexes;
        std::vector<float> vertices;             // interleaved vertex data (CPU side)
        std::vector<unsigned int> indices;
        std::vector<float> texCoords;
        int numVertices, numIndices;
        unsigned int VBO;                        // GL vertex buffer object
        unsigned int IBO;                        // GL index buffer object
        unsigned int matTex;                     // per-group material texture
        int texSize;
        Shader* mShader;
    };
    std::vector<ConvexGroup*> mGroups;
    bool mActive;
    float mBumpTextureUVScale;
    float mExtraNoiseScale;
    float mRoughnessScale;
    unsigned int mDiffuseTexArray;
    unsigned int mBumpTexArray;
    unsigned int mSpecularTexArray;
    unsigned int mEmissiveReflectSpecPowerTexArray;
    unsigned int volTex;
};

#endif
1,055
2,291
<gh_stars>1000+
package org.osmdroid.util;

/**
 * An {@link Iterable} that additionally exposes the number of elements it
 * will yield, so callers can pre-size collections without iterating twice.
 *
 * @param <T> element type
 */
public interface IterableWithSize<T> extends Iterable<T> {

    /** @return the number of elements produced by this iterable */
    int size();
}
45
1,526
<reponame>Hakunata/servicecomb-pack
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.servicecomb.pack.alpha.server.api;

import static org.hamcrest.Matchers.hasSize;
import static org.mockito.Mockito.when;
import static org.springframework.test.web.servlet.result.MockMvcResultHandlers.print;
import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.jsonPath;
import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status;

import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.core.JsonGenerator.Feature;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import org.apache.servicecomb.pack.alpha.core.NodeStatus;
import org.apache.servicecomb.pack.alpha.core.NodeStatus.TypeEnum;
import org.apache.servicecomb.pack.alpha.fsm.SagaActorState;
import org.apache.servicecomb.pack.alpha.core.fsm.TransactionType;
import org.apache.servicecomb.pack.alpha.core.fsm.TxState;
import org.apache.servicecomb.pack.alpha.core.fsm.event.SagaEndedEvent;
import org.apache.servicecomb.pack.alpha.core.fsm.event.SagaStartedEvent;
import org.apache.servicecomb.pack.alpha.core.fsm.event.TxEndedEvent;
import org.apache.servicecomb.pack.alpha.core.fsm.event.TxStartedEvent;
import org.apache.servicecomb.pack.alpha.core.fsm.event.base.BaseEvent;
import org.apache.servicecomb.pack.alpha.core.metrics.MetricsBean;
import org.apache.servicecomb.pack.alpha.fsm.metrics.MetricsService;
import org.apache.servicecomb.pack.alpha.fsm.repository.TransactionRepository;
import org.apache.servicecomb.pack.alpha.core.fsm.repository.model.GlobalTransaction;
import org.apache.servicecomb.pack.alpha.core.fsm.repository.model.PagingGlobalTransactions;
import org.apache.servicecomb.pack.alpha.core.fsm.repository.model.SagaSubTransaction;
import org.apache.servicecomb.pack.alpha.server.AlphaApplication;
import org.apache.servicecomb.pack.alpha.server.AlphaConfig;
import org.apache.servicecomb.pack.alpha.server.metrics.AlphaMetricsEndpoint;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.autoconfigure.web.servlet.AutoConfigureMockMvc;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.boot.test.mock.mockito.MockBean;
import org.springframework.data.elasticsearch.core.ElasticsearchTemplate;
import org.springframework.http.MediaType;
import org.springframework.test.context.junit4.SpringRunner;
import org.springframework.test.web.servlet.MockMvc;
import org.springframework.test.web.servlet.result.MockMvcResultMatchers;

import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get;

/**
 * MockMvc tests for the Alpha server's v1 REST API
 * ({@code /alpha/api/v1/**}). The repository / metrics / node-status
 * collaborators are replaced with Mockito mocks, so only the controller
 * layer and JSON serialization are exercised.
 */
@RunWith(SpringRunner.class)
@AutoConfigureMockMvc
@SpringBootTest(classes = {AlphaApplication.class, AlphaConfig.class})
public class APIv1ControllerTest {

  @Autowired
  private MockMvc mockMvc;

  @Autowired
  AlphaMetricsEndpoint alphaMetricsEndpoint;

  // Mocked collaborators: the controller reads from these, never a real store.
  @MockBean
  MetricsService metricsService;

  @MockBean
  NodeStatus nodeStatus;

  @MockBean
  ElasticsearchTemplate template;

  @MockBean
  TransactionRepository transactionRepository;

  /**
   * GET /alpha/api/v1/metrics must expose every counter of the mocked
   * MetricsBean (each bumped exactly once, avg times set to 5) plus the
   * node type reported by NodeStatus.
   */
  @Test
  public void metricsTest() throws Exception {
    MetricsBean metricsBean = new MetricsBean();
    metricsBean.doEventReceived();
    metricsBean.doEventAccepted();
    metricsBean.doEventAvgTime(5);
    metricsBean.doActorReceived();
    metricsBean.doActorAccepted();
    metricsBean.doActorAvgTime(5);
    metricsBean.doRepositoryReceived();
    metricsBean.doRepositoryAccepted();
    metricsBean.doRepositoryAvgTime(5);
    metricsBean.doCommitted();
    metricsBean.doCompensated();
    metricsBean.doSuspended();
    metricsBean.doSagaBeginCounter();
    metricsBean.doSagaEndCounter();
    metricsBean.doSagaAvgTime(5);
    when(metricsService.metrics()).thenReturn(metricsBean);
    when(nodeStatus.getTypeEnum()).thenReturn(TypeEnum.MASTER);
    mockMvc.perform(get("/alpha/api/v1/metrics"))
        .andExpect(status().isOk())
        .andExpect(
            MockMvcResultMatchers.content().contentType(MediaType.APPLICATION_JSON_UTF8_VALUE))
        .andExpect(jsonPath("$.metrics.eventReceived").value(1))
        .andExpect(jsonPath("$.metrics.eventAccepted").value(1))
        .andExpect(jsonPath("$.metrics.eventRejected").value(0))
        .andExpect(jsonPath("$.metrics.eventAvgTime").value(5.0))
        .andExpect(jsonPath("$.metrics.actorReceived").value(1))
        .andExpect(jsonPath("$.metrics.actorAccepted").value(1))
        .andExpect(jsonPath("$.metrics.actorRejected").value(0))
        .andExpect(jsonPath("$.metrics.actorAvgTime").value(5.0))
        .andExpect(jsonPath("$.metrics.repositoryReceived").value(1))
        .andExpect(jsonPath("$.metrics.repositoryAccepted").value(1))
        .andExpect(jsonPath("$.metrics.repositoryRejected").value(0))
        .andExpect(jsonPath("$.metrics.repositoryAvgTime").value(5.0))
        .andExpect(jsonPath("$.metrics.sagaBeginCounter").value(1))
        .andExpect(jsonPath("$.metrics.sagaEndCounter").value(1))
        .andExpect(jsonPath("$.metrics.sagaAvgTime").value(5.0))
        .andExpect(jsonPath("$.metrics.committed").value(1))
        .andExpect(jsonPath("$.metrics.compensated").value(1))
        .andExpect(jsonPath("$.metrics.suspended").value(1))
        .andExpect(jsonPath("$.nodeType").value(TypeEnum.MASTER.name()))
        .andReturn();
  }

  /**
   * GET /alpha/api/v1/transaction (paged) must echo the paging metadata and
   * serialize one committed saga with 8 events and 3 sub-transactions.
   */
  @Test
  public void transactionTest() throws Exception {
    final String serviceName = "serviceName-1";
    final String instanceId = "instanceId-1";
    final String globalTxId = UUID.randomUUID().toString();
    final String localTxId_1 = UUID.randomUUID().toString();
    final String localTxId_2 = UUID.randomUUID().toString();
    final String localTxId_3 = UUID.randomUUID().toString();
    // 8 events: saga start/end wrapping three started/ended sub-transaction pairs.
    List<BaseEvent> events = new ArrayList();
    events.add(SagaStartedEvent.builder().serviceName("service_g").instanceId("instance_g")
        .globalTxId(globalTxId).build());
    events.add(TxStartedEvent.builder().serviceName("service_c1").instanceId("instance_c1")
        .globalTxId(globalTxId).parentTxId(globalTxId).localTxId(localTxId_1).build());
    events.add(TxEndedEvent.builder().serviceName("service_c1").instanceId("instance_c1")
        .globalTxId(globalTxId).parentTxId(globalTxId).localTxId(localTxId_1).build());
    events.add(TxStartedEvent.builder().serviceName("service_c2").instanceId("instance_c2")
        .globalTxId(globalTxId).parentTxId(globalTxId).localTxId(localTxId_2).build());
    events.add(TxEndedEvent.builder().serviceName("service_c2").instanceId("instance_c2")
        .globalTxId(globalTxId).parentTxId(globalTxId).localTxId(localTxId_2).build());
    events.add(TxStartedEvent.builder().serviceName("service_c3").instanceId("instance_c3")
        .globalTxId(globalTxId).parentTxId(globalTxId).localTxId(localTxId_3).build());
    events.add(TxEndedEvent.builder().serviceName("service_c3").instanceId("instance_c3")
        .globalTxId(globalTxId).parentTxId(globalTxId).localTxId(localTxId_3).build());
    events.add(SagaEndedEvent.builder().serviceName("service_g").instanceId("instance_g")
        .globalTxId(globalTxId).build());
    List<SagaSubTransaction> subTransactions = new ArrayList();
    subTransactions
        .add(SagaSubTransaction.builder().parentTxId(globalTxId).localTxId(localTxId_1).state(
            TxState.COMMITTED).beginTime(new Date()).endTime(new Date()).build());
    subTransactions
        .add(SagaSubTransaction.builder().parentTxId(globalTxId).localTxId(localTxId_2).state(
            TxState.COMMITTED).beginTime(new Date()).endTime(new Date()).build());
    subTransactions
        .add(SagaSubTransaction.builder().parentTxId(globalTxId).localTxId(localTxId_3).state(
            TxState.COMMITTED).beginTime(new Date()).endTime(new Date()).build());
    List<GlobalTransaction> globalTransactions = new ArrayList<>();
    globalTransactions.add(GlobalTransaction.builder()
        .serviceName(serviceName)
        .instanceId(instanceId)
        .globalTxId(globalTxId)
        .type(TransactionType.SAGA)
        .state(SagaActorState.COMMITTED.name())
        .beginTime(new Date())
        .endTime(new Date())
        .subTxSize(3)
        .events(events)
        .subTransactions(subTransactions)
        .build());
    PagingGlobalTransactions paging = PagingGlobalTransactions.builder()
        .page(0)
        .size(50)
        .elapsed(10)
        .total(1)
        .globalTransactions(globalTransactions)
        .build();
    // Stub exactly the query the controller issues for page=0&size=50.
    when(transactionRepository.getGlobalTransactions(null,0, 50)).thenReturn(paging);
    ObjectMapper mapper = new ObjectMapper();
    mapper.configure(JsonGenerator.Feature.QUOTE_FIELD_NAMES, false);
    mapper.configure(Feature.QUOTE_NON_NUMERIC_NUMBERS, false);
    mockMvc.perform(get("/alpha/api/v1/transaction?page=0&size=50"))
        .andExpect(status().isOk())
        .andExpect(
            MockMvcResultMatchers.content().contentType(MediaType.APPLICATION_JSON_UTF8_VALUE))
        .andExpect(jsonPath("$.total").value(1))
        .andExpect(jsonPath("$.page").value(0))
        .andExpect(jsonPath("$.size").value(50))
        .andExpect(jsonPath("$.elapsed").value(10))
        .andExpect(jsonPath("$.globalTransactions", hasSize(1)))
        .andExpect(jsonPath("$.globalTransactions[0].globalTxId")
            .value(globalTransactions.get(0).getGlobalTxId()))
        .andExpect(jsonPath("$.globalTransactions[0].type")
            .value(globalTransactions.get(0).getType().name()))
        .andExpect(jsonPath("$.globalTransactions[0].serviceName")
            .value(globalTransactions.get(0).getServiceName()))
        .andExpect(jsonPath("$.globalTransactions[0].instanceId")
            .value(globalTransactions.get(0).getInstanceId()))
        .andExpect(jsonPath("$.globalTransactions[0].beginTime")
            .value(globalTransactions.get(0).getBeginTime().getTime()))
        .andExpect(jsonPath("$.globalTransactions[0].endTime")
            .value(globalTransactions.get(0).getEndTime().getTime()))
        .andExpect(jsonPath("$.globalTransactions[0].state")
            .value(globalTransactions.get(0).getState()))
        .andExpect(jsonPath("$.globalTransactions[0].subTxSize")
            .value(globalTransactions.get(0).getSubTxSize()))
        .andExpect(jsonPath("$.globalTransactions[0].durationTime")
            .value(globalTransactions.get(0).getDurationTime()))
        .andExpect(jsonPath("$.globalTransactions[0].subTransactions", hasSize(3)))
        .andExpect(jsonPath("$.globalTransactions[0].events", hasSize(8)))
        .andReturn();
  }

  /**
   * GET /alpha/api/v1/transaction/{globalTxId} must return the single saga
   * looked up by id, with the same event/sub-transaction shape as above.
   */
  @Test
  public void globalTransactionByGlobalTxIdTest() throws Exception {
    final String serviceName = "serviceName-1";
    final String instanceId = "instanceId-1";
    final String globalTxId = UUID.randomUUID().toString();
    final String localTxId_1 = UUID.randomUUID().toString();
    final String localTxId_2 = UUID.randomUUID().toString();
    final String localTxId_3 = UUID.randomUUID().toString();
    List<BaseEvent> events = new ArrayList();
    events.add(SagaStartedEvent.builder().serviceName("service_g").instanceId("instance_g")
        .globalTxId(globalTxId).build());
    events.add(TxStartedEvent.builder().serviceName("service_c1").instanceId("instance_c1")
        .globalTxId(globalTxId).parentTxId(globalTxId).localTxId(localTxId_1).build());
    events.add(TxEndedEvent.builder().serviceName("service_c1").instanceId("instance_c1")
        .globalTxId(globalTxId).parentTxId(globalTxId).localTxId(localTxId_1).build());
    events.add(TxStartedEvent.builder().serviceName("service_c2").instanceId("instance_c2")
        .globalTxId(globalTxId).parentTxId(globalTxId).localTxId(localTxId_2).build());
    events.add(TxEndedEvent.builder().serviceName("service_c2").instanceId("instance_c2")
        .globalTxId(globalTxId).parentTxId(globalTxId).localTxId(localTxId_2).build());
    events.add(TxStartedEvent.builder().serviceName("service_c3").instanceId("instance_c3")
        .globalTxId(globalTxId).parentTxId(globalTxId).localTxId(localTxId_3).build());
    events.add(TxEndedEvent.builder().serviceName("service_c3").instanceId("instance_c3")
        .globalTxId(globalTxId).parentTxId(globalTxId).localTxId(localTxId_3).build());
    events.add(SagaEndedEvent.builder().serviceName("service_g").instanceId("instance_g")
        .globalTxId(globalTxId).build());
    List<SagaSubTransaction> subTransactions = new ArrayList();
    subTransactions
        .add(SagaSubTransaction.builder().parentTxId(globalTxId).localTxId(localTxId_1).state(
            TxState.COMMITTED).beginTime(new Date()).endTime(new Date()).build());
    subTransactions
        .add(SagaSubTransaction.builder().parentTxId(globalTxId).localTxId(localTxId_2).state(
            TxState.COMMITTED).beginTime(new Date()).endTime(new Date()).build());
    subTransactions
        .add(SagaSubTransaction.builder().parentTxId(globalTxId).localTxId(localTxId_3).state(
            TxState.COMMITTED).beginTime(new Date()).endTime(new Date()).build());
    GlobalTransaction globalTransaction = GlobalTransaction.builder()
        .serviceName(serviceName)
        .instanceId(instanceId)
        .globalTxId(globalTxId)
        .type(TransactionType.SAGA)
        .state(SagaActorState.COMMITTED.name())
        .beginTime(new Date())
        .endTime(new Date())
        .subTxSize(3)
        .events(events)
        .subTransactions(subTransactions)
        .build();
    when(transactionRepository.getGlobalTransactionByGlobalTxId(globalTxId)).thenReturn(globalTransaction);
    ObjectMapper mapper = new ObjectMapper();
    mapper.configure(JsonGenerator.Feature.QUOTE_FIELD_NAMES, false);
    mapper.configure(Feature.QUOTE_NON_NUMERIC_NUMBERS, false);
    mockMvc.perform(get("/alpha/api/v1/transaction/"+globalTxId))
        .andDo(print())
        .andExpect(status().isOk())
        .andExpect(
            MockMvcResultMatchers.content().contentType(MediaType.APPLICATION_JSON_UTF8_VALUE))
        .andExpect(jsonPath("$.globalTxId")
            .value(globalTransaction.getGlobalTxId()))
        .andExpect(jsonPath("$.type")
            .value(globalTransaction.getType().name()))
        .andExpect(jsonPath("$.serviceName")
            .value(globalTransaction.getServiceName()))
        .andExpect(jsonPath("$.instanceId")
            .value(globalTransaction.getInstanceId()))
        .andExpect(jsonPath("$.beginTime")
            .value(globalTransaction.getBeginTime().getTime()))
        .andExpect(jsonPath("$.endTime")
            .value(globalTransaction.getEndTime().getTime()))
        .andExpect(jsonPath("$.state")
            .value(globalTransaction.getState()))
        .andExpect(jsonPath("$.subTxSize")
            .value(globalTransaction.getSubTxSize()))
        .andExpect(jsonPath("$.durationTime")
            .value(globalTransaction.getDurationTime()))
        .andExpect(jsonPath("$.subTransactions", hasSize(3)))
        .andExpect(jsonPath("$.events", hasSize(8)))
        .andReturn();
  }

  /**
   * GET /alpha/api/v1/transaction/statistics must pass the repository's
   * state-count map straight through as a JSON object.
   */
  @Test
  public void transactionStatisticsTest() throws Exception {
    Map<String, Long> statistics = new HashMap<>();
    statistics.put("COMMITTED", 1l);
    statistics.put("SUSPENDED", 2l);
    statistics.put("COMPENSATED", 3l);
    when(transactionRepository.getTransactionStatistics()).thenReturn(statistics);
    mockMvc.perform(get("/alpha/api/v1/transaction/statistics"))
        .andExpect(status().isOk())
        .andExpect(
            MockMvcResultMatchers.content().contentType(MediaType.APPLICATION_JSON_UTF8_VALUE))
        .andExpect(jsonPath("$.COMMITTED").value(statistics.get("COMMITTED")))
        .andExpect(jsonPath("$.SUSPENDED").value(statistics.get("SUSPENDED")))
        .andExpect(jsonPath("$.COMPENSATED").value(statistics.get("COMPENSATED")))
        .andReturn();
  }

  /**
   * GET /alpha/api/v1/transaction/slow must return the top-10 slow
   * transactions as a JSON array.
   */
  @Test
  public void transactionSlowTest() throws Exception {
    List<GlobalTransaction> globalTransactions = new ArrayList<>();
    for(int i=0;i<10;i++){
      globalTransactions.add(GlobalTransaction.builder()
          .beginTime(new Date())
          .endTime(new Date())
          .events(new ArrayList<>())
          .subTransactions(new ArrayList<>())
          .build());
    }
    when(transactionRepository.getSlowGlobalTransactionsTopN(10)).thenReturn(globalTransactions);
    mockMvc.perform(get("/alpha/api/v1/transaction/slow"))
        .andExpect(status().isOk())
        .andExpect(
            MockMvcResultMatchers.content().contentType(MediaType.APPLICATION_JSON_UTF8_VALUE))
        .andExpect(jsonPath("$", hasSize(10)))
        .andReturn();
  }
}
6,748
679
/**************************************************************
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 *
 *************************************************************/
#ifndef _HTMLNUM_HXX
#define _HTMLNUM_HXX

#include <swtypes.hxx>
#include <string.h>

// Default left margin / first-line indent for HTML numbered & bulleted lists.
#define HTML_NUMBUL_MARGINLEFT	(MM50*2 + MM50/2)
#define HTML_NUMBUL_INDENT		(-MM50)

class SwTxtNode;
class SwNumRule;

// Tracks the numbering state (rule, nesting depth, per-level restart values)
// while importing or exporting HTML lists.
class SwHTMLNumRuleInfo
{
    sal_uInt16 aNumStarts[MAXLEVEL];    // per-level start values (USHRT_MAX = unset)
    SwNumRule * pNumRule;               // current numbering rule
    sal_uInt16 nDeep;                   // current numbering depth (1, 2, 3, ...)
    sal_Bool bRestart : 1;              // export: restart numbering
    sal_Bool bNumbered : 1;             // export: paragraph is numbered

public:

    inline void Set( const SwHTMLNumRuleInfo& rInf );
    void Set( const SwTxtNode& rTxtNd );

    // Default state: no rule, depth 0; start values filled with 0xff bytes,
    // i.e. USHRT_MAX = "not set".
    SwHTMLNumRuleInfo() :
        pNumRule( 0 ), nDeep( 0 ),
        bRestart( sal_False ), bNumbered( sal_False )
    {
        memset( &aNumStarts, 0xff, sizeof( aNumStarts ) );
    }

    SwHTMLNumRuleInfo( const SwHTMLNumRuleInfo& rInf ) :
        pNumRule( rInf.pNumRule ), nDeep( rInf.nDeep ),
        bRestart( rInf.bRestart ), bNumbered( rInf.bNumbered )
    {
        memcpy( &aNumStarts, &rInf.aNumStarts, sizeof( aNumStarts ) );
    }

    SwHTMLNumRuleInfo( const SwTxtNode& rTxtNd ) { Set( rTxtNd ); }
    inline SwHTMLNumRuleInfo& operator=( const SwHTMLNumRuleInfo& rInf );

    inline void Clear();

    // Note: casts away const so a shared rule can be stored mutably.
    void SetNumRule( const SwNumRule *pRule ) { pNumRule = (SwNumRule *)pRule; }
    SwNumRule *GetNumRule() { return pNumRule; }
    const SwNumRule *GetNumRule() const { return pNumRule; }

    void SetDepth( sal_uInt16 nDepth ) { nDeep = nDepth; }
    sal_uInt16 GetDepth() const { return nDeep; }
    sal_uInt16 IncDepth() { return ++nDeep; }
    // Clamped decrement: never wraps below 0.
    sal_uInt16 DecDepth() { return nDeep==0 ? 0 : --nDeep; }
    inline sal_uInt8 GetLevel() const;

    void SetRestart( sal_Bool bSet ) { bRestart = bSet; }
    sal_Bool IsRestart() const { return bRestart; }

    void SetNumbered( sal_Bool bSet ) { bNumbered = bSet; }
    sal_Bool IsNumbered() const { return bNumbered; }

    inline void SetNodeStartValue( sal_uInt8 nLvl, sal_uInt16 nVal=USHRT_MAX );
    sal_uInt16 GetNodeStartValue( sal_uInt8 nLvl ) const { return aNumStarts[nLvl]; }
};

inline SwHTMLNumRuleInfo& SwHTMLNumRuleInfo::operator=(
    const SwHTMLNumRuleInfo& rInf )
{
    Set( rInf );
    return *this;
}

inline void SwHTMLNumRuleInfo::Set( const SwHTMLNumRuleInfo& rInf )
{
    pNumRule = rInf.pNumRule;
    nDeep = rInf.nDeep;
    bRestart = rInf.bRestart;
    bNumbered = rInf.bNumbered;
    memcpy( &aNumStarts, &rInf.aNumStarts, sizeof( aNumStarts ) );
}

inline void SwHTMLNumRuleInfo::Clear()
{
    pNumRule = 0;
    nDeep = 0;
    bRestart = bNumbered = sal_False;
    memset( &aNumStarts, 0xff, sizeof( aNumStarts ) );
}

// Maps depth (1-based, unbounded) to a 0-based rule level capped at
// MAXLEVEL-1; 0 when no rule is active or depth is 0.
inline sal_uInt8 SwHTMLNumRuleInfo::GetLevel() const
{
    return
        (sal_uInt8)( pNumRule!=0 && nDeep != 0
                        ? ( nDeep<=MAXLEVEL ? nDeep-1 : MAXLEVEL - 1 )
                        : 0 );
}

inline void SwHTMLNumRuleInfo::SetNodeStartValue( sal_uInt8 nLvl, sal_uInt16 nVal )
{
    aNumStarts[nLvl] = nVal;
}

#endif
1,393
590
# coding: utf-8
"""
https://algodaily.com/challenges/reverse-only-alphabetical
"""


# Extract-and-refill approach: gather the letters, reverse them, then rebuild
# the string, dropping reversed letters back into the alphabetical slots while
# every non-letter stays exactly where it was.
def reverse_only_alpha(s):
    """Return `s` with its alphabetic characters reversed in place.

    Non-alphabetic characters keep their original positions.
    """
    reversed_letters = iter([ch for ch in s if ch.isalpha()][::-1])
    return ''.join(
        next(reversed_letters) if ch.isalpha() else ch
        for ch in s
    )
253
434
{ "instance" : { "testCallbackFunctionalTest" : "JohanBrichau 4/1/2016 14:27", "testDelegationFunctionalTest" : "JohanBrichau 9/11/2018 12:15", "testFlowConvenienceFunctionalTest" : "JohanBrichau 9/11/2018 12:03", "testCallbackSecurityFunctionalTest" : "JohanBrichau 4/1/2016 14:46", "testFilterFunctionalTest" : "JohanBrichau 9/9/2018 18:37", "componentClass" : "JohanBrichau 3/1/2014 19:16", "testContextFunctionalTest" : "JohanBrichau 9/9/2018 15:10", "testDelayFunctionalTest" : "JohanBrichau 9/9/2018 17:49", "testCanvasTagFunctionalTest" : "JohanBrichau 9/9/2018 12:40", "testDateTimeFunctionalTest" : "JohanBrichau 9/9/2018 16:37", "testCanvasTableFunctionalTest" : "JohanBrichau 9/9/2018 12:30", "testExpiryFunctionalTest" : "JohanBrichau 9/9/2018 18:36", "testEncodingFunctionalTest" : "JohanBrichau 9/9/2018 18:35", "testDateSelectorFunctionalTest" : "JohanBrichau 9/9/2018 16:01", "selectTest:" : "JohanBrichau 3/1/2014 19:30", "testBatchFunctionalTest" : "JohanBrichau 5/2/2014 21:37", "testCookieFunctionalTest" : "JohanBrichau 9/9/2018 15:07", "testExceptionFunctionalTest" : "JohanBrichau 9/9/2018 18:35", "testButtonFunctionalTest" : "JohanBrichau 5/2/2014 21:17" }, "class" : { } }
528
301
/*
 * Copyright (c) 2018-2019 "Neo4j, Inc." [https://neo4j.com]
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.opencypher.gremlin.queries;

import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.tuple;

import java.util.List;
import java.util.Map;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.opencypher.gremlin.groups.SkipWithCosmosDB;
import org.opencypher.gremlin.rules.GremlinServerExternalResource;
import org.opencypher.gremlin.test.TestCommons;

/**
 * Integration tests for Cypher ORDER BY / SKIP / LIMIT translated to Gremlin,
 * run against an embedded Gremlin server preloaded with the TinkerPop
 * "modern" graph (6 vertices).
 */
public class OrderByTest {

    @ClassRule
    public static final GremlinServerExternalResource gremlinServer =
        new GremlinServerExternalResource(TestCommons::modernGraph);

    // Vertex count of the "modern" sample graph.
    private static final int VERTICES_COUNT = 6;

    // Runs a Cypher query through the Cypher-to-Gremlin client and collects all rows.
    private List<Map<String, Object>> submitAndGet(String cypher) {
        return gremlinServer.cypherGremlinClient().submit(cypher).all();
    }

    /** ORDER BY on a single column sorts ascending by default. */
    @Test
    public void orderBySingleColumn() throws Exception {
        List<Map<String, Object>> results = submitAndGet("MATCH (n) RETURN n.name ORDER BY n.name");

        assertThat(results).hasSize(6)
            .extracting("n.name")
            .containsExactly("josh", "lop", "marko", "peter", "ripple", "vadas");
    }

    /** Mixed ASC/DESC ordering across two columns. */
    @Test
    public void orderByMultipleColumns() throws Exception {
        List<Map<String, Object>> results = submitAndGet(
            "MATCH (p:person)-[:created]->(s:software) " +
                "RETURN p.name, s.name " +
                "ORDER BY p.name ASC, s.name DESC"
        );

        assertThat(results)
            .extracting("p.name", "s.name")
            .containsExactly(
                tuple("josh", "ripple"),
                tuple("josh", "lop"),
                tuple("marko", "lop"),
                tuple("peter", "lop")
            );
    }

    /** SKIP drops the first rows. */
    @Test
    public void skip() throws Exception {
        List<Map<String, Object>> results = submitAndGet("MATCH (n) RETURN n SKIP 2");

        assertThat(results).hasSize(VERTICES_COUNT - 2);
    }

    /** LIMIT caps the row count. */
    @Test
    public void limit() throws Exception {
        List<Map<String, Object>> results = submitAndGet("MATCH (n) RETURN n LIMIT 1");

        assertThat(results).hasSize(1);
    }

    /** ORDER BY, SKIP and LIMIT compose (third name alphabetically). */
    @Test
    public void orderBySkipLimit() throws Exception {
        List<Map<String, Object>> results = submitAndGet("MATCH (n) RETURN n.name ORDER BY n.name SKIP 2 LIMIT 1");

        assertThat(results)
            .extracting("n.name")
            .containsExactly("marko");
    }

    /** SKIP past the result size yields an empty result, not an error. */
    @Test
    public void skipOutOfSize() throws Exception {
        List<Map<String, Object>> results = submitAndGet("MATCH (n) RETURN n.name SKIP 20");

        assertThat(results).isEmpty();
    }

    /** LIMIT larger than the result size returns everything. */
    @Test
    public void limitOutOfSize() throws Exception {
        List<Map<String, Object>> results = submitAndGet("MATCH (n) RETURN n.name LIMIT 10");

        assertThat(results).hasSize(VERTICES_COUNT);
    }

    /** ORDER BY may reference a WITH-projected alias alongside a property. */
    @Test
    @Category(SkipWithCosmosDB.RealiasingCreatesCollection.class)
    public void projections() throws Exception {
        List<Map<String, Object>> results = submitAndGet(
            "MATCH (p:person) " +
                "WITH p, 0 AS relevance " +
                "RETURN p.age AS age " +
                "ORDER BY relevance, p.age"
        );

        assertThat(results)
            .extracting("age")
            .containsExactly(27L, 29L, 32L, 35L);
    }

    /** ORDER BY applies after aggregation (count) with a tiebreaker. */
    @Test
    public void orderByAfterAggregation() {
        List<Map<String, Object>> results = submitAndGet(
            "MATCH (p:person)-[:created]->(s:software) " +
                "RETURN p.name AS name, count(s) AS creations " +
                "ORDER BY creations DESC, name ASC"
        );

        assertThat(results)
            .extracting("name", "creations")
            .containsExactly(
                tuple("josh", 2L),
                tuple("marko", 1L),
                tuple("peter", 1L)
            );
    }

    /** Aggregating over an aggregation: count of distinct name groups. */
    @Test
    @Category(SkipWithCosmosDB.IsNeqOnDifferentTypes.class)
    public void doubleAggregation() {
        List<Map<String, Object>> results = submitAndGet(
            "MATCH (p:person)" +
                "WITH p.name AS name, count(p) AS cnt " +
                "RETURN count(cnt) as count2"
        );

        assertThat(results)
            .extracting("count2")
            .containsExactly(4L);
    }
}
2,078
658
/*
 * Copyright 2011-2014 <NAME>. All rights reserved.
 * License: http://www.opensource.org/licenses/BSD-2-Clause
 */

// FPU math lib — scalar helpers and 3/4-component vector operations on raw
// float arrays. All vec* routines take __restrict pointers: result must not
// alias the inputs.

#ifndef FPU_MATH_H_HEADER_GUARD
#define FPU_MATH_H_HEADER_GUARD

#define _USE_MATH_DEFINES
#include <math.h>
#include <string.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

#if defined(_MSC_VER)
// Older MSVC lacks C99 fminf/fmaxf; provide minimal replacements.
inline float fminf(float _a, float _b)
{
	return _a < _b ? _a : _b;
}

inline float fmaxf(float _a, float _b)
{
	return _a > _b ? _a : _b;
}
#endif // BX_COMPILER_MSVC

// Degrees -> radians.
inline float toRad(float _deg)
{
	return _deg * float(M_PI / 180.0);
}

// Radians -> degrees.
inline float toDeg(float _rad)
{
	return _rad * float(180.0 / M_PI);
}

// Clamp _a into [_min, _max].
inline float fclamp(float _a, float _min, float _max)
{
	return fminf(fmaxf(_a, _min), _max);
}

// Clamp into [0, 1].
inline float fsaturate(float _a)
{
	return fclamp(_a, 0.0f, 1.0f);
}

// Linear interpolation from _a to _b by _t (unclamped).
inline float flerp(float _a, float _b, float _t)
{
	return _a + (_b - _a) * _t;
}

// Sign of _a; note 0.0f maps to +1.0f.
inline float fsign(float _a)
{
	return _a < 0.0f ? -1.0f : 1.0f;
}

// Component-wise copy.
inline void vec3Move(float* __restrict _result, const float* __restrict _a)
{
	_result[0] = _a[0];
	_result[1] = _a[1];
	_result[2] = _a[2];
}

// Component-wise absolute value.
inline void vec3Abs(float* __restrict _result, const float* __restrict _a)
{
	_result[0] = fabsf(_a[0]);
	_result[1] = fabsf(_a[1]);
	_result[2] = fabsf(_a[2]);
}

// Component-wise negation.
inline void vec3Neg(float* __restrict _result, const float* __restrict _a)
{
	_result[0] = -_a[0];
	_result[1] = -_a[1];
	_result[2] = -_a[2];
}

inline void vec3Add(float* __restrict _result, const float* __restrict _a, const float* __restrict _b)
{
	_result[0] = _a[0] + _b[0];
	_result[1] = _a[1] + _b[1];
	_result[2] = _a[2] + _b[2];
}

inline void vec3Sub(float* __restrict _result, const float* __restrict _a, const float* __restrict _b)
{
	_result[0] = _a[0] - _b[0];
	_result[1] = _a[1] - _b[1];
	_result[2] = _a[2] - _b[2];
}

// Component-wise (Hadamard) product.
inline void vec3Mul(float* __restrict _result, const float* __restrict _a, const float* __restrict _b)
{
	_result[0] = _a[0] * _b[0];
	_result[1] = _a[1] * _b[1];
	_result[2] = _a[2] * _b[2];
}

// Scale by scalar _b.
inline void vec3Mul(float* __restrict _result, const float* __restrict _a, float _b)
{
	_result[0] = _a[0] * _b;
	_result[1] = _a[1] * _b;
	_result[2] = _a[2] * _b;
}

// Scale a 4-component vector by scalar _b.
inline void vec4Mul(float* __restrict _result, const float* __restrict _a, float _b)
{
	_result[0] = _a[0] * _b;
	_result[1] = _a[1] * _b;
	_result[2] = _a[2] * _b;
	_result[3] = _a[3] * _b;
}

inline float vec3Dot(const float* __restrict _a, const float* __restrict _b)
{
	return _a[0]*_b[0] + _a[1]*_b[1] + _a[2]*_b[2];
}

inline void vec3Cross(float* __restrict _result, const float* __restrict _a, const float* __restrict _b)
{
	_result[0] = _a[1]*_b[2] - _a[2]*_b[1];
	_result[1] = _a[2]*_b[0] - _a[0]*_b[2];
	_result[2] = _a[0]*_b[1] - _a[1]*_b[0];
}

inline float vec3Length(const float* _a)
{
	return sqrtf(vec3Dot(_a, _a) );
}

// Normalizes _a into _result and returns the original length.
// NOTE(review): divides by the length with no zero guard — a zero-length
// input yields inf/NaN components; callers must ensure non-zero input.
inline float vec3Norm(float* __restrict _result, const float* __restrict _a)
{
	const float len = vec3Length(_a);
	const float invLen = 1.0f/len;
	_result[0] = _a[0] * invLen;
	_result[1] = _a[1] * invLen;
	_result[2] = _a[2] * invLen;
	return len;
}

#endif // FPU_MATH_H_HEADER_GUARD
1,501
848
<filename>dsa/WAA-TRD/accel/classification-pre_jpeg/xf_pp_pipeline_accel.cpp<gh_stars>100-1000
/*
 * Copyright 2019 Xilinx, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "xf_pp_pipeline_config.h"

// HLS pre-processing kernel for classification: planar YUV in (three separate
// AXI masters) -> RGB conversion -> resize -> mean-subtract/scale -> output.
// The stages run concurrently under HLS DATAFLOW; statement order below
// defines the stream topology and must not be rearranged.
extern "C" {
// @param Y_img_inp/U_img_inp/V_img_inp  input Y/U/V planes (rows_in x cols_in)
// @param img_out                        pre-processed output buffer
// @param params                         per-channel mean/scale values (3*T_CHANNELS floats)
// @param th1, th2                       thresholds forwarded to xf::cv::preProcess
void pp_pipeline_accel(ap_uint<INPUT_PTR_WIDTH> *Y_img_inp, ap_uint<INPUT_PTR_WIDTH> *U_img_inp, ap_uint<INPUT_PTR_WIDTH> *V_img_inp, ap_uint<OUTPUT_PTR_WIDTH> *img_out, int rows_in, int cols_in, int rows_out, int cols_out, int rows_out_resize, int cols_out_resize, float params[3*T_CHANNELS], int th1, int th2)
{
// Bus interfaces: each input plane on its own AXI master; control via AXI-Lite.
// clang-format off
#pragma HLS INTERFACE m_axi port=Y_img_inp offset=slave bundle=gmem1
#pragma HLS INTERFACE m_axi port=U_img_inp offset=slave bundle=gmem4
#pragma HLS INTERFACE m_axi port=V_img_inp offset=slave bundle=gmem5
#pragma HLS INTERFACE m_axi port=img_out offset=slave bundle=gmem2
#pragma HLS INTERFACE m_axi port=params offset=slave bundle=gmem3

#pragma HLS INTERFACE s_axilite port=Y_img_inp bundle=control
#pragma HLS INTERFACE s_axilite port=U_img_inp bundle=control
#pragma HLS INTERFACE s_axilite port=V_img_inp bundle=control
#pragma HLS INTERFACE s_axilite port=img_out bundle=control
#pragma HLS INTERFACE s_axilite port=params bundle=control
#pragma HLS INTERFACE s_axilite port=rows_in bundle=control
#pragma HLS INTERFACE s_axilite port=cols_in bundle=control
#pragma HLS INTERFACE s_axilite port=rows_out bundle=control
#pragma HLS INTERFACE s_axilite port=cols_out bundle=control
#pragma HLS INTERFACE s_axilite port=rows_out_resize bundle=control
#pragma HLS INTERFACE s_axilite port=cols_out_resize bundle=control
#pragma HLS INTERFACE s_axilite port=th1 bundle=control
#pragma HLS INTERFACE s_axilite port=th2 bundle=control
#pragma HLS INTERFACE s_axilite port=return bundle=control
// clang-format on

	// Streaming xf::cv::Mat wrappers (depth=2: stage-to-stage FIFOs).
	xf::cv::Mat<XF_8UC1, HEIGHT, WIDTH, NPC> y_img_inmat(rows_in, cols_in);
// clang-format off
#pragma HLS stream variable=y_img_inmat.data depth=2
// clang-format on
	xf::cv::Mat<XF_8UC1, HEIGHT, WIDTH, NPC> u_img_inmat(rows_in, cols_in);
// clang-format off
#pragma HLS stream variable=u_img_inmat.data depth=2
// clang-format on
	xf::cv::Mat<XF_8UC1, HEIGHT, WIDTH, NPC> v_img_inmat(rows_in, cols_in);
// clang-format off
#pragma HLS stream variable=v_img_inmat.data depth=2
// clang-format on
	// Interleaved RGB image after colour conversion.
	xf::cv::Mat<XF_8UC3, HEIGHT, WIDTH, NPC>imgInput0(rows_in, cols_in);
// clang-format off
#pragma HLS stream variable=imgInput0.data depth=2
// clang-format on
	// Resized image fed into the mean-sub/scale stage.
	xf::cv::Mat<TYPE, NEWHEIGHT, NEWWIDTH, NPC> out_mat(rows_out, cols_out);
// clang-format off
#pragma HLS stream variable=out_mat.data depth=2
// clang-format on

	hls::stream<ap_uint<INPUT_PTR_WIDTH> > resizeStrmout;

	// Column count rounded up to a multiple of NPC (pixels per clock).
	int srcMat_cols_align_npc = ((out_mat.cols + (NPC - 1)) >> XF_BITSHIFT(NPC)) << XF_BITSHIFT(NPC);

// clang-format off
#pragma HLS DATAFLOW
// clang-format on

	xf::cv::accel_utils obj;

	// DDR -> stream: load the three input planes.
	obj.Array2xfMat<INPUT_PTR_WIDTH,XF_8UC1,HEIGHT, WIDTH, NPC> (Y_img_inp, y_img_inmat);
	obj.Array2xfMat<INPUT_PTR_WIDTH,XF_8UC1,HEIGHT, WIDTH, NPC> (U_img_inp, u_img_inmat);
	obj.Array2xfMat<INPUT_PTR_WIDTH,XF_8UC1,HEIGHT, WIDTH, NPC> (V_img_inp, v_img_inmat);

	//xf::cv::yuv42rgb - YUV to RGB conversion
	xf::cv::yuv42rgb<XF_8UC1, XF_8UC3, HEIGHT, WIDTH, NPC>(y_img_inmat, u_img_inmat, v_img_inmat, imgInput0);

	//xf::cv::resize - Resize 8bit BGR image
	xf::cv::resize<INTERPOLATION,TYPE,HEIGHT,WIDTH,NEWHEIGHT,NEWWIDTH,NPC,MAXDOWNSCALE> (imgInput0, out_mat);

	//conversion of xf::Mat to stream
	obj.xfMat2hlsStrm<INPUT_PTR_WIDTH, TYPE, NEWHEIGHT, NEWWIDTH, NPC, (NEWWIDTH*NEWHEIGHT/8)>(out_mat, resizeStrmout, srcMat_cols_align_npc);

	//xf::cv::preProcess - Mean-Sub and scaling
	xf::cv::preProcess <INPUT_PTR_WIDTH, OUTPUT_PTR_WIDTH, T_CHANNELS, CPW, HEIGHT, WIDTH, NPC, PACK_MODE, X_WIDTH, ALPHA_WIDTH, BETA_WIDTH, GAMMA_WIDTH, OUT_WIDTH, X_IBITS, ALPHA_IBITS, BETA_IBITS, GAMMA_IBITS, OUT_IBITS, SIGNED_IN, OPMODE> (resizeStrmout, img_out, params, rows_out, cols_out, th1, th2);
}
}
2,033
3,227
<gh_stars>1000+ namespace CGAL { namespace Barycentric_coordinates { /*! \ingroup PkgBarycentricCoordinates2RefConcepts \cgalConcept A concept that describes the set of methods that should be defined for all coordinate models used to parameterize the class `Generalized_barycentric_coordinates_2`. \cgalHasModel - `Wachspress_2` - `Mean_value_2` - `Discrete_harmonic_2` \deprecated This part of the package is deprecated since the version 5.4 of \cgal. */ class BarycentricCoordinates_2 { public: /// \name Creation /// @{ /*! Creates a class that implements generalized barycentric coordinates for any query point that does not belong to the polygon's boundary. The polygon is given by a range of vertices of the type `Traits::Point_2` stored in a container of the type <a href="https://en.cppreference.com/w/cpp/container/vector">`std::vector`</a>. */ BarycentricCoordinates_2( const std::vector<Traits::Point_2>& vertices, const Traits& barycentric_traits) { } /// @} /// \name Functions /// @{ /*! A function that computes generalized barycentric coordinates without normalization that are called generalized barycentric weights (as fast as possible algorithm is used). Weights are computed with respect to a query point of the type `Traits::Point_2` and stored in the output iterator `output`. The function returns a pointer to the last stored element. */ boost::optional<OutputIterator> weights( const Traits::Point_2& query_point, OutputIterator& output) { } /*! A function that computes generalized barycentric coordinates on the bounded side of a polygon with one of two possible algorithms: one is precise and one is fast. The algorithm type is specified by the parameter `type_of_algorithm`. Coordinates are computed with respect to a query point of the type `Traits::Point_2` and stored in the output iterator `output`. The function returns a pointer to the last stored element. 
*/ boost::optional<OutputIterator> coordinates_on_bounded_side( const Traits::Point_2& query_point, OutputIterator& output, const Type_of_algorithm type_of_algorithm) { } /*! A function that computes generalized barycentric coordinates on the unbounded side of a polygon with one of two possible algorithms: one is precise and one is fast. The algorithm type is specified by the parameter `type_of_algorithm`. Coordinates are computed with respect to a query point of the type `Traits::Point_2` and stored in the output iterator `output`. The function returns a pointer to the last stored element. */ boost::optional<OutputIterator> coordinates_on_unbounded_side( const Traits::Point_2& query_point, OutputIterator& output, const Type_of_algorithm type_of_algorithm) { } /// @} }; } // namespace Barycentric_coordinates } // namespace CGAL
874
886
<gh_stars>100-1000 package com.flowci.docker.domain; import lombok.AllArgsConstructor; import lombok.Getter; import lombok.Setter; @Getter @AllArgsConstructor(staticName = "of") public final class SSHOption { /** * Private rsa key for remote host access */ private final String privateKey; private final String remoteHost; private final String remoteUser; private final int port = 22; @Setter private int timeoutInSeconds = 10; }
163
3,631
/* * Copyright 2021 Red Hat, Inc. and/or its affiliates. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.drools.testcoverage.functional; import java.util.ArrayList; import java.util.Collection; import java.util.List; import org.assertj.core.api.Assertions; import org.drools.testcoverage.common.listener.TrackingAgendaEventListener; import org.drools.testcoverage.common.util.KieBaseTestConfiguration; import org.drools.testcoverage.common.util.KieBaseUtil; import org.drools.testcoverage.common.util.TestConstants; import org.drools.testcoverage.common.util.TestParametersUtil; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; import org.kie.api.KieBase; import org.kie.api.KieServices; import org.kie.api.command.Command; import org.kie.api.io.Resource; import org.kie.api.runtime.KieSession; @RunWith(Parameterized.class) public class ActivationGroupTest { private final KieBaseTestConfiguration kieBaseTestConfiguration; public ActivationGroupTest(final KieBaseTestConfiguration kieBaseTestConfiguration) { this.kieBaseTestConfiguration = kieBaseTestConfiguration; } @Parameterized.Parameters public static Collection<Object[]> getParameters() { return TestParametersUtil.getKieBaseConfigurations(); } /** * Only one rule from activation group fires. 
*/ @Test public void basicTestActivationGroup() { TrackingAgendaEventListener listener = prepareKSession("basicActivationGroup"); Assertions.assertThat(listener.isRuleFired("basic1")).isFalse(); Assertions.assertThat(listener.isRuleFired("basic2")).isTrue(); Assertions.assertThat(listener.isRuleFired("basic3")).isFalse(); } @Test public void recursiveTestActivationGroup() { TrackingAgendaEventListener listener = prepareKSession("recursiveActivationGroup"); Assertions.assertThat(listener.isRuleFired("simplyRecursive1")).isFalse(); Assertions.assertThat(listener.isRuleFired("simplyRecursive2")).isTrue(); Assertions.assertThat(listener.isRuleFired("simplyRecursive3")).isTrue(); } @Test public void testActivationGroupWithDefaultSalience() { TrackingAgendaEventListener listener = prepareKSession("defaultSalienceActivationGroup"); Assertions.assertThat(listener.rulesCount()).isEqualTo(1); } @Test public void testActivationGroupRecursivelyWithDefaultSalience() { TrackingAgendaEventListener listener = prepareKSession("defaultSalienceWithRecursion"); Assertions.assertThat(listener.rulesCount()).isEqualTo(2); } private TrackingAgendaEventListener prepareKSession(String startingRule) { List<Command<?>> commands = new ArrayList<Command<?>>(); TrackingAgendaEventListener listener = new TrackingAgendaEventListener(); final KieSession ksession = getKieBaseForTest().newKieSession(); try { ksession.addEventListener(listener); ksession.insert(startingRule); ksession.fireAllRules(); } finally { ksession.dispose(); } return listener; } private KieBase getKieBaseForTest() { final Resource drlResource = KieServices.Factory.get().getResources().newClassPathResource("activation-group.drl", getClass()); return KieBaseUtil.getKieBaseFromKieModuleFromResources(TestConstants.PACKAGE_FUNCTIONAL, kieBaseTestConfiguration, drlResource); } }
1,488
1,408
<gh_stars>1000+ /* * Copyright (c) 2020, NVIDIA Corporation. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ #include <stdint.h> #include <arch_helpers.h> #include <lib/mmio.h> #include <plat/common/platform.h> #include <platform_def.h> u_register_t plat_get_stack_protector_canary(void) { u_register_t seed; /* * Ideally, a random number should be returned instead. As the * platform does not have any random number generator, this is * better than nothing, but not really secure. */ seed = mmio_read_32(TEGRA_MISC_BASE + HARDWARE_REVISION_OFFSET); seed <<= 32; seed |= mmio_read_32(TEGRA_TMRUS_BASE); return seed ^ read_cntpct_el0(); }
260
2,151
// Copyright 2018 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CHROME_BROWSER_ANDROID_METRICS_UKM_UTILS_FOR_TEST_H_ #define CHROME_BROWSER_ANDROID_METRICS_UKM_UTILS_FOR_TEST_H_ #include <stdint.h> #include "base/macros.h" namespace ukm { // The native part of java UkmUtilsForTest class. class UkmUtilsForTest { public: static bool IsEnabled(); static bool HasSourceWithId(SourceId source_id); static void RecordSourceWithId(SourceId source_id); private: // Should never be needed, as this class is setup to let it be a friend to // access UKM internals for testing. UkmUtilsForTest(); ~UkmUtilsForTest(); DISALLOW_COPY_AND_ASSIGN(UkmUtilsForTest); }; } // namespace ukm #endif // CHROME_BROWSER_ANDROID_METRICS_UKM_UTILS_FOR_TEST_H_
319
352
<filename>src/test/java/helloworld/behavioral/memento/HelloWorldMementoTest.java package helloworld.behavioral.memento; import org.junit.Test; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.is; /** * @author <EMAIL> */ public class HelloWorldMementoTest { @Test public void testHelloWorldMediator(){ HelloWorldMementoOriginator helloWorldMementoOriginator = new HelloWorldMementoOriginator(); HelloWorldMementoOriginator.Memento memento = helloWorldMementoOriginator.set("Hello Memento!").saveToMemento(); helloWorldMementoOriginator.set("Hello Whatever!"); assertThat(helloWorldMementoOriginator.helloWorld(), is("Hello Whatever!")); helloWorldMementoOriginator.restoreFromMemento(memento); assertThat(helloWorldMementoOriginator.helloWorld(),is("Hello Memento!")); } }
316
572
from app import Coluna def test_coluna_deve_ter_um_nome(): assert Coluna('Fazendo').nome == 'Fazendo'
44
348
{"nom":"Robertot","circ":"10ème circonscription","dpt":"Seine-Maritime","inscrits":174,"abs":73,"votants":101,"blancs":9,"nuls":4,"exp":88,"res":[{"nuance":"REM","nom":"<NAME>","voix":47},{"nuance":"FN","nom":"<NAME>","voix":41}]}
90
342
<filename>sentinel/server/__init__.py # coding=utf-8 from .server import APIServer
29
335
<filename>N/Noodle_verb.json<gh_stars>100-1000 { "word": "Noodle", "definitions": [ "Improvise or play casually on a musical instrument." ], "parts-of-speech": "Verb" }
83
1,463
#import <Foundation/Foundation.h> @class RCTBridge; @interface RNAppGlobals : NSObject { RCTBridge *appBridge; } @property (nonatomic, retain) RCTBridge *appBridge; + (id)sharedInstance; @end
75
412
#include <assert.h> int foo(int * A[]) { int * _A=A[0]; return _A[1]; } int main() { int Y[2]={42, 13}; int * A[1]={ 0 }; // should be Y instead of 0 for assertion to hold int x=foo(A); assert(x==13); return x; }
106
428
/** * Copyright 2008 - 2019 The Loon Game Engine Authors * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. * * @project loon * @author cping * @email:<EMAIL> * @version 0.5 */ package loon.utils; import java.util.Random; import loon.LSysException; import loon.LSystem; /** * 一个UUID生成器,作用是在没有UUID支持的环境获得UUID(为了算法通用,方便移植) * * ps:因为加入了游戏动态纹理内存占用量和纹理数量还有精灵桌面组件数量为因素,[在游戏运行时]是真随机值,不运行就是伪随机…… */ public class UUID { public static UUID convertUUID(String id) { if (id == null) { return null; } if (id.length() != 36) { return null; } long lo, hi; lo = hi = 0; for (int i = 0, j = 0; i < 36; ++j) { switch (i) { case 8: case 13: case 18: case 23: if (id.charAt(i) != '-') { throw new LSysException( "UUID has to be represented by the standard 36-char representation"); } ++i; } int curr; char c = id.charAt(i); if (c >= '0' && c <= '9') { curr = (c - '0'); } else if (c >= 'a' && c <= 'f') { curr = (c - 'a' + 10); } else if (c >= 'A' && c <= 'F') { curr = (c - 'A' + 10); } else { throw new LSysException( "Non-hex character at #" + i + ": '" + c + "' (value 0x" + CharUtils.toHex(c) + ")"); } curr = (curr << 4); c = id.charAt(++i); if (c >= '0' && c <= '9') { curr |= (c - '0'); } else if (c >= 'a' && c <= 'f') { curr |= (c - 'a' + 10); } else if (c >= 'A' && c <= 'F') { curr |= (c - 'A' + 10); } else { throw new LSysException( "Non-hex character at #" + i + ": '" + c + "' (value 0x" + CharUtils.toHex(c) + ")"); } if (j < 8) { hi = (hi << 8) | curr; } else { lo = (lo << 8) | curr; } ++i; } return new 
UUID(hi, lo); } private boolean dirty; private String uuidString; private long mostSigBits; private long leastSigBits; public UUID() { this(MathUtils.random); } public UUID(Random random) { this(random.nextLong(), random.nextLong()); } public UUID(byte[] bytes) { checkUUIDByteArray(bytes, 0); long r1 = getLong(bytes, 0); long r2 = getLong(bytes, 8); this.set(r1, r2); } public UUID(long r1, long r2) { this.set(r1, r2); } public void set(long r1, long r2) { mostSigBits = r1; leastSigBits = r2; dirty = true; } protected int getInt(byte[] buffer, int offset) { return (buffer[offset] << 24) | ((buffer[offset + 1] & 0xFF) << 16) | ((buffer[offset + 2] & 0xFF) << 8) | (buffer[offset + 3] & 0xFF); } protected long getLong(byte[] buffer, int offset) { long hi = ((long) getInt(buffer, offset)) << 32; long lo = (((long) getInt(buffer, offset + 4)) << 32) >>> 32; return hi | lo; } protected void checkUUIDByteArray(byte[] bytes, int offset) { if (bytes == null) { throw new LSysException("Invalid byte[] passed: can not be null"); } if (offset < 0) { throw new LSysException("Invalid offset (" + offset + ") passed: can not be negative"); } if ((offset + 16) > bytes.length) { throw new LSysException( "Invalid offset (" + offset + ") passed: not enough room in byte array (need 16 bytes)"); } } @Override public String toString() { if (dirty) { long millis = TimeUtils.millis(); mostSigBits = mostSigBits + ((millis / 2) + MathUtils.nextInt((int) leastSigBits)); leastSigBits = leastSigBits + ((millis / 3) + MathUtils.nextInt((int) mostSigBits)); mostSigBits += MathUtils.max(LSystem.getTextureMemSize(), MathUtils.random(10)) * MathUtils.random(986429531); leastSigBits += MathUtils.max(LSystem.countTexture(), MathUtils.random(10)) * MathUtils.random(895318642); mostSigBits += LSystem.allSpritesCount() * MathUtils.random(135799876); leastSigBits += LSystem.allDesktopCount() * MathUtils.random(246805432); uuidString = (digits(mostSigBits >> 32, 8) + "-" + digits(mostSigBits >> 16, 4) + "-" 
+ digits(mostSigBits, 4) + "-" + digits(leastSigBits >> 48, 4) + "-" + digits(leastSigBits, 12)); dirty = false; } return uuidString; } private String toUnsignedString(long val) { int mag = 64 - MathUtils.longOfZeros(val); int chars = MathUtils.max(((mag + (4 - 1)) / 4), 1); char[] buf = new char[chars]; CharUtils.toUnsignedLong(val, 4, buf, 0, chars); return new String(buf); } private String digits(long val, int digits) { long hi = 1L << (digits * 4); return toUnsignedString(hi | (val & (hi - 1))).substring(1); } @Override public int hashCode() { long hilo = mostSigBits ^ leastSigBits; return ((int) (hilo >> 32)) ^ (int) hilo; } }
2,247
582
/** * This program and the accompanying materials * are made available under the terms of the License * which accompanies this distribution in the file LICENSE.txt */ package com.archimatetool.canvas.dnd; import org.eclipse.gef.EditPartViewer; import org.eclipse.swt.dnd.URLTransfer; /** * URL DnD listener * * @author <NAME> */ public class URLTransferDropTargetListener extends FileTransferDropTargetListener { public URLTransferDropTargetListener(EditPartViewer viewer) { super(viewer); setTransfer(URLTransfer.getInstance()); } }
181
1,380
<filename>app/src/main/java/com/github/jorgecastilloprz/fabprogresscircle/interactor/MockAction.java /* * Copyright (C) 2015 <NAME> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.github.jorgecastilloprz.fabprogresscircle.interactor; import com.github.jorgecastilloprz.fabprogresscircle.executor.Interactor; import com.github.jorgecastilloprz.fabprogresscircle.executor.MainThread; import com.github.jorgecastilloprz.fabprogresscircle.executor.MainThreadImpl; /** * @author <NAME> */ public class MockAction implements Interactor { private MockActionCallback callback; private MainThread mainThread; public MockAction(MockActionCallback callback) { this.callback = callback; this.mainThread = new MainThreadImpl(); } @Override public void run() { mockLoadingTime(); notifyActionComplete(); } private void mockLoadingTime() { try { Thread.sleep(3000); } catch (InterruptedException e) { //Empty } } private void notifyActionComplete() { mainThread.post(new Runnable() { @Override public void run() { callback.onMockActionComplete(); } }); } }
514
458
// Copyright 2015-2021 Swim Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package swim.codec; import java.nio.ByteBuffer; final class Base64Writer extends Writer<Object, Object> { final Base64 base64; final Object value; final ByteBuffer input; final int index; final int limit; final int step; Base64Writer(Base64 base64, Object value, ByteBuffer input, int index, int limit, int step) { this.base64 = base64; this.value = value; this.input = input; this.index = index; this.limit = limit; this.step = step; } Base64Writer(Base64 base64, Object value, ByteBuffer input) { this(base64, value, input, input.position(), input.limit(), 1); } Base64Writer(Base64 base64, Object value, byte[] input) { this(base64, value, ByteBuffer.wrap(input)); } Base64Writer(Base64 base64) { this(base64, null, null, 0, 0, 0); } @Override public Writer<Object, Object> feed(Object value) { if (value instanceof ByteBuffer) { return new Base64Writer(this.base64, null, (ByteBuffer) value); } else if (value instanceof byte[]) { return new Base64Writer(this.base64, null, (byte[]) value); } else { throw new IllegalArgumentException(value.toString()); } } @Override public Writer<Object, Object> pull(Output<?> output) { return Base64Writer.write(output, this.base64, this.value, this.input, this.index, this.limit, this.step); } static Writer<Object, Object> write(Output<?> output, Base64 base64, Object value, ByteBuffer input, int index, int limit, int step) { while (index + 2 < limit && 
output.isCont()) { final int x = input.get(index) & 0xff; final int y = input.get(index + 1) & 0xff; final int z = input.get(index + 2) & 0xff; if (step == 1 && output.isCont()) { output = output.write(base64.encodeDigit(x >>> 2)); step = 2; } if (step == 2 && output.isCont()) { output = output.write(base64.encodeDigit(((x << 4) | (y >>> 4)) & 0x3f)); step = 3; } if (step == 3 && output.isCont()) { output = output.write(base64.encodeDigit(((y << 2) | (z >>> 6)) & 0x3f)); step = 4; } if (step == 4 && output.isCont()) { output = output.write(base64.encodeDigit(z & 0x3f)); index += 3; step = 1; } } if (index + 1 < limit && output.isCont()) { final int x = input.get(index) & 0xff; final int y = input.get(index + 1) & 0xff; if (step == 1 && output.isCont()) { output = output.write(base64.encodeDigit(x >>> 2)); step = 2; } if (step == 2 && output.isCont()) { output = output.write(base64.encodeDigit(((x << 4) | (y >>> 4)) & 0x3f)); step = 3; } if (step == 3 && output.isCont()) { output = output.write(base64.encodeDigit((y << 2) & 0x3f)); step = 4; } if (step == 4) { if (!base64.isPadded()) { index += 2; } else if (output.isCont()) { output = output.write('='); index += 2; } } } else if (index < limit && output.isCont()) { final int x = input.get(index) & 0xff; if (step == 1 && output.isCont()) { output = output.write(base64.encodeDigit(x >>> 2)); step = 2; } if (step == 2 && output.isCont()) { output = output.write(base64.encodeDigit((x << 4) & 0x3f)); step = 3; } if (step == 3) { if (!base64.isPadded()) { index += 1; } else if (output.isCont()) { output = output.write('='); step = 4; } } if (step == 4 && output.isCont()) { output = output.write('='); index += 1; } } if (index == limit) { return Writer.done(value); } else if (output.isDone()) { return Writer.error(new WriterException("truncated")); } else if (output.isError()) { return Writer.error(output.trap()); } return new Base64Writer(base64, value, input, index, limit, step); } static Writer<?, ?> write(Output<?> 
output, Base64 base64, Object value, ByteBuffer input) { return Base64Writer.write(output, base64, value, input, input.position(), input.limit(), 1); } static Writer<?, ?> write(Output<?> output, Base64 base64, Object value, byte[] input) { return Base64Writer.write(output, base64, value, ByteBuffer.wrap(input)); } }
2,086
8,772
<reponame>dgusoff/cas package org.apereo.cas.adaptors.x509.authentication; import lombok.Getter; import lombok.RequiredArgsConstructor; import java.security.GeneralSecurityException; import java.time.ZonedDateTime; /** * Exception describing an expired CRL condition. * * @author <NAME> * @since 3.4.6 */ @Getter @RequiredArgsConstructor public class ExpiredCRLException extends GeneralSecurityException { private static final long serialVersionUID = 5157864033250359972L; /** * Identifier/name of CRL. */ private final String id; /** * CRL expiration date. */ private final ZonedDateTime expirationDate; /** * Leniency of expiration. */ private final int leniency; /** * Creates a new instance describing a CRL that expired on the given date. * * @param identifier Identifier or name that describes CRL. * @param expirationDate CRL expiration date. */ public ExpiredCRLException(final String identifier, final ZonedDateTime expirationDate) { this(identifier, expirationDate, 0); } @Override public String getMessage() { if (this.leniency > 0) { return String.format("CRL %s expired on %s and is beyond the leniency period of %s seconds.", this.id, this.expirationDate, this.leniency); } return String.format("CRL %s expired on %s", this.id, this.expirationDate); } }
518
335
{ "word": "Entertain", "definitions": [ "Provide (someone) with amusement or enjoyment.", "Receive (someone) as a guest and provide them with food and drink.", "Give attention or consideration to (an idea or feeling)" ], "parts-of-speech": "Verb" }
107
1,169
// -*- mode: java; c-basic-offset: 2; -*- // Copyright 2017 MIT, All rights reserved // Released under the Apache License, Version 2.0 // http://www.apache.org/licenses/LICENSE-2.0 package com.google.appinventor.client.utils; import com.google.appinventor.client.widgets.boxes.Box; import com.google.gwt.user.client.ui.Button; import com.google.gwt.user.client.ui.ClickListener; import com.google.gwt.user.client.ui.DialogBox; import com.google.gwt.user.client.ui.FlowPanel; import com.google.gwt.user.client.ui.HTML; import com.google.gwt.user.client.ui.VerticalPanel; import com.google.gwt.user.client.ui.Widget; /** * A Utility for Dialog Boxes * * Put up a Dialog Box in the middle of the screen. It must have an * "OK" button and can optionally have a "Cancel" button. Also takes * an object to call on OK or Cancel after the OK or Cancel button is * pressed. This dialog is modal, locking out other activity. * * @author <EMAIL> (<NAME>) */ public class MessageDialog { public interface Actions { /** * Action to perform when OK button is pressed. * Note: the dialog box itself is already dismissed */ public void onOK(); /** * Action to peform when the Cancel button is pressed. * Note: the dialog box itself is already dismissed */ public void onCancel(); } private MessageDialog() { } /** * Put up a modal dialog box. 
* * @param title Title for the dialog, already internationalized * @param message Message box content, already internationalized * @param OK String for OK button, already internationalized * @param Cancel String for Cancel button, null if non, internationalized * @param actions Actions object to call upon completion, can be null */ public static void messageDialog(String title, String message, String OK, String Cancel, final Actions actions) { final DialogBox dialogBox = new DialogBox(false, true); // DialogBox(autohide, modal) dialogBox.setStylePrimaryName("ode-DialogBox"); dialogBox.setText(title); dialogBox.setHeight("100px"); dialogBox.setWidth("400px"); dialogBox.setGlassEnabled(true); dialogBox.setAnimationEnabled(true); dialogBox.center(); VerticalPanel DialogBoxContents = new VerticalPanel(); HTML messageHtml = new HTML("<p>" + message + "</p>"); messageHtml.setStyleName("DialogBox-message"); FlowPanel holder = new FlowPanel(); Button okButton = new Button(OK); okButton.addClickListener(new ClickListener() { public void onClick(Widget sender) { dialogBox.hide(); if (actions != null) actions.onOK(); } }); holder.add(okButton); if (Cancel != null) { Button cancelButton = new Button(Cancel); cancelButton.addClickListener(new ClickListener() { @Override public void onClick(Widget sender) { dialogBox.hide(); if (actions != null) actions.onCancel(); } }); holder.add(cancelButton); } DialogBoxContents.add(messageHtml); DialogBoxContents.add(holder); dialogBox.setWidget(DialogBoxContents); dialogBox.show(); } }
1,119
1,062
/** * Copyright 2014 Google Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <string> #include <stdexcept> #include <map> #include "metadata/metadata_api.h" #include "util/util_api.h" namespace MR4C { class MetadataTypes { friend class MetadataElement; private: std::map<std::string,MetadataElement::Type> m_stringToEnum; std::map<MetadataElement::Type,std::string> m_enumToString; static MetadataTypes& instance() { static MetadataTypes s_instance; return s_instance; } MetadataTypes() { mapType(MetadataElement::FIELD, "FIELD"); mapType(MetadataElement::ARRAY, "ARRAY"); mapType(MetadataElement::LIST, "LIST"); mapType(MetadataElement::MAP, "MAP"); mapType(MetadataElement::KEY, "KEY"); } // making sure these are private MetadataTypes(const MetadataTypes& types); MetadataTypes& operator=(const MetadataTypes& types); void mapType(MetadataElement::Type type, const std::string& strType) { m_stringToEnum[strType] = type; m_enumToString[type] = strType; } MetadataElement::Type enumFromString(std::string strType) { if ( m_stringToEnum.count(strType)==0 ) { MR4C_THROW(std::invalid_argument, "No metadata element type named [" << strType << "]"); } return m_stringToEnum[strType]; } std::string enumToString(MetadataElement::Type type) { if ( m_enumToString.count(type)==0 ) { MR4C_THROW(std::invalid_argument, "No metadata element type enum = " << type); } return m_enumToString[type]; } void validateCast( MetadataElement::Type actualType, MetadataElement::Type 
targetType) { if ( actualType!=targetType ) { MR4C_THROW(std::runtime_error, "Illegal metadata cast attempt: " << enumToString(actualType) << " to " << enumToString(targetType)); } } }; MetadataElement::Type MetadataElement::enumFromString(std::string strType) { return MetadataTypes::instance().enumFromString(strType); } std::string MetadataElement::enumToString(MetadataElement::Type type) { return MetadataTypes::instance().enumToString(type); } template<typename T> const T* MetadataElement::castElement(const MetadataElement* element, Type type) { MetadataTypes::instance().validateCast(element->getMetadataElementType(), type); return dynamic_cast<const T*>(element); } template<typename T> T* MetadataElement::castElement(MetadataElement* element, Type type) { MetadataTypes::instance().validateCast(element->getMetadataElementType(), type); return dynamic_cast<T*>(element); } template<typename T> const T& MetadataElement::castElement(const MetadataElement& element, Type type) { MetadataTypes::instance().validateCast(element.getMetadataElementType(), type); return dynamic_cast<const T&>(element); } template<typename T> T& MetadataElement::castElement(MetadataElement& element, Type type) { MetadataTypes::instance().validateCast(element.getMetadataElementType(), type); return dynamic_cast<T&>(element); } // These are all the possible instantiations of the above templates // They are included here to avoid having to put implementations into header files template const MetadataField* MetadataElement::castElement<MetadataField>(const MetadataElement* element, Type type); template const MetadataArray* MetadataElement::castElement<MetadataArray>(const MetadataElement* element, Type type); template const MetadataKey* MetadataElement::castElement<MetadataKey>(const MetadataElement* element, Type type); template const MetadataList* MetadataElement::castElement<MetadataList>(const MetadataElement* element, Type type); template const MetadataMap* 
MetadataElement::castElement<MetadataMap>(const MetadataElement* element, Type type); template MetadataField* MetadataElement::castElement<MetadataField>(MetadataElement* element, Type type); template MetadataArray* MetadataElement::castElement<MetadataArray>(MetadataElement* element, Type type); template MetadataKey* MetadataElement::castElement<MetadataKey>(MetadataElement* element, Type type); template MetadataList* MetadataElement::castElement<MetadataList>(MetadataElement* element, Type type); template MetadataMap* MetadataElement::castElement<MetadataMap>(MetadataElement* element, Type type); template const MetadataField& MetadataElement::castElement<MetadataField>(const MetadataElement& element, Type type); template const MetadataArray& MetadataElement::castElement<MetadataArray>(const MetadataElement& element, Type type); template const MetadataKey& MetadataElement::castElement<MetadataKey>(const MetadataElement& element, Type type); template const MetadataList& MetadataElement::castElement<MetadataList>(const MetadataElement& element, Type type); template const MetadataMap& MetadataElement::castElement<MetadataMap>(const MetadataElement& element, Type type); template MetadataField& MetadataElement::castElement<MetadataField>(MetadataElement& element, Type type); template MetadataArray& MetadataElement::castElement<MetadataArray>(MetadataElement& element, Type type); template MetadataKey& MetadataElement::castElement<MetadataKey>(MetadataElement& element, Type type); template MetadataList& MetadataElement::castElement<MetadataList>(MetadataElement& element, Type type); template MetadataMap& MetadataElement::castElement<MetadataMap>(MetadataElement& element, Type type); }
1,745
1,076
#include "defines.h"
#include "IDirect3D9Proxy.h"

#pragma pack(1)

HINSTANCE hlThis = 0;        // handle of this proxy DLL
HINSTANCE hlD3D9 = 0;        // handle of the real system d3d9.dll
FARPROC origProc[15] = {0};  // original export addresses, indexed by proxy function number

// Loads the real d3d9.dll from the system directory on process attach and
// resolves every export we forward; frees it again on process detach.
BOOL WINAPI DllMain(HINSTANCE hInst, DWORD reason, LPVOID){
	if (reason == DLL_PROCESS_ATTACH){
		hlThis = hInst;

		//Get path to the original d3d9.dll
		char infoBuf[MAX_PATH];
		GetSystemDirectory(infoBuf, MAX_PATH);
		strcat_s(infoBuf, MAX_PATH, "\\d3d9.dll");
		//And load it...
		hlD3D9 = LoadLibrary(infoBuf);
		if (!hlD3D9){
			// Fix: MessageBox takes (hWnd, lpText, lpCaption, uType) — the original
			// code passed the short title as the message text and the long
			// explanation as the window caption. Swapped into the correct order.
			MessageBox(NULL, "Cannot find original d3d9.dll in the system directory!", "D3D9 Proxy DLL error", MB_OK | MB_ICONERROR);
			return FALSE;
		}

		//Load original functions
		origProc[0] = GetProcAddress(hlD3D9, "D3DPERF_BeginEvent");
		origProc[1] = GetProcAddress(hlD3D9, "D3DPERF_EndEvent");
		origProc[2] = GetProcAddress(hlD3D9, "D3DPERF_GetStatus");
		origProc[3] = GetProcAddress(hlD3D9, "D3DPERF_QueryRepeatFrame");
		origProc[4] = GetProcAddress(hlD3D9, "D3DPERF_SetMarker");
		origProc[5] = GetProcAddress(hlD3D9, "D3DPERF_SetOptions");
		origProc[6] = GetProcAddress(hlD3D9, "D3DPERF_SetRegion");
		origProc[7] = GetProcAddress(hlD3D9, "DebugSetLevel");
		origProc[8] = GetProcAddress(hlD3D9, "DebugSetMute");
		origProc[9] = GetProcAddress(hlD3D9, "Direct3DCreate9");
		origProc[10] = GetProcAddress(hlD3D9, "Direct3DCreate9Ex");
		origProc[11] = GetProcAddress(hlD3D9, "Direct3DShaderValidatorCreate9");
		origProc[12] = GetProcAddress(hlD3D9, "PSGPError");
		origProc[13] = GetProcAddress(hlD3D9, "PSGPSampleTexture");
	}else if (reason == DLL_PROCESS_DETACH){
		FreeLibrary(hlD3D9);
	}
	return TRUE;
}

//Direct3DCreate9
// Calls the real Direct3DCreate9 and wraps the returned interface in our proxy
// so all subsequent D3D9 calls can be intercepted.
extern "C" IDirect3D9* WINAPI __ProxyFunc9(UINT SDKVersion){
	//Recall original function
	typedef IDirect3D9* (WINAPI* Direct3DCreate9Func)(UINT sdkver);
	Direct3DCreate9Func origDirect3DCreate9 = (Direct3DCreate9Func)GetProcAddress(hlD3D9, "Direct3DCreate9");
	IDirect3D9* res = origDirect3DCreate9(SDKVersion);
	return new IDirect3D9Proxy(res);
}

// The remaining exports are pure pass-through trampolines: a naked function
// that jumps straight to the original export. Note MASM inline-asm indexing is
// in BYTES, so `origProc[N*4]` selects element N of the 4-byte (x86) FARPROC
// table.

//Direct3DCreate9Ex
extern "C" __declspec(naked) void __stdcall __ProxyFunc10(){
	__asm{
		jmp origProc[10*4];
	}
}

extern "C" __declspec(naked) void __stdcall __ProxyFunc0(){
	__asm{
		jmp origProc[0*4];
	}
}

extern "C" __declspec(naked) void __stdcall __ProxyFunc1(){
	__asm{
		jmp origProc[1*4];
	}
}

extern "C" __declspec(naked) void __stdcall __ProxyFunc2(){
	__asm{
		jmp origProc[2*4];
	}
}

extern "C" __declspec(naked) void __stdcall __ProxyFunc3(){
	__asm{
		jmp origProc[3*4];
	}
}

extern "C" __declspec(naked) void __stdcall __ProxyFunc4(){
	__asm{
		jmp origProc[4*4];
	}
}

extern "C" __declspec(naked) void __stdcall __ProxyFunc5(){
	__asm{
		jmp origProc[5*4];
	}
}

extern "C" __declspec(naked) void __stdcall __ProxyFunc6(){
	__asm{
		jmp origProc[6*4];
	}
}

extern "C" __declspec(naked) void __stdcall __ProxyFunc7(){
	__asm{
		jmp origProc[7*4];
	}
}

extern "C" __declspec(naked) void __stdcall __ProxyFunc8(){
	__asm{
		jmp origProc[8*4];
	}
}

extern "C" __declspec(naked) void __stdcall __ProxyFunc11(){
	__asm{
		jmp origProc[11*4];
	}
}

extern "C" __declspec(naked) void __stdcall __ProxyFunc12(){
	__asm{
		jmp origProc[12*4];
	}
}

extern "C" __declspec(naked) void __stdcall __ProxyFunc13(){
	__asm{
		jmp origProc[13*4];
	}
}
1,504
1,279
<filename>tests/translator/output/error_http_api_event_multiple_same_path.json<gh_stars>1000+ { "errors": [ { "errorMessage": "Resource with id [HttpApiFunction2] is invalid. Event with id [Api2] is invalid. API method 'x-amazon-apigateway-any-method' defined multiple times for path '$default'." } ], "errorMessage": "Invalid Serverless Application Specification document. Number of errors found: 1. Resource with id [HttpApiFunction2] is invalid. Event with id [Api2] is invalid. API method 'x-amazon-apigateway-any-method' defined multiple times for path '$default'." }
184
310
{ "name": "<NAME>", "description": "A ham radio.", "url": "http://www.elecraft.com/k2_page.htm" }
44
2,107
/*- * BSD LICENSE * * Copyright (c) Intel Corporation. * All rights reserved. * * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/
#include "spdk/stdinc.h"

#include "spdk/env.h"
#include "spdk/util.h"
#include "spdk/memory.h"
#include "spdk/likely.h"

#include "spdk/log.h"
#include "spdk_internal/idxd.h"

#include "idxd.h"

/* Per-device state for the user-space (PCI) IDXD driver.  The embedded
 * spdk_idxd_device must be the first member so the __user_idxd() cast works. */
struct spdk_user_idxd_device {
	struct spdk_idxd_device idxd;
	struct spdk_pci_device *device;
	int sock_id;
	struct idxd_registers registers;
	void *reg_base; /* mapped MMIO BAR0 */
	uint32_t wqcfg_offset;
	uint32_t grpcfg_offset;
	uint32_t ims_offset;
	uint32_t msix_perm_offset;
	uint32_t perfmon_offset;
};

typedef bool (*spdk_idxd_probe_cb)(void *cb_ctx, struct spdk_pci_device *pci_dev);

/* Downcast from the common idxd device to the user-space wrapper. */
#define __user_idxd(idxd) (struct spdk_user_idxd_device *)idxd

pthread_mutex_t g_driver_lock = PTHREAD_MUTEX_INITIALIZER;

static struct device_config g_user_dev_cfg = {};

static struct spdk_idxd_device *idxd_attach(struct spdk_pci_device *device);

/* 32-bit MMIO read relative to the mapped BAR0 base. */
static uint32_t
_idxd_read_4(struct spdk_idxd_device *idxd, uint32_t offset)
{
	struct spdk_user_idxd_device *user_idxd = __user_idxd(idxd);

	return spdk_mmio_read_4((uint32_t *)(user_idxd->reg_base + offset));
}

/* 32-bit MMIO write relative to the mapped BAR0 base. */
static void
_idxd_write_4(struct spdk_idxd_device *idxd, uint32_t offset, uint32_t value)
{
	struct spdk_user_idxd_device *user_idxd = __user_idxd(idxd);

	spdk_mmio_write_4((uint32_t *)(user_idxd->reg_base + offset), value);
}

/* 64-bit MMIO read relative to the mapped BAR0 base. */
static uint64_t
_idxd_read_8(struct spdk_idxd_device *idxd, uint32_t offset)
{
	struct spdk_user_idxd_device *user_idxd = __user_idxd(idxd);

	return spdk_mmio_read_8((uint64_t *)(user_idxd->reg_base + offset));
}

/* 64-bit MMIO write relative to the mapped BAR0 base. */
static void
_idxd_write_8(struct spdk_idxd_device *idxd, uint32_t offset, uint64_t value)
{
	struct spdk_user_idxd_device *user_idxd = __user_idxd(idxd);

	spdk_mmio_write_8((uint64_t *)(user_idxd->reg_base + offset), value);
}

/* Stores a copy of the caller-supplied device configuration in the
 * module-global g_user_dev_cfg (config_num is unused here). */
static void
user_idxd_set_config(struct device_config *dev_cfg, uint32_t config_num)
{
	g_user_dev_cfg = *dev_cfg;
}

/* Used for control commands, not for descriptor submission.
 */
static int
idxd_wait_cmd(struct spdk_idxd_device *idxd, int _timeout)
{
	uint32_t timeout = _timeout;
	union idxd_cmdsts_reg cmd_status = {};

	/* Poll the command status register (1 us per iteration) until the
	 * device clears the active bit or the timeout budget is exhausted. */
	cmd_status.raw = _idxd_read_4(idxd, IDXD_CMDSTS_OFFSET);
	while (cmd_status.active && --timeout) {
		usleep(1);
		cmd_status.raw = _idxd_read_4(idxd, IDXD_CMDSTS_OFFSET);
	}

	/* Check for timeout */
	if (timeout == 0 && cmd_status.active) {
		SPDK_ERRLOG("Command timeout, waited %u\n", _timeout);
		return -EBUSY;
	}

	/* Check for error */
	if (cmd_status.err) {
		SPDK_ERRLOG("Command status reg reports error 0x%x\n", cmd_status.err);
		return -EINVAL;
	}

	return 0;
}

/* Unmaps one previously mapped PCI BAR (MMIO or WQ portal); no-op for an
 * unrecognized bar id. */
static int
idxd_unmap_pci_bar(struct spdk_idxd_device *idxd, int bar)
{
	int rc = 0;
	void *addr = NULL;
	struct spdk_user_idxd_device *user_idxd = __user_idxd(idxd);

	if (bar == IDXD_MMIO_BAR) {
		addr = (void *)user_idxd->reg_base;
	} else if (bar == IDXD_WQ_BAR) {
		addr = (void *)idxd->portals;
	}

	if (addr) {
		rc = spdk_pci_device_unmap_bar(user_idxd->device, 0, addr);
	}
	return rc;
}

/* Maps the device's MMIO register BAR and the work-queue portal BAR;
 * unmaps the first on failure of the second. */
static int
idxd_map_pci_bars(struct spdk_idxd_device *idxd)
{
	int rc;
	void *addr;
	uint64_t phys_addr, size;
	struct spdk_user_idxd_device *user_idxd = __user_idxd(idxd);

	rc = spdk_pci_device_map_bar(user_idxd->device, IDXD_MMIO_BAR, &addr, &phys_addr, &size);
	if (rc != 0 || addr == NULL) {
		SPDK_ERRLOG("pci_device_map_range failed with error code %d\n", rc);
		return -1;
	}
	user_idxd->reg_base = addr;

	rc = spdk_pci_device_map_bar(user_idxd->device, IDXD_WQ_BAR, &addr, &phys_addr, &size);
	if (rc != 0 || addr == NULL) {
		SPDK_ERRLOG("pci_device_map_range failed with error code %d\n", rc);
		rc = idxd_unmap_pci_bar(idxd, IDXD_MMIO_BAR);
		if (rc) {
			SPDK_ERRLOG("unable to unmap MMIO bar\n");
		}
		return -EINVAL;
	}
	idxd->portals = addr;

	return 0;
}

/* Issues a device-reset command and waits for its completion. */
static int
idxd_reset_dev(struct spdk_idxd_device *idxd)
{
	int rc;

	_idxd_write_4(idxd, IDXD_CMD_OFFSET, IDXD_RESET_DEVICE << IDXD_CMD_SHIFT);
	rc = idxd_wait_cmd(idxd, IDXD_REGISTER_TIMEOUT_US);
	if (rc < 0) {
		SPDK_ERRLOG("Error resetting device %u\n", rc);
	}

	return rc;
}

/*
 * Build group config based on getting info from the device combined
 * with the defined configuration. Once built, it is written to the
 * device.
 */
static int
idxd_group_config(struct spdk_idxd_device *idxd)
{
	int i;
	uint64_t base_offset;
	struct spdk_user_idxd_device *user_idxd = __user_idxd(idxd);

	assert(g_user_dev_cfg.num_groups <= user_idxd->registers.groupcap.num_groups);
	idxd->groups = calloc(user_idxd->registers.groupcap.num_groups, sizeof(struct idxd_group));
	if (idxd->groups == NULL) {
		SPDK_ERRLOG("Failed to allocate group memory\n");
		return -ENOMEM;
	}

	/* Distribute engines round-robin across the configured groups. */
	assert(g_user_dev_cfg.total_engines <= user_idxd->registers.enginecap.num_engines);
	for (i = 0; i < g_user_dev_cfg.total_engines; i++) {
		idxd->groups[i % g_user_dev_cfg.num_groups].grpcfg.engines |= (1 << i);
	}

	/* Distribute work queues round-robin across the configured groups. */
	assert(g_user_dev_cfg.total_wqs <= user_idxd->registers.wqcap.num_wqs);
	for (i = 0; i < g_user_dev_cfg.total_wqs; i++) {
		idxd->groups[i % g_user_dev_cfg.num_groups].grpcfg.wqs[0] |= (1 << i);
	}

	for (i = 0; i < g_user_dev_cfg.num_groups; i++) {
		idxd->groups[i].idxd = idxd;
		idxd->groups[i].id = i;

		/* Divide BW tokens evenly */
		idxd->groups[i].grpcfg.flags.tokens_allowed =
			user_idxd->registers.groupcap.total_tokens / g_user_dev_cfg.num_groups;
	}

	/*
	 * Now write the group config to the device for all groups. We write
	 * to the max number of groups in order to 0 out the ones we didn't
	 * configure.
	 */
	for (i = 0 ; i < user_idxd->registers.groupcap.num_groups; i++) {
		base_offset = user_idxd->grpcfg_offset + i * 64;

		/* GRPWQCFG, work queues config */
		_idxd_write_8(idxd, base_offset, idxd->groups[i].grpcfg.wqs[0]);

		/* GRPENGCFG, engine config */
		_idxd_write_8(idxd, base_offset + CFG_ENGINE_OFFSET, idxd->groups[i].grpcfg.engines);

		/* GRPFLAGS, flags config */
		_idxd_write_8(idxd, base_offset + CFG_FLAG_OFFSET, idxd->groups[i].grpcfg.flags.raw);
	}

	return 0;
}

/*
 * Build work queue (WQ) config based on getting info from the device combined
 * with the defined configuration. Once built, it is written to the device.
*/ static int idxd_wq_config(struct spdk_user_idxd_device *user_idxd) { int i, j; struct idxd_wq *queue; struct spdk_idxd_device *idxd = &user_idxd->idxd; u_int32_t wq_size = user_idxd->registers.wqcap.total_wq_size / g_user_dev_cfg.total_wqs; SPDK_NOTICELOG("Total ring slots available space 0x%x, so per work queue is 0x%x\n", user_idxd->registers.wqcap.total_wq_size, wq_size); assert(g_user_dev_cfg.total_wqs <= IDXD_MAX_QUEUES); assert(g_user_dev_cfg.total_wqs <= user_idxd->registers.wqcap.num_wqs); assert(LOG2_WQ_MAX_BATCH <= user_idxd->registers.gencap.max_batch_shift); assert(LOG2_WQ_MAX_XFER <= user_idxd->registers.gencap.max_xfer_shift); idxd->total_wq_size = user_idxd->registers.wqcap.total_wq_size; /* Spread the channels we allow per device based on the total number of WQE to try * and achieve optimal performance for common cases. */ idxd->chan_per_device = (idxd->total_wq_size >= 128) ? 8 : 4; idxd->queues = calloc(1, user_idxd->registers.wqcap.num_wqs * sizeof(struct idxd_wq)); if (idxd->queues == NULL) { SPDK_ERRLOG("Failed to allocate queue memory\n"); return -ENOMEM; } for (i = 0; i < g_user_dev_cfg.total_wqs; i++) { queue = &user_idxd->idxd.queues[i]; queue->wqcfg.wq_size = wq_size; queue->wqcfg.mode = WQ_MODE_DEDICATED; queue->wqcfg.max_batch_shift = LOG2_WQ_MAX_BATCH; queue->wqcfg.max_xfer_shift = LOG2_WQ_MAX_XFER; queue->wqcfg.wq_state = WQ_ENABLED; queue->wqcfg.priority = WQ_PRIORITY_1; /* Not part of the config struct */ queue->idxd = &user_idxd->idxd; queue->group = &idxd->groups[i % g_user_dev_cfg.num_groups]; } /* * Now write the work queue config to the device for all wq space */ for (i = 0 ; i < user_idxd->registers.wqcap.num_wqs; i++) { queue = &idxd->queues[i]; for (j = 0 ; j < WQCFG_NUM_DWORDS; j++) { _idxd_write_4(idxd, user_idxd->wqcfg_offset + i * 32 + j * 4, queue->wqcfg.raw[j]); } } return 0; } static int idxd_device_configure(struct spdk_user_idxd_device *user_idxd) { int i, rc = 0; union idxd_offsets_register offsets_reg; union 
idxd_genstatus_register genstatus_reg; struct spdk_idxd_device *idxd = &user_idxd->idxd; /* * Map BAR0 and BAR2 */ rc = idxd_map_pci_bars(idxd); if (rc) { return rc; } /* * Reset the device */ rc = idxd_reset_dev(idxd); if (rc) { goto err_reset; } /* * Read in config registers */ user_idxd->registers.version = _idxd_read_4(idxd, IDXD_VERSION_OFFSET); user_idxd->registers.gencap.raw = _idxd_read_8(idxd, IDXD_GENCAP_OFFSET); user_idxd->registers.wqcap.raw = _idxd_read_8(idxd, IDXD_WQCAP_OFFSET); user_idxd->registers.groupcap.raw = _idxd_read_8(idxd, IDXD_GRPCAP_OFFSET); user_idxd->registers.enginecap.raw = _idxd_read_8(idxd, IDXD_ENGCAP_OFFSET); for (i = 0; i < IDXD_OPCAP_WORDS; i++) { user_idxd->registers.opcap.raw[i] = _idxd_read_8(idxd, i * sizeof(uint64_t) + IDXD_OPCAP_OFFSET); } offsets_reg.raw[0] = _idxd_read_8(idxd, IDXD_TABLE_OFFSET); offsets_reg.raw[1] = _idxd_read_8(idxd, IDXD_TABLE_OFFSET + sizeof(uint64_t)); user_idxd->grpcfg_offset = offsets_reg.grpcfg * IDXD_TABLE_OFFSET_MULT; user_idxd->wqcfg_offset = offsets_reg.wqcfg * IDXD_TABLE_OFFSET_MULT; user_idxd->ims_offset = offsets_reg.ims * IDXD_TABLE_OFFSET_MULT; user_idxd->msix_perm_offset = offsets_reg.msix_perm * IDXD_TABLE_OFFSET_MULT; user_idxd->perfmon_offset = offsets_reg.perfmon * IDXD_TABLE_OFFSET_MULT; /* * Configure groups and work queues. 
*/ rc = idxd_group_config(idxd); if (rc) { goto err_group_cfg; } rc = idxd_wq_config(user_idxd); if (rc) { goto err_wq_cfg; } /* * Enable the device */ genstatus_reg.raw = _idxd_read_4(idxd, IDXD_GENSTATUS_OFFSET); assert(genstatus_reg.state == IDXD_DEVICE_STATE_DISABLED); _idxd_write_4(idxd, IDXD_CMD_OFFSET, IDXD_ENABLE_DEV << IDXD_CMD_SHIFT); rc = idxd_wait_cmd(idxd, IDXD_REGISTER_TIMEOUT_US); genstatus_reg.raw = _idxd_read_4(idxd, IDXD_GENSTATUS_OFFSET); if ((rc < 0) || (genstatus_reg.state != IDXD_DEVICE_STATE_ENABLED)) { rc = -EINVAL; SPDK_ERRLOG("Error enabling device %u\n", rc); goto err_device_enable; } genstatus_reg.raw = spdk_mmio_read_4((uint32_t *)(user_idxd->reg_base + IDXD_GENSTATUS_OFFSET)); assert(genstatus_reg.state == IDXD_DEVICE_STATE_ENABLED); /* * Enable the work queues that we've configured */ for (i = 0; i < g_user_dev_cfg.total_wqs; i++) { _idxd_write_4(idxd, IDXD_CMD_OFFSET, (IDXD_ENABLE_WQ << IDXD_CMD_SHIFT) | i); rc = idxd_wait_cmd(idxd, IDXD_REGISTER_TIMEOUT_US); if (rc < 0) { SPDK_ERRLOG("Error enabling work queues 0x%x\n", rc); goto err_wq_enable; } } if ((rc == 0) && (genstatus_reg.state == IDXD_DEVICE_STATE_ENABLED)) { SPDK_NOTICELOG("Device enabled, version 0x%x gencap: 0x%lx\n", user_idxd->registers.version, user_idxd->registers.gencap.raw); } return rc; err_wq_enable: err_device_enable: free(idxd->queues); err_wq_cfg: free(idxd->groups); err_group_cfg: err_reset: idxd_unmap_pci_bar(idxd, IDXD_MMIO_BAR); idxd_unmap_pci_bar(idxd, IDXD_MMIO_BAR); return rc; } static void user_idxd_device_destruct(struct spdk_idxd_device *idxd) { struct spdk_user_idxd_device *user_idxd = __user_idxd(idxd); idxd_unmap_pci_bar(idxd, IDXD_MMIO_BAR); idxd_unmap_pci_bar(idxd, IDXD_WQ_BAR); free(idxd->groups); free(idxd->queues); spdk_pci_device_detach(user_idxd->device); free(user_idxd); } struct idxd_enum_ctx { spdk_idxd_probe_cb probe_cb; spdk_idxd_attach_cb attach_cb; void *cb_ctx; }; /* This function must only be called while holding g_driver_lock */ 
/* PCI enumeration callback: if the probe callback accepts the device,
 * attach it and report it via the attach callback. */
static int
idxd_enum_cb(void *ctx, struct spdk_pci_device *pci_dev)
{
	struct idxd_enum_ctx *enum_ctx = ctx;
	struct spdk_idxd_device *idxd;

	if (enum_ctx->probe_cb(enum_ctx->cb_ctx, pci_dev)) {
		idxd = idxd_attach(pci_dev);
		if (idxd == NULL) {
			SPDK_ERRLOG("idxd_attach() failed\n");
			return -EINVAL;
		}

		enum_ctx->attach_cb(enum_ctx->cb_ctx, idxd);
	}

	return 0;
}

/* Default probe: logs the matching device and claims it so no other
 * process can attach to it concurrently. */
static bool
probe_cb(void *cb_ctx, struct spdk_pci_device *pci_dev)
{
	struct spdk_pci_addr pci_addr = spdk_pci_device_get_addr(pci_dev);

	SPDK_NOTICELOG(
		" Found matching device at %04x:%02x:%02x.%x vendor:0x%04x device:0x%04x\n",
		pci_addr.domain,
		pci_addr.bus,
		pci_addr.dev,
		pci_addr.func,
		spdk_pci_device_get_vendor_id(pci_dev),
		spdk_pci_device_get_device_id(pci_dev));

	/* Claim the device in case conflict with other process */
	if (spdk_pci_device_claim(pci_dev) < 0) {
		return false;
	}

	return true;
}

/* Enumerates all IDXD PCI devices under g_driver_lock, probing and
 * attaching each one that matches. */
static int
user_idxd_probe(void *cb_ctx, spdk_idxd_attach_cb attach_cb)
{
	int rc;
	struct idxd_enum_ctx enum_ctx;

	enum_ctx.probe_cb = probe_cb;
	enum_ctx.attach_cb = attach_cb;
	enum_ctx.cb_ctx = cb_ctx;

	pthread_mutex_lock(&g_driver_lock);
	rc = spdk_pci_enumerate(spdk_pci_idxd_get_driver(), idxd_enum_cb, &enum_ctx);
	pthread_mutex_unlock(&g_driver_lock);

	return rc;
}

/* Dumps the SWERR register fields for debugging.
 * NOTE(review): the bit loop only scans the low CHAR_BIT (8) bits of the
 * 64-bit sw_error_0 value, and the field shifts below truncate through
 * (uint8_t) casts — verify against the IDXD SWERR register layout. */
static void
user_idxd_dump_sw_err(struct spdk_idxd_device *idxd, void *portal)
{
	uint64_t sw_error_0;
	uint16_t i;

	sw_error_0 = _idxd_read_8(idxd, IDXD_SWERR_OFFSET);

	SPDK_NOTICELOG("SW Error bits set:");
	for (i = 0; i < CHAR_BIT; i++) {
		if ((1ULL << i) & sw_error_0) {
			SPDK_NOTICELOG("    %d\n", i);
		}
	}
	SPDK_NOTICELOG("SW Error error code: %#x\n", (uint8_t)(sw_error_0 >> 8));
	SPDK_NOTICELOG("SW Error WQ index: %u\n", (uint8_t)(sw_error_0 >> 16));
	SPDK_NOTICELOG("SW Error Operation: %u\n", (uint8_t)(sw_error_0 >> 32));
}

/* Returns the portal address for this device's currently assigned WQ. */
static char *
user_idxd_portal_get_addr(struct spdk_idxd_device *idxd)
{
	return (char *)idxd->portals + idxd->wq_id * WQ_TOTAL_PORTAL_SIZE;
}

static bool
user_idxd_nop_check(struct spdk_idxd_device *idxd)
{
	struct spdk_user_idxd_device *user_idxd = __user_idxd(idxd);

	/* TODO: temp workaround for simulator.  Remove this function when fixed or w/silicon. */
	if (user_idxd->registers.gencap.raw == 0x1833f011f) {
		return true;
	}

	return false;
}

/* Ops table registered for the user-space (PCI) IDXD backend. */
static struct spdk_idxd_impl g_user_idxd_impl = {
	.name			= "user",
	.set_config		= user_idxd_set_config,
	.probe			= user_idxd_probe,
	.destruct		= user_idxd_device_destruct,
	.dump_sw_error		= user_idxd_dump_sw_err,
	.portal_get_addr	= user_idxd_portal_get_addr,
	.nop_check		= user_idxd_nop_check,
};

/* Caller must hold g_driver_lock */
static struct spdk_idxd_device *
idxd_attach(struct spdk_pci_device *device)
{
	struct spdk_user_idxd_device *user_idxd;
	struct spdk_idxd_device *idxd;
	uint32_t cmd_reg;
	int rc;

	user_idxd = calloc(1, sizeof(struct spdk_user_idxd_device));
	if (user_idxd == NULL) {
		SPDK_ERRLOG("Failed to allocate memory for user_idxd device.\n");
		return NULL;
	}

	idxd = &user_idxd->idxd;
	user_idxd->device = device;
	idxd->impl = &g_user_idxd_impl;
	idxd->socket_id = device->socket_id;
	pthread_mutex_init(&idxd->num_channels_lock, NULL);

	/* Enable PCI busmaster. */
	spdk_pci_device_cfg_read32(device, &cmd_reg, 4);
	cmd_reg |= 0x4;
	spdk_pci_device_cfg_write32(device, cmd_reg, 4);

	rc = idxd_device_configure(user_idxd);
	if (rc) {
		goto err;
	}

	return idxd;
err:
	user_idxd_device_destruct(idxd);
	return NULL;
}

SPDK_IDXD_IMPL_REGISTER(user, &g_user_idxd_impl);
7,360
1,403
<reponame>DNCoelho/clipper
/** NOTE: This file was slightly adapted from `container_rpc.hpp` within
  the C++ container source to meet R packaging requirements.
*/
#ifndef CLIPPER_CONTAINER_RPC_HPP
#define CLIPPER_CONTAINER_RPC_HPP

#include <atomic>
#include <chrono>
#include <iostream>
#include <mutex>
#include <numeric>
#include <sstream>
#include <thread>

#include <Rcpp.h>

#include "zmq.hpp"

#include "container_parsing.hpp"
#include "container_util.hpp"
#include "datatypes.hpp"

const std::string LOGGING_TAG_CONTAINER = "CONTAINER";

using Clock = std::chrono::system_clock;

namespace container {

// Protocol version; must match the Clipper side (see validate_rpc_version).
constexpr uint32_t RPC_VERSION = 3;

constexpr long SOCKET_POLLING_TIMEOUT_MILLIS = 5000;
constexpr long SOCKET_ACTIVITY_TIMEOUT_MILLIS = 30000;
constexpr long EVENT_LOG_CAPACITY = 100;

using RPCLogItem = std::pair<RPCEvent, Clock::time_point>;

// Compile-time mapping from a serializable input container type to its
// InputType tag; the unspecialized template marks the type unsupported.
template <typename T>
struct input_trait {
  static const bool is_supported = false;
  static const InputType input_type = InputType::Invalid;
};

template <>
struct input_trait<ByteVector> {
  static const bool is_supported = true;
  static const InputType input_type = InputType::Bytes;
};

template <>
struct input_trait<IntVector> {
  static const bool is_supported = true;
  static const InputType input_type = InputType::Ints;
};

template <>
struct input_trait<FloatVector> {
  static const bool is_supported = true;
  static const InputType input_type = InputType::Floats;
};

template <>
struct input_trait<DoubleVector> {
  static const bool is_supported = true;
  static const InputType input_type = InputType::Doubles;
};

template <>
struct input_trait<SerializableString> {
  static const bool is_supported = true;
  static const InputType input_type = InputType::Strings;
};

// Abstract base for user models served over the RPC protocol; I is the
// input container type, constrained at compile time via input_trait.
template <class I>
class Model {
 public:
  Model() : input_type_(input_trait<I>::input_type) {
    static_assert(input_trait<I>::is_supported,
                  "Model must be of a supported input type!");
  }

  // Produces one serialized output string per input.
  virtual std::vector<std::string> predict(
      const std::vector<I> inputs) const = 0;

  InputType get_input_type() { return input_type_; }

 private:
  const InputType input_type_;
};

// This is not thread safe
class PerformanceTimer {
 public:
  static void start_timing() {
    log_.str("");
    last_log_ = Clock::now();
  }

  // Appends "<tag>: <elapsed> us, " to the log and resets the reference time.
  static void log_elapsed(const std::string tag) {
    Clock::time_point curr_time = Clock::now();
    long log_diff_micros =
        std::chrono::duration_cast<std::chrono::microseconds>(curr_time -
                                                              last_log_)
            .count();
    log_ << tag << ": " << std::to_string(log_diff_micros) << " us"
         << ", ";
    last_log_ = curr_time;
  }

  static std::string get_log() { return log_.str(); }

 private:
  static std::stringstream log_;
  static Clock::time_point last_log_;
};

// Runs the container side of Clipper's ZeroMQ RPC protocol: connects to
// Clipper, answers heartbeats, and dispatches prediction requests to a Model.
class RPC {
 public:
  RPC();
  ~RPC();

  // Disallow copy
  RPC(const RPC&) = delete;
  RPC& operator=(const RPC&) = delete;

  // Default move constructor and assignment.
  RPC(RPC&& other) = default;
  RPC& operator=(RPC&& other) = default;

  // Starts serving `model` to the Clipper instance at clipper_ip:clipper_port.
  // Blocks in the serve loop until stop() deactivates the container.
  template <typename D>
  void start(Model<Input<D>>& model, std::string model_name, int model_version,
             std::string clipper_ip, int clipper_port) {
    start_rpc(model, model_name, model_version, clipper_ip, clipper_port);
  }

  void stop();

  /**
   * @return The `num_events` most recent RPC events that have occurred
   */
  std::vector<RPCLogItem> get_events() const;

 private:
  std::atomic_bool active_;
  std::shared_ptr<std::mutex> event_log_mutex_;
  std::shared_ptr<CircularBuffer<RPCLogItem>> event_log_;

  // Validates preconditions, marks the container active and enters the
  // blocking serve loop.
  template <typename D>
  void start_rpc(Model<Input<D>>& model, std::string& model_name,
                 int model_version, std::string& clipper_ip, int clipper_port) {
    if (active_) {
      throw std::runtime_error(
          "Cannot start a container that is already started!");
    }
    active_ = true;
    const std::string clipper_address =
        "tcp://" + clipper_ip + ":" + std::to_string(clipper_port);
    Rcpp::Rcout << "Starting container RPC with clipper ip: " << clipper_ip
                << " and port: " << clipper_port << std::endl;
    serve_model(model, model_name, model_version, clipper_address);
  }

  // Logs (but does not reject) messages whose protocol version differs.
  void validate_rpc_version(const uint32_t received_version) {
    if (received_version != RPC_VERSION) {
      Rcpp::Rcout << "ERROR: Received an RPC message with version: "
                  << received_version
                  << " that does not match container version: " << RPC_VERSION
                  << std::endl;
    }
  }

  /**
   * @return `true` if the received heartbeat is a request for container
   * metadata. `false` otherwise.
   */
  bool handle_heartbeat(zmq::socket_t& socket) const;

  void send_heartbeat(zmq::socket_t& socket) const;

  void send_container_metadata(std::string& model_name, int model_version,
                               InputType model_input_type,
                               zmq::socket_t& socket) const;

  void log_event(RPCEvent event) const;

  // Main serve loop: (re)connects to Clipper, polls the socket, answers
  // heartbeats, and dispatches incoming messages until stop() is called.
  template <typename D>
  void serve_model(Model<Input<D>>& model, std::string model_name,
                   int model_version, std::string clipper_address) {
    // Initialize a ZeroMQ context with a single IO thread.
    // This thread will be used by the socket we're about to create
    zmq::context_t context(1);
    bool connected = false;
    std::chrono::time_point<Clock> last_activity_time;
    // Reusable receive/send buffers shared across requests to avoid
    // reallocating per message.
    std::vector<uint64_t> input_header_buffer;
    std::vector<D> input_data_buffer;
    std::vector<uint64_t> output_header_buffer;
    std::vector<uint8_t> output_buffer;

    while (true) {
      zmq::socket_t socket = zmq::socket_t(context, ZMQ_DEALER);
      zmq::pollitem_t items[] = {{socket, 0, ZMQ_POLLIN, 0}};
      socket.connect(clipper_address);
      send_heartbeat(socket);

      while (active_) {
        R_CheckUserInterrupt();
        zmq_poll(items, 1, SOCKET_POLLING_TIMEOUT_MILLIS);
        if (!(items[0].revents & ZMQ_POLLIN)) {
          // No inbound traffic this polling interval: if connected and idle
          // too long, reconnect; otherwise keep the connection alive.
          if (connected) {
            std::chrono::time_point<Clock> curr_time = Clock::now();
            auto time_since_last_activity =
                curr_time.time_since_epoch() -
                last_activity_time.time_since_epoch();
            long time_since_last_activity_millis =
                std::chrono::duration_cast<std::chrono::milliseconds>(
                    time_since_last_activity)
                    .count();
            if (time_since_last_activity_millis >=
                SOCKET_ACTIVITY_TIMEOUT_MILLIS) {
              Rcpp::Rcout << "Connection timed out, reconnecting..."
                          << std::endl;
              connected = false;
              break;
            } else {
              send_heartbeat(socket);
            }
          }
          continue;
        }

        connected = true;
        last_activity_time = Clock::now();

        PerformanceTimer::start_timing();

        // Every message starts with: empty delimiter, RPC version, type.
        zmq::message_t msg_delimiter;
        zmq::message_t msg_rpc_version_bytes;
        zmq::message_t msg_msg_type_bytes;

        socket.recv(&msg_delimiter, 0);
        socket.recv(&msg_rpc_version_bytes, 0);
        socket.recv(&msg_msg_type_bytes, 0);

        uint32_t rpc_version =
            static_cast<uint32_t*>(msg_rpc_version_bytes.data())[0];
        validate_rpc_version(rpc_version);

        uint32_t message_type_code =
            static_cast<uint32_t*>(msg_msg_type_bytes.data())[0];
        MessageType message_type =
            static_cast<MessageType>(message_type_code);

        switch (message_type) {
          case MessageType::Heartbeat: {
            Rcpp::Rcout << "Received heartbeat!" << std::endl;
            log_event(RPCEvent::ReceivedHeartbeat);
            bool requesting_metadata = handle_heartbeat(socket);
            if (requesting_metadata) {
              send_container_metadata(model_name, model_version,
                                      model.get_input_type(), socket);
            }
          } break;

          case MessageType::ContainerContent: {
            log_event(RPCEvent::ReceivedContainerContent);
            zmq::message_t msg_request_id;
            zmq::message_t msg_request_header;

            socket.recv(&msg_request_id, 0);
            socket.recv(&msg_request_header, 0);

            uint32_t msg_id =
                static_cast<uint32_t*>(msg_request_id.data())[0];
            uint32_t request_type_code =
                static_cast<uint32_t*>(msg_request_header.data())[0];
            RequestType request_type =
                static_cast<RequestType>(request_type_code);

            switch (request_type) {
              case RequestType::PredictRequest:
                handle_predict_request(model, socket, input_header_buffer,
                                       input_data_buffer,
                                       output_header_buffer, msg_id);
                break;

              case RequestType::FeedbackRequest:
                throw std::runtime_error(
                    "Received unsupported feedback request!");
                break;

              default: {
                std::stringstream ss;
                ss << "Received RPC message of an unknown request type "
                      "corresponding to integer code "
                   << request_type_code;
                throw std::runtime_error(ss.str());
              }
            }
          } break;

          case MessageType::NewContainer:
            log_event(RPCEvent::ReceivedContainerMetadata);
            Rcpp::Rcout << "Error! Received erroneous new container message "
                           "from "
                           "Clipper!"
                        << std::endl;
            // NOTE(review): there is no `break` here, so control falls
            // through into `default` and throws after logging. Verify the
            // fall-through is intended.
          default: {
            std::stringstream ss;
            ss << "Received RPC message of an unknown message type "
                  "corresponding to integer code "
               << message_type_code;
            throw std::runtime_error(ss.str());
          }
        }
      }
      // The socket associated with the previous session is no longer
      // being used, so we should close it
      socket.close();
      if (!active_) {
        // The container is no longer active. Exit the connection loop.
        return;
      }
    }
  }

  // Receives one prediction request (header + inputs), invokes the model,
  // and sends the serialized outputs back as a multipart response.
  template <typename D>
  void handle_predict_request(Model<Input<D>>& model, zmq::socket_t& socket,
                              std::vector<uint64_t>& input_header_buffer,
                              std::vector<D>& input_data_buffer,
                              std::vector<uint64_t>& output_header_buffer,
                              int msg_id) const {
    zmq::message_t msg_input_header_size;
    socket.recv(&msg_input_header_size, 0);
    uint64_t input_header_size_bytes =
        static_cast<uint64_t*>(msg_input_header_size.data())[0];

    // Resize input header buffer if necessary
    resize_if_necessary(input_header_buffer, input_header_size_bytes);
    // Receive input header
    socket.recv(input_header_buffer.data(), input_header_size_bytes, 0);

    // Header layout: [0] input type, [1] input count, [2..] per-input sizes.
    InputType input_type = static_cast<InputType>(input_header_buffer[0]);
    if (input_type != model.get_input_type()) {
      std::stringstream ss;
      ss << "Received prediction request with incorrect input type '"
         << get_readable_input_type(input_type)
         << "' for model with input type '"
         << get_readable_input_type(model.get_input_type()) << "'";
      throw std::runtime_error(ss.str());
    }

    uint64_t num_inputs = input_header_buffer[1];
    uint64_t input_content_size_bytes =
        std::accumulate(input_header_buffer.begin() + 2,
                        input_header_buffer.begin() + 2 + num_inputs, 0);

    std::vector<Input<D>> inputs;
    inputs.reserve(num_inputs);
    resize_if_necessary(input_data_buffer, input_content_size_bytes);
    D* data_ptr = input_data_buffer.data();
    for (uint32_t i = 0; i < num_inputs; i++) {
      uint64_t input_size_bytes = input_header_buffer[i + 2];
      socket.recv(data_ptr, input_size_bytes, 0);
      // Inputs are zero-copy views into input_data_buffer.
      inputs.push_back(Input<D>(data_ptr, input_size_bytes));
      data_ptr += input_size_bytes;
    }

    PerformanceTimer::log_elapsed("Recv and Parse");

    // Make predictions
    std::vector<std::string> outputs = model.predict(inputs);

    // Send outputs as a prediction response
    uint64_t num_outputs = outputs.size();
    if (num_outputs != inputs.size()) {
      std::stringstream ss;
      ss << "Number of model outputs: " << outputs.size()
         << " does not equal the number of inputs: " << inputs.size();
      throw std::runtime_error(ss.str());
    }

    uint64_t output_header_size =
        create_output_header(outputs, output_header_buffer);

    zmq::message_t msg_message_type(sizeof(uint32_t));
    static_cast<uint32_t*>(msg_message_type.data())[0] =
        static_cast<uint32_t>(MessageType::ContainerContent);

    zmq::message_t msg_message_id(sizeof(uint32_t));
    static_cast<uint32_t*>(msg_message_id.data())[0] = msg_id;

    zmq::message_t msg_header_size(sizeof(uint64_t));
    static_cast<uint64_t*>(msg_header_size.data())[0] = output_header_size;

    socket.send("", 0, ZMQ_SNDMORE);
    socket.send(msg_message_type, ZMQ_SNDMORE);
    socket.send(msg_message_id, ZMQ_SNDMORE);
    socket.send(msg_header_size, ZMQ_SNDMORE);
    socket.send(output_header_buffer.data(), output_header_size, ZMQ_SNDMORE);
    uint64_t last_msg_num = num_outputs - 1;
    for (uint64_t i = 0; i < num_outputs; i++) {
      std::string& output = outputs[i];
      if (i < last_msg_num) {
        socket.send(output.begin(), output.end(), ZMQ_SNDMORE);
      } else {
        // If this is the last output, we don't want to use
        // the 'SNDMORE' flag
        socket.send(output.begin(), output.end(), 0);
      }
    }
    log_event(RPCEvent::SentContainerContent);

    PerformanceTimer::log_elapsed("Handle");
    Rcpp::Rcout << PerformanceTimer::get_log() << std::endl;
  }

  // Fills output_header_buffer with [count, size_0, ..., size_{n-1}] and
  // returns the header size in bytes.
  uint64_t create_output_header(
      std::vector<std::string>& outputs,
      std::vector<uint64_t>& output_header_buffer) const {
    uint64_t num_outputs = outputs.size();
    uint64_t output_header_size = (num_outputs + 1) * sizeof(uint64_t);
    resize_if_necessary(output_header_buffer, output_header_size);
    uint64_t* output_header_data =
output_header_buffer.data(); output_header_data[0] = num_outputs; for (uint64_t i = 0; i < num_outputs; i++) { output_header_data[i + 1] = outputs[i].size(); } return output_header_size; } template <typename D> void resize_if_necessary(std::vector<D>& buffer, uint64_t required_buffer_size) const { if ((buffer.size() * sizeof(D)) < required_buffer_size) { buffer.reserve((2 * required_buffer_size) / sizeof(D)); } } }; } // namespace container #endif // CLIPPER_CONTAINER_RPC_HPP
6,475
1,371
<reponame>lilotech/caffe-fast-rcnn #include <cstring> #include <vector> #include "gtest/gtest.h" #include "caffe/blob.hpp" #include "caffe/common.hpp" #include "caffe/filler.hpp" #include "caffe/vision_layers.hpp" #include "caffe/test/test_caffe_main.hpp" #include "caffe/test/test_gradient_check_util.hpp" namespace caffe { // Since ConvolutionLayerTest checks the shared conv/deconv code in detail, // we'll just do a simple forward test and a gradient check. template <typename TypeParam> class DeconvolutionLayerTest : public MultiDeviceTest<TypeParam> { typedef typename TypeParam::Dtype Dtype; protected: DeconvolutionLayerTest() : blob_bottom_(new Blob<Dtype>(2, 3, 6, 4)), blob_bottom_2_(new Blob<Dtype>(2, 3, 6, 4)), blob_top_(new Blob<Dtype>()), blob_top_2_(new Blob<Dtype>()) {} virtual void SetUp() { // fill the values FillerParameter filler_param; filler_param.set_value(1.); GaussianFiller<Dtype> filler(filler_param); filler.Fill(this->blob_bottom_); filler.Fill(this->blob_bottom_2_); blob_bottom_vec_.push_back(blob_bottom_); blob_top_vec_.push_back(blob_top_); } virtual ~DeconvolutionLayerTest() { delete blob_bottom_; delete blob_bottom_2_; delete blob_top_; delete blob_top_2_; } Blob<Dtype>* const blob_bottom_; Blob<Dtype>* const blob_bottom_2_; Blob<Dtype>* const blob_top_; Blob<Dtype>* const blob_top_2_; vector<Blob<Dtype>*> blob_bottom_vec_; vector<Blob<Dtype>*> blob_top_vec_; }; TYPED_TEST_CASE(DeconvolutionLayerTest, TestDtypesAndDevices); TYPED_TEST(DeconvolutionLayerTest, TestSetup) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); convolution_param->set_kernel_size(3); convolution_param->set_stride(2); convolution_param->set_num_output(4); this->blob_bottom_vec_.push_back(this->blob_bottom_2_); this->blob_top_vec_.push_back(this->blob_top_2_); shared_ptr<Layer<Dtype> > layer( new DeconvolutionLayer<Dtype>(layer_param)); 
layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); EXPECT_EQ(this->blob_top_->num(), 2); EXPECT_EQ(this->blob_top_->channels(), 4); EXPECT_EQ(this->blob_top_->height(), 13); EXPECT_EQ(this->blob_top_->width(), 9); EXPECT_EQ(this->blob_top_2_->num(), 2); EXPECT_EQ(this->blob_top_2_->channels(), 4); EXPECT_EQ(this->blob_top_2_->height(), 13); EXPECT_EQ(this->blob_top_2_->width(), 9); // setting group should not change the shape convolution_param->set_num_output(3); convolution_param->set_group(3); layer.reset(new DeconvolutionLayer<Dtype>(layer_param)); layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); EXPECT_EQ(this->blob_top_->num(), 2); EXPECT_EQ(this->blob_top_->channels(), 3); EXPECT_EQ(this->blob_top_->height(), 13); EXPECT_EQ(this->blob_top_->width(), 9); EXPECT_EQ(this->blob_top_2_->num(), 2); EXPECT_EQ(this->blob_top_2_->channels(), 3); EXPECT_EQ(this->blob_top_2_->height(), 13); EXPECT_EQ(this->blob_top_2_->width(), 9); } TYPED_TEST(DeconvolutionLayerTest, TestSimpleDeconvolution) { typedef typename TypeParam::Dtype Dtype; this->blob_bottom_vec_.push_back(this->blob_bottom_2_); this->blob_top_vec_.push_back(this->blob_top_2_); LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); convolution_param->set_kernel_size(3); convolution_param->set_stride(2); convolution_param->set_num_output(4); convolution_param->mutable_weight_filler()->set_type("constant"); convolution_param->mutable_weight_filler()->set_value(1); convolution_param->mutable_bias_filler()->set_type("constant"); convolution_param->mutable_bias_filler()->set_value(0.1); shared_ptr<Layer<Dtype> > layer( new DeconvolutionLayer<Dtype>(layer_param)); layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); // constant-fill the bottom blobs FillerParameter filler_param; filler_param.set_value(1.); ConstantFiller<Dtype> filler(filler_param); filler.Fill(this->blob_bottom_); filler.Fill(this->blob_bottom_2_); 
layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); // simply check that accumulation works with overlapping filters const Dtype* top_data = this->blob_top_->cpu_data(); for (int n = 0; n < this->blob_top_->num(); ++n) { for (int c = 0; c < this->blob_top_->channels(); ++c) { for (int h = 0; h < this->blob_top_->height(); ++h) { for (int w = 0; w < this->blob_top_->width(); ++w) { Dtype expected = 3.1; bool h_overlap = h % 2 == 0 && h > 0 && h < this->blob_top_->height() - 1; bool w_overlap = w % 2 == 0 && w > 0 && w < this->blob_top_->width() - 1; if (h_overlap && w_overlap) { expected += 9; } else if (h_overlap || w_overlap) { expected += 3; } EXPECT_NEAR(top_data[this->blob_top_->offset(n, c, h, w)], expected, 1e-4); } } } } } TYPED_TEST(DeconvolutionLayerTest, TestGradient) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); this->blob_bottom_vec_.push_back(this->blob_bottom_2_); this->blob_top_vec_.push_back(this->blob_top_2_); convolution_param->set_kernel_size(2); convolution_param->set_stride(1); convolution_param->set_num_output(1); convolution_param->mutable_weight_filler()->set_type("gaussian"); convolution_param->mutable_bias_filler()->set_type("gaussian"); DeconvolutionLayer<Dtype> layer(layer_param); GradientChecker<Dtype> checker(1e-2, 1e-3); checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, this->blob_top_vec_); } } // namespace caffe
2,487
1,442
#include <kandinsky/point.h> #include <assert.h> KDPoint KDPoint::translatedBy(KDPoint other) const { assert((other.x() >= 0 && m_x <= KDCOORDINATE_MAX - other.x()) || (other.x() < 0 && m_x >= KDCOORDINATE_MIN - other.x())); assert((other.y() >= 0 && m_y <= KDCOORDINATE_MAX - other.y()) || (other.y() < 0 && m_y >= KDCOORDINATE_MIN - other.y())); return KDPoint(m_x+other.x(), m_y+other.y()); } KDPoint KDPoint::opposite() const { return KDPoint(-m_x, -m_y); } uint16_t KDPoint::squareDistanceTo(KDPoint other) const { return (m_x-other.x()) * (m_x-other.x()) + (m_y-other.y()) * (m_y-other.y()); }
270
14,425
<reponame>bzhaoopenstack/hadoop<filename>hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MD5MD5CRC32FileChecksum.java /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs; import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.Options.ChecksumOpt; import org.apache.hadoop.io.MD5Hash; import org.apache.hadoop.io.WritableUtils; import org.apache.hadoop.util.DataChecksum; /** MD5 of MD5 of CRC32. 
*/ @InterfaceAudience.LimitedPrivate({"HDFS"}) @InterfaceStability.Unstable public class MD5MD5CRC32FileChecksum extends FileChecksum { public static final int LENGTH = MD5Hash.MD5_LEN + (Integer.SIZE + Long.SIZE)/Byte.SIZE; private int bytesPerCRC; private long crcPerBlock; private MD5Hash md5; /** Same as this(0, 0, null) */ public MD5MD5CRC32FileChecksum() { this(0, 0, null); } /** Create a MD5FileChecksum */ public MD5MD5CRC32FileChecksum(int bytesPerCRC, long crcPerBlock, MD5Hash md5) { this.bytesPerCRC = bytesPerCRC; this.crcPerBlock = crcPerBlock; this.md5 = md5; } @Override public String getAlgorithmName() { return "MD5-of-" + crcPerBlock + "MD5-of-" + bytesPerCRC + getCrcType().name(); } public static DataChecksum.Type getCrcTypeFromAlgorithmName(String algorithm) throws IOException { if (algorithm.endsWith(DataChecksum.Type.CRC32.name())) { return DataChecksum.Type.CRC32; } else if (algorithm.endsWith(DataChecksum.Type.CRC32C.name())) { return DataChecksum.Type.CRC32C; } throw new IOException("Unknown checksum type in " + algorithm); } @Override public int getLength() {return LENGTH;} @Override public byte[] getBytes() { return WritableUtils.toByteArray(this); } /** returns the CRC type */ public DataChecksum.Type getCrcType() { // default to the one that is understood by all releases. return DataChecksum.Type.CRC32; } @Override public ChecksumOpt getChecksumOpt() { return new ChecksumOpt(getCrcType(), bytesPerCRC); } @Override public void readFields(DataInput in) throws IOException { bytesPerCRC = in.readInt(); crcPerBlock = in.readLong(); md5 = MD5Hash.read(in); } @Override public void write(DataOutput out) throws IOException { out.writeInt(bytesPerCRC); out.writeLong(crcPerBlock); md5.write(out); } @Override public String toString() { return getAlgorithmName() + ":" + md5; } }
1,178
3,083
// Copyright 2011-2016 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package com.google.security.zynamics.zylib.gui.zygraph.layouters; public enum OrthogonalStyle { NORMAL, TREE; public static OrthogonalStyle parseInt(final int style) { if (style == NORMAL.ordinal()) { return NORMAL; } else if (style == TREE.ordinal()) { return TREE; } else { throw new IllegalStateException("Error: Invalid style " + style); } } }
294
929
<filename>includes/acl/compression/compression_settings.h #pragma once //////////////////////////////////////////////////////////////////////////////// // The MIT License (MIT) // // Copyright (c) 2017 <NAME> & Animation Compression Library contributors // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. //////////////////////////////////////////////////////////////////////////////// #include "acl/core/impl/compiler_utils.h" #include "acl/core/error_result.h" #include "acl/core/hash.h" #include "acl/core/track_formats.h" #include "acl/core/track_types.h" #include "acl/core/range_reduction_types.h" #include "acl/compression/compression_level.h" #include "acl/compression/transform_error_metrics.h" #include <rtm/scalarf.h> #include <cstdint> ACL_IMPL_FILE_PRAGMA_PUSH namespace acl { ////////////////////////////////////////////////////////////////////////// // Encapsulates all the compression settings related to database usage. 
struct compression_database_settings { ////////////////////////////////////////////////////////////////////////// // What proportions we should use when distributing our frames based on // their importance to the overall error contribution. If a sample doesn't // go into the medium or low importance tiers, it will end up in the high // importance tier stored within each compressed track instance. // Proportion values must be between 0.0 and 1.0 and their sum as well. // If the sum is less than 1.0, remaining frames are considered to have high // importance. A low importance proportion of 30% means that the least important // 30% of frames will end up in that corresponding database tier. // Note that only movable frames can end up in the database as some frames must remain // within the compressed track instance. A frame is movable if it isn't the first or last // frame of its segment. // Defaults to '0.0' (the medium importance tier is empty) float medium_importance_tier_proportion = 0.0F; ////////////////////////////////////////////////////////////////////////// // See above for details. // Defaults to '0.5' (the least important 50% of frames are moved to the database) float low_importance_tier_proportion = 0.5F; ////////////////////////////////////////////////////////////////////////// // How large should each chunk be, in bytes. // This value must be at least 4 KB and ideally it should be a multiple of // the virtual memory page size used on the platform that will decompress // from the database. // Defaults to '1 MB' uint32_t max_chunk_size = 1 * 1024 * 1024; ////////////////////////////////////////////////////////////////////////// // Calculates a hash from the internal state to uniquely identify a configuration. uint32_t get_hash() const; ////////////////////////////////////////////////////////////////////////// // Checks if everything is valid and if it isn't, returns an error string. // Returns nullptr if the settings are valid. 
error_result is_valid() const; }; ////////////////////////////////////////////////////////////////////////// // Encapsulates all the optional metadata compression settings. struct compression_metadata_settings { ////////////////////////////////////////////////////////////////////////// // Whether to include the optional metadata for the track list name // Defaults to 'false' bool include_track_list_name = false; ////////////////////////////////////////////////////////////////////////// // Whether to include the optional metadata for track names // Defaults to 'false' bool include_track_names = false; ////////////////////////////////////////////////////////////////////////// // Whether to include the optional metadata for parent track indices // Transform tracks only // Defaults to 'false' bool include_parent_track_indices = false; ////////////////////////////////////////////////////////////////////////// // Whether to include the optional metadata for track descriptions // For transforms, also enables the parent track indices metadata // Defaults to 'false' bool include_track_descriptions = false; ////////////////////////////////////////////////////////////////////////// // Whether to include the optional metadata for the contributing error // of each frame. These are sorted from lowest to largest error. // This is required when the compressed tracks will later be merged into // a database. // Transform tracks only // Defaults to 'false' bool include_contributing_error = false; ////////////////////////////////////////////////////////////////////////// // Calculates a hash from the internal state to uniquely identify a configuration. uint32_t get_hash() const; ////////////////////////////////////////////////////////////////////////// // Checks if everything is valid and if it isn't, returns an error string. // Returns nullptr if the settings are valid. 
error_result is_valid() const; }; ////////////////////////////////////////////////////////////////////////// // Encapsulates all the compression settings. struct compression_settings { ////////////////////////////////////////////////////////////////////////// // The compression level determines how aggressively we attempt to reduce the memory // footprint. Higher levels will try more permutations and bit rates. The higher // the level, the slower the compression but the smaller the memory footprint. // Transform tracks only. compression_level8 level = compression_level8::low; ////////////////////////////////////////////////////////////////////////// // The rotation, translation, and scale formats to use. See functions get_rotation_format(..) and get_vector_format(..) // Defaults to raw: 'quatf_full' and 'vector3f_full' // Transform tracks only. rotation_format8 rotation_format = rotation_format8::quatf_full; vector_format8 translation_format = vector_format8::vector3f_full; vector_format8 scale_format = vector_format8::vector3f_full; ////////////////////////////////////////////////////////////////////////// // The error metric to use. // Defaults to 'null', this value must be set manually! // Transform tracks only. itransform_error_metric* error_metric = nullptr; ////////////////////////////////////////////////////////////////////////// // Whether or not to enable database support on the output compressed clip. // This enables the required metadata which will later be stripped once // the database is built. // Transform tracks only. bool enable_database_support = false; ////////////////////////////////////////////////////////////////////////// // These are optional metadata that can be added to compressed clips. compression_metadata_settings metadata; ////////////////////////////////////////////////////////////////////////// // Calculates a hash from the internal state to uniquely identify a configuration. 
uint32_t get_hash() const; ////////////////////////////////////////////////////////////////////////// // Checks if everything is valid and if it isn't, returns an error string. error_result is_valid() const; }; ////////////////////////////////////////////////////////////////////////// // Returns raw compression settings. No compression is performed and // samples are all retained with full precision. compression_settings get_raw_compression_settings(); ////////////////////////////////////////////////////////////////////////// // Returns the recommended and default compression settings. These have // been tested in a wide range of scenarios and perform best overall. compression_settings get_default_compression_settings(); } #include "acl/compression/impl/compression_settings.impl.h" ACL_IMPL_FILE_PRAGMA_POP
2,071
12,718
/** * This file has no copyright assigned and is placed in the Public Domain. * This file is part of the mingw-w64 runtime package. * No warranty is given; refer to the file DISCLAIMER.PD within this package. */ #ifndef __REQUIRED_RPCNDR_H_VERSION__ #define __REQUIRED_RPCNDR_H_VERSION__ 440 #endif #include "rpc.h" #include "rpcndr.h" #ifndef __msdatsrc_h__ #define __msdatsrc_h__ #ifndef __DataSourceListener_FWD_DEFINED__ #define __DataSourceListener_FWD_DEFINED__ typedef struct DataSourceListener DataSourceListener; #endif #ifndef __DataSource_FWD_DEFINED__ #define __DataSource_FWD_DEFINED__ typedef struct DataSource DataSource; #endif #ifdef __cplusplus extern "C" { #endif #ifndef __MIDL_user_allocate_free_DEFINED__ #define __MIDL_user_allocate_free_DEFINED__ void *__RPC_API MIDL_user_allocate(size_t); void __RPC_API MIDL_user_free(void *); #endif #define IDataSource DataSource #define IDataSourceListener DataSourceListener EXTERN_C const IID CATID_DataSource; EXTERN_C const IID CATID_DataConsumer; extern RPC_IF_HANDLE __MIDL_itf_msdatsrc_0000_v0_0_c_ifspec; extern RPC_IF_HANDLE __MIDL_itf_msdatsrc_0000_v0_0_s_ifspec; #ifndef __MSDATASRC_LIBRARY_DEFINED__ #define __MSDATASRC_LIBRARY_DEFINED__ typedef BSTR DataMember; EXTERN_C const IID LIBID_MSDATASRC; #ifndef __DataSourceListener_INTERFACE_DEFINED__ #define __DataSourceListener_INTERFACE_DEFINED__ EXTERN_C const IID IID_DataSourceListener; #if defined(__cplusplus) && !defined(CINTERFACE) struct DataSourceListener : public IUnknown { public: virtual HRESULT WINAPI dataMemberChanged(DataMember bstrDM) = 0; virtual HRESULT WINAPI dataMemberAdded(DataMember bstrDM) = 0; virtual HRESULT WINAPI dataMemberRemoved(DataMember bstrDM) = 0; }; #else typedef struct DataSourceListenerVtbl { BEGIN_INTERFACE HRESULT (WINAPI *QueryInterface)(DataSourceListener *This,REFIID riid,void **ppvObject); ULONG (WINAPI *AddRef)(DataSourceListener *This); ULONG (WINAPI *Release)(DataSourceListener *This); HRESULT (WINAPI 
*dataMemberChanged)(DataSourceListener *This,DataMember bstrDM); HRESULT (WINAPI *dataMemberAdded)(DataSourceListener *This,DataMember bstrDM); HRESULT (WINAPI *dataMemberRemoved)(DataSourceListener *This,DataMember bstrDM); END_INTERFACE } DataSourceListenerVtbl; struct DataSourceListener { CONST_VTBL struct DataSourceListenerVtbl *lpVtbl; }; #ifdef COBJMACROS #define DataSourceListener_QueryInterface(This,riid,ppvObject) (This)->lpVtbl->QueryInterface(This,riid,ppvObject) #define DataSourceListener_AddRef(This) (This)->lpVtbl->AddRef(This) #define DataSourceListener_Release(This) (This)->lpVtbl->Release(This) #define DataSourceListener_dataMemberChanged(This,bstrDM) (This)->lpVtbl->dataMemberChanged(This,bstrDM) #define DataSourceListener_dataMemberAdded(This,bstrDM) (This)->lpVtbl->dataMemberAdded(This,bstrDM) #define DataSourceListener_dataMemberRemoved(This,bstrDM) (This)->lpVtbl->dataMemberRemoved(This,bstrDM) #endif #endif HRESULT WINAPI DataSourceListener_dataMemberChanged_Proxy(DataSourceListener *This,DataMember bstrDM); void __RPC_STUB DataSourceListener_dataMemberChanged_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); HRESULT WINAPI DataSourceListener_dataMemberAdded_Proxy(DataSourceListener *This,DataMember bstrDM); void __RPC_STUB DataSourceListener_dataMemberAdded_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); HRESULT WINAPI DataSourceListener_dataMemberRemoved_Proxy(DataSourceListener *This,DataMember bstrDM); void __RPC_STUB DataSourceListener_dataMemberRemoved_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); #endif #ifndef __DataSource_INTERFACE_DEFINED__ #define __DataSource_INTERFACE_DEFINED__ EXTERN_C const IID IID_DataSource; #if defined(__cplusplus) && !defined(CINTERFACE) struct DataSource : public IUnknown { public: virtual HRESULT WINAPI 
getDataMember(DataMember bstrDM,REFIID riid,IUnknown **ppunk) = 0; virtual HRESULT WINAPI getDataMemberName(__LONG32 lIndex,DataMember *pbstrDM) = 0; virtual HRESULT WINAPI getDataMemberCount(__LONG32 *plCount) = 0; virtual HRESULT WINAPI addDataSourceListener(DataSourceListener *pDSL) = 0; virtual HRESULT WINAPI removeDataSourceListener(DataSourceListener *pDSL) = 0; }; #else typedef struct DataSourceVtbl { BEGIN_INTERFACE HRESULT (WINAPI *QueryInterface)(DataSource *This,REFIID riid,void **ppvObject); ULONG (WINAPI *AddRef)(DataSource *This); ULONG (WINAPI *Release)(DataSource *This); HRESULT (WINAPI *getDataMember)(DataSource *This,DataMember bstrDM,REFIID riid,IUnknown **ppunk); HRESULT (WINAPI *getDataMemberName)(DataSource *This,__LONG32 lIndex,DataMember *pbstrDM); HRESULT (WINAPI *getDataMemberCount)(DataSource *This,__LONG32 *plCount); HRESULT (WINAPI *addDataSourceListener)(DataSource *This,DataSourceListener *pDSL); HRESULT (WINAPI *removeDataSourceListener)(DataSource *This,DataSourceListener *pDSL); END_INTERFACE } DataSourceVtbl; struct DataSource { CONST_VTBL struct DataSourceVtbl *lpVtbl; }; #ifdef COBJMACROS #define DataSource_QueryInterface(This,riid,ppvObject) (This)->lpVtbl->QueryInterface(This,riid,ppvObject) #define DataSource_AddRef(This) (This)->lpVtbl->AddRef(This) #define DataSource_Release(This) (This)->lpVtbl->Release(This) #define DataSource_getDataMember(This,bstrDM,riid,ppunk) (This)->lpVtbl->getDataMember(This,bstrDM,riid,ppunk) #define DataSource_getDataMemberName(This,lIndex,pbstrDM) (This)->lpVtbl->getDataMemberName(This,lIndex,pbstrDM) #define DataSource_getDataMemberCount(This,plCount) (This)->lpVtbl->getDataMemberCount(This,plCount) #define DataSource_addDataSourceListener(This,pDSL) (This)->lpVtbl->addDataSourceListener(This,pDSL) #define DataSource_removeDataSourceListener(This,pDSL) (This)->lpVtbl->removeDataSourceListener(This,pDSL) #endif #endif HRESULT WINAPI DataSource_getDataMember_Proxy(DataSource *This,DataMember 
bstrDM,REFIID riid,IUnknown **ppunk); void __RPC_STUB DataSource_getDataMember_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); HRESULT WINAPI DataSource_getDataMemberName_Proxy(DataSource *This,__LONG32 lIndex,DataMember *pbstrDM); void __RPC_STUB DataSource_getDataMemberName_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); HRESULT WINAPI DataSource_getDataMemberCount_Proxy(DataSource *This,__LONG32 *plCount); void __RPC_STUB DataSource_getDataMemberCount_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); HRESULT WINAPI DataSource_addDataSourceListener_Proxy(DataSource *This,DataSourceListener *pDSL); void __RPC_STUB DataSource_addDataSourceListener_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); HRESULT WINAPI DataSource_removeDataSourceListener_Proxy(DataSource *This,DataSourceListener *pDSL); void __RPC_STUB DataSource_removeDataSourceListener_Stub(IRpcStubBuffer *This,IRpcChannelBuffer *_pRpcChannelBuffer,PRPC_MESSAGE _pRpcMessage,DWORD *_pdwStubPhase); #endif #endif #ifdef __cplusplus } #endif #endif
2,752
1,189
<reponame>oertl/opentelemetry-java /* * Copyright The OpenTelemetry Authors * SPDX-License-Identifier: Apache-2.0 */ package io.opentelemetry.sdk.extension.aws.resource; import static org.assertj.core.api.Assertions.assertThat; import com.google.common.base.Charsets; import com.google.common.io.Files; import java.io.File; import java.io.IOException; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; class DockerHelperTest { @Test void testCgroupFileMissing() { DockerHelper dockerHelper = new DockerHelper("a_file_never_existing"); assertThat(dockerHelper.getContainerId()).isEmpty(); } @Test void testContainerIdMissing(@TempDir File tempFolder) throws IOException { File file = new File(tempFolder, "no_container_id"); String content = "13:pids:/\n" + "12:hugetlb:/\n" + "11:net_prio:/"; Files.write(content.getBytes(Charsets.UTF_8), file); DockerHelper dockerHelper = new DockerHelper(file.getPath()); assertThat(dockerHelper.getContainerId()).isEmpty(); } @Test void testGetContainerId(@TempDir File tempFolder) throws IOException { File file = new File(tempFolder, "cgroup"); String expected = "386a1920640799b5bf5a39bd94e489e5159a88677d96ca822ce7c433ff350163"; String content = "dummy\n11:devices:/ecs/bbc36dd0-5ee0-4007-ba96-c590e0b278d2/" + expected; Files.write(content.getBytes(Charsets.UTF_8), file); DockerHelper dockerHelper = new DockerHelper(file.getPath()); assertThat(dockerHelper.getContainerId()).isEqualTo(expected); } }
563
404
<reponame>yash30401/sbt-android<filename>sbt-test/gradle-build/plaid/app/src/main/java/io/plaidapp/util/glide/DribbbleTarget.java /* * Copyright 2015 Google Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.plaidapp.util.glide; import android.graphics.Bitmap; import android.support.v4.content.ContextCompat; import android.support.v7.graphics.Palette; import com.bumptech.glide.load.resource.bitmap.GlideBitmapDrawable; import com.bumptech.glide.load.resource.drawable.GlideDrawable; import com.bumptech.glide.load.resource.gif.GifDrawable; import com.bumptech.glide.request.animation.GlideAnimation; import com.bumptech.glide.request.target.GlideDrawableImageViewTarget; import io.plaidapp.R; import io.plaidapp.ui.widget.BadgedFourThreeImageView; import io.plaidapp.util.ColorUtils; import io.plaidapp.util.ViewUtils; /** * A Glide {@see ViewTarget} for {@link BadgedFourThreeImageView}s. It applies a badge for animated * images, can prevent GIFs from auto-playing & applies a palette generated ripple. */ public class DribbbleTarget extends GlideDrawableImageViewTarget implements Palette.PaletteAsyncListener { private boolean playGifs; public DribbbleTarget(BadgedFourThreeImageView view, boolean playGifs) { super(view); this.playGifs = playGifs; } @Override public void onResourceReady(GlideDrawable resource, GlideAnimation<? 
super GlideDrawable> animation) { super.onResourceReady(resource, animation); if (!playGifs) { resource.stop(); } BadgedFourThreeImageView badgedImageView = (BadgedFourThreeImageView) getView(); if (resource instanceof GlideBitmapDrawable) { Palette.from(((GlideBitmapDrawable) resource).getBitmap()) .clearFilters() .generate(this); badgedImageView.showBadge(false); } else if (resource instanceof GifDrawable) { Bitmap image = ((GifDrawable) resource).getFirstFrame(); Palette.from(image).clearFilters().generate(this); badgedImageView.showBadge(true); // look at the corner to determine the gif badge color int cornerSize = (int) (56 * getView().getContext().getResources().getDisplayMetrics ().scaledDensity); Bitmap corner = Bitmap.createBitmap(image, image.getWidth() - cornerSize, image.getHeight() - cornerSize, cornerSize, cornerSize); boolean isDark = ColorUtils.isDark(corner); corner.recycle(); badgedImageView.setBadgeColor(ContextCompat.getColor(getView().getContext(), isDark ? R.color.gif_badge_dark_image : R.color.gif_badge_light_image)); } } @Override public void onStart() { if (playGifs) { super.onStart(); } } @Override public void onStop() { if (playGifs) { super.onStop(); } } @Override public void onGenerated(Palette palette) { ((BadgedFourThreeImageView) getView()).setForeground( ViewUtils.createRipple(palette, 0.25f, 0.5f, ContextCompat.getColor(getView().getContext(), R.color.mid_grey), true)); } }
1,555
11,351
package com.netflix.discovery.util;

import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

import com.netflix.appinfo.InstanceInfo;
import com.netflix.discovery.shared.Application;
import com.netflix.discovery.shared.Applications;

/**
 * Collection of functions operating on {@link Applications} and {@link Application} data
 * structures.
 *
 * @author <NAME>
 * @deprecated Use instead {@link EurekaEntityFunctions}
 */
public final class ApplicationFunctions {

    private ApplicationFunctions() {
        // Static utility class; not instantiable.
    }

    /**
     * Groups the given instances into {@link Application}s keyed by application name,
     * creating each application lazily on first occurrence.
     */
    public static Map<String, Application> toApplicationMap(List<InstanceInfo> instances) {
        Map<String, Application> byName = new HashMap<>();
        for (InstanceInfo instanceInfo : instances) {
            String name = instanceInfo.getAppName();
            Application app = byName.get(name);
            if (app == null) {
                app = new Application(name);
                byName.put(name, app);
            }
            app.addInstance(instanceInfo);
        }
        return byName;
    }

    /**
     * Wraps the given applications in an {@link Applications} container with refreshed
     * version/hash metadata (see {@link #updateMeta(Applications)}).
     */
    public static Applications toApplications(Map<String, Application> applicationMap) {
        Applications result = new Applications();
        for (Application app : applicationMap.values()) {
            result.addApplication(app);
        }
        return updateMeta(result);
    }

    /** Returns the set of names of all registered applications. */
    public static Set<String> applicationNames(Applications applications) {
        Set<String> names = new HashSet<>();
        for (Application app : applications.getRegisteredApplications()) {
            names.add(app.getName());
        }
        return names;
    }

    /** Returns a shallow copy of the application (same {@link InstanceInfo} references). */
    public static Application copyOf(Application application) {
        Application duplicate = new Application(application.getName());
        for (InstanceInfo instanceInfo : application.getInstances()) {
            duplicate.addInstance(instanceInfo);
        }
        return duplicate;
    }

    /**
     * Applies {@code second}'s instances as a delta on top of a copy of {@code first}:
     * ADDED/MODIFIED instances are (re)added, DELETED instances are removed.
     *
     * @throws IllegalArgumentException if the two applications have different names.
     */
    public static Application merge(Application first, Application second) {
        if (!first.getName().equals(second.getName())) {
            throw new IllegalArgumentException("Cannot merge applications with different names");
        }
        Application merged = copyOf(first);
        for (InstanceInfo instanceInfo : second.getInstances()) {
            InstanceInfo.ActionType action = instanceInfo.getActionType();
            if (action == InstanceInfo.ActionType.DELETED) {
                merged.removeInstance(instanceInfo);
            } else if (action == InstanceInfo.ActionType.ADDED
                    || action == InstanceInfo.ActionType.MODIFIED) {
                merged.addInstance(instanceInfo);
            }
            // Any other action type is ignored, matching the original switch semantics.
        }
        return merged;
    }

    /**
     * Merges two registries: applications present in both are merged per
     * {@link #merge(Application, Application)}; applications present in only one are copied.
     * The result's metadata is refreshed via {@link #updateMeta(Applications)}.
     */
    public static Applications merge(Applications first, Applications second) {
        Set<String> inFirst = applicationNames(first);
        Set<String> inSecond = applicationNames(second);

        Set<String> union = new HashSet<>(inFirst);
        union.addAll(inSecond);

        Applications merged = new Applications();
        for (String name : union) {
            boolean presentInFirst = inFirst.contains(name);
            boolean presentInSecond = inSecond.contains(name);
            if (presentInFirst && presentInSecond) {
                merged.addApplication(merge(first.getRegisteredApplications(name),
                        second.getRegisteredApplications(name)));
            } else if (presentInFirst) {
                merged.addApplication(copyOf(first.getRegisteredApplications(name)));
            } else {
                merged.addApplication(copyOf(second.getRegisteredApplications(name)));
            }
        }
        return updateMeta(merged);
    }

    /**
     * Resets the version to 1 and recomputes the apps hash code. Mutates and returns the
     * same {@link Applications} instance for chaining.
     */
    public static Applications updateMeta(Applications applications) {
        applications.setVersion(1L);
        applications.setAppsHashCode(applications.getReconcileHashCode());
        return applications;
    }

    /** Total number of instances across all registered applications. */
    public static int countInstances(Applications applications) {
        int total = 0;
        for (Application app : applications.getRegisteredApplications()) {
            total += app.getInstances().size();
        }
        return total;
    }
}
1,566