max_stars_count
int64
301
224k
text
stringlengths
6
1.05M
token_count
int64
3
727k
307
package com.tairanchina.csp.avm.mapper; import com.baomidou.mybatisplus.mapper.BaseMapper; import com.tairanchina.csp.avm.entity.App; import org.apache.ibatis.annotations.Mapper; /** * Created by hzlizx on 2018/5/17 0017 */ @Mapper public interface AppMapper extends BaseMapper<App> { }
116
650
<filename>Userland/Libraries/LibGfx/ICOLoader.cpp /* * Copyright (c) 2020, <NAME> <<EMAIL>> * * SPDX-License-Identifier: BSD-2-Clause */ #include <AK/ByteBuffer.h> #include <AK/Debug.h> #include <AK/LexicalPath.h> #include <AK/MappedFile.h> #include <AK/MemoryStream.h> #include <AK/NonnullOwnPtrVector.h> #include <AK/Types.h> #include <LibGfx/ICOLoader.h> #include <LibGfx/PNGLoader.h> #include <string.h> namespace Gfx { // FIXME: This is in little-endian order. Maybe need a NetworkOrdered<T> equivalent eventually. struct ICONDIR { u16 must_be_0 = 0; u16 must_be_1 = 0; u16 image_count = 0; }; static_assert(AssertSize<ICONDIR, 6>()); struct ICONDIRENTRY { u8 width; u8 height; u8 color_count; u8 reserved_0; u16 planes; u16 bits_per_pixel; u32 size; u32 offset; }; static_assert(AssertSize<ICONDIRENTRY, 16>()); struct [[gnu::packed]] BMPFILEHEADER { u8 signature[2]; u32 size; u16 reserved1; u16 reserved2; u32 offset; }; static_assert(sizeof(BMPFILEHEADER) == 14); struct BITMAPINFOHEADER { u32 size; i32 width; i32 height; u16 planes; u16 bpp; u32 compression; u32 size_image; u32 vres; u32 hres; u32 palette_size; u32 important_colors; }; static_assert(sizeof(BITMAPINFOHEADER) == 40); struct [[gnu::packed]] BMP_ARGB { u8 b; u8 g; u8 r; u8 a; }; static_assert(sizeof(BMP_ARGB) == 4); struct ICOImageDescriptor { u16 width; u16 height; size_t offset; size_t size; RefPtr<Gfx::Bitmap> bitmap; }; struct ICOLoadingContext { enum State { NotDecoded = 0, Error, DirectoryDecoded, BitmapDecoded }; State state { NotDecoded }; const u8* data { nullptr }; size_t data_size { 0 }; Vector<ICOImageDescriptor> images; size_t largest_index; }; RefPtr<Gfx::Bitmap> load_ico(const StringView& path) { auto file_or_error = MappedFile::map(path); if (file_or_error.is_error()) return nullptr; return load_ico_from_memory((u8 const*)file_or_error.value()->data(), file_or_error.value()->size(), LexicalPath::canonicalized_path(path)); } RefPtr<Gfx::Bitmap> load_ico_from_memory(u8 const* data, size_t 
length, String const& mmap_name) { ICOImageDecoderPlugin decoder(data, length); auto bitmap = decoder.bitmap(); if (bitmap) bitmap->set_mmap_name(String::formatted("Gfx::Bitmap [{}] - Decoded ICO: {}", bitmap->size(), mmap_name)); return bitmap; } static Optional<size_t> decode_ico_header(InputMemoryStream& stream) { ICONDIR header; stream >> Bytes { &header, sizeof(header) }; if (stream.handle_any_error()) return {}; if (header.must_be_0 != 0 || header.must_be_1 != 1) return {}; return { header.image_count }; } static Optional<ICOImageDescriptor> decode_ico_direntry(InputMemoryStream& stream) { ICONDIRENTRY entry; stream >> Bytes { &entry, sizeof(entry) }; if (stream.handle_any_error()) return {}; ICOImageDescriptor desc = { entry.width, entry.height, entry.offset, entry.size, nullptr }; if (desc.width == 0) desc.width = 256; if (desc.height == 0) desc.height = 256; return { desc }; } static size_t find_largest_image(const ICOLoadingContext& context) { size_t max_area = 0; size_t index = 0; size_t largest_index = 0; for (const auto& desc : context.images) { if (desc.width * desc.height > max_area) { max_area = desc.width * desc.height; largest_index = index; } ++index; } return largest_index; } static bool load_ico_directory(ICOLoadingContext& context) { InputMemoryStream stream { { context.data, context.data_size } }; auto image_count = decode_ico_header(stream); if (!image_count.has_value() || image_count.value() == 0) { return false; } for (size_t i = 0; i < image_count.value(); ++i) { auto maybe_desc = decode_ico_direntry(stream); if (!maybe_desc.has_value()) { dbgln_if(ICO_DEBUG, "load_ico_directory: error loading entry: {}", i); return false; } auto& desc = maybe_desc.value(); if (desc.offset + desc.size < desc.offset // detect integer overflow || (desc.offset + desc.size) > context.data_size) { dbgln_if(ICO_DEBUG, "load_ico_directory: offset: {} size: {} doesn't fit in ICO size: {}", desc.offset, desc.size, context.data_size); return false; } 
dbgln_if(ICO_DEBUG, "load_ico_directory: index {} width: {} height: {} offset: {} size: {}", i, desc.width, desc.height, desc.offset, desc.size); context.images.append(desc); } context.largest_index = find_largest_image(context); context.state = ICOLoadingContext::State::DirectoryDecoded; return true; } static bool load_ico_bmp(ICOLoadingContext& context, ICOImageDescriptor& desc) { BITMAPINFOHEADER info; if (desc.size < sizeof(info)) return false; memcpy(&info, context.data + desc.offset, sizeof(info)); if (info.size != sizeof(info)) { dbgln_if(ICO_DEBUG, "load_ico_bmp: info size: {}, expected: {}", info.size, sizeof(info)); return false; } if (info.width < 0) { dbgln_if(ICO_DEBUG, "load_ico_bmp: width {} < 0", info.width); return false; } if (info.height == NumericLimits<i32>::min()) { dbgln_if(ICO_DEBUG, "load_ico_bmp: height == NumericLimits<i32>::min()"); return false; } bool topdown = false; if (info.height < 0) { topdown = true; info.height = -info.height; } if (info.planes != 1) { dbgln_if(ICO_DEBUG, "load_ico_bmp: planes: {} != 1", info.planes); return false; } if (info.bpp != 32) { dbgln_if(ICO_DEBUG, "load_ico_bmp: unsupported bpp: {}", info.bpp); return false; } dbgln_if(ICO_DEBUG, "load_ico_bmp: width: {} height: {} direction: {} bpp: {} size_image: {}", info.width, info.height, topdown ? 
"TopDown" : "BottomUp", info.bpp, info.size_image); if (info.compression != 0 || info.palette_size != 0 || info.important_colors != 0) { dbgln_if(ICO_DEBUG, "load_ico_bmp: following fields must be 0: compression: {} palette_size: {} important_colors: {}", info.compression, info.palette_size, info.important_colors); return false; } if (info.width != desc.width || info.height != 2 * desc.height) { dbgln_if(ICO_DEBUG, "load_ico_bmp: size mismatch: ico {}x{}, bmp {}x{}", desc.width, desc.height, info.width, info.height); return false; } // Mask is 1bpp, and each row must be 4-byte aligned size_t mask_row_len = align_up_to(align_up_to(desc.width, 8) / 8, 4); size_t required_len = desc.height * (desc.width * sizeof(BMP_ARGB) + mask_row_len); size_t available_len = desc.size - sizeof(info); if (required_len > available_len) { dbgln_if(ICO_DEBUG, "load_ico_bmp: required_len: {} > available_len: {}", required_len, available_len); return false; } desc.bitmap = Bitmap::try_create(BitmapFormat::BGRA8888, { desc.width, desc.height }); if (!desc.bitmap) return false; Bitmap& bitmap = *desc.bitmap; const u8* image_base = context.data + desc.offset + sizeof(info); const BMP_ARGB* data_base = (const BMP_ARGB*)image_base; const u8* mask_base = image_base + desc.width * desc.height * sizeof(BMP_ARGB); for (int y = 0; y < desc.height; y++) { const u8* row_mask = mask_base + mask_row_len * y; const BMP_ARGB* row_data = data_base + desc.width * y; for (int x = 0; x < desc.width; x++) { u8 mask = !!(row_mask[x / 8] & (0x80 >> (x % 8))); BMP_ARGB data = row_data[x]; bitmap.set_pixel(x, topdown ? y : desc.height - y - 1, Color(data.r, data.g, data.b, mask ? 
0 : data.a)); } } return true; } static bool load_ico_bitmap(ICOLoadingContext& context, Optional<size_t> index) { if (context.state < ICOLoadingContext::State::DirectoryDecoded) { if (!load_ico_directory(context)) { context.state = ICOLoadingContext::State::Error; return false; } context.state = ICOLoadingContext::State::DirectoryDecoded; } size_t real_index = context.largest_index; if (index.has_value()) real_index = index.value(); if (real_index >= context.images.size()) { return false; } ICOImageDescriptor& desc = context.images[real_index]; PNGImageDecoderPlugin png_decoder(context.data + desc.offset, desc.size); if (png_decoder.sniff()) { desc.bitmap = png_decoder.bitmap(); if (!desc.bitmap) { dbgln_if(ICO_DEBUG, "load_ico_bitmap: failed to load PNG encoded image index: {}", real_index); return false; } return true; } else { if (!load_ico_bmp(context, desc)) { dbgln_if(ICO_DEBUG, "load_ico_bitmap: failed to load BMP encoded image index: {}", real_index); return false; } return true; } } ICOImageDecoderPlugin::ICOImageDecoderPlugin(const u8* data, size_t size) { m_context = make<ICOLoadingContext>(); m_context->data = data; m_context->data_size = size; } ICOImageDecoderPlugin::~ICOImageDecoderPlugin() { } IntSize ICOImageDecoderPlugin::size() { if (m_context->state == ICOLoadingContext::State::Error) { return {}; } if (m_context->state < ICOLoadingContext::State::DirectoryDecoded) { if (!load_ico_directory(*m_context)) { m_context->state = ICOLoadingContext::State::Error; return {}; } m_context->state = ICOLoadingContext::State::DirectoryDecoded; } return { m_context->images[m_context->largest_index].width, m_context->images[m_context->largest_index].height }; } RefPtr<Gfx::Bitmap> ICOImageDecoderPlugin::bitmap() { if (m_context->state == ICOLoadingContext::State::Error) return nullptr; if (m_context->state < ICOLoadingContext::State::BitmapDecoded) { // NOTE: This forces the chunk decoding to happen. 
bool success = load_ico_bitmap(*m_context, {}); if (!success) { m_context->state = ICOLoadingContext::State::Error; return nullptr; } m_context->state = ICOLoadingContext::State::BitmapDecoded; } VERIFY(m_context->images[m_context->largest_index].bitmap); return m_context->images[m_context->largest_index].bitmap; } void ICOImageDecoderPlugin::set_volatile() { if (m_context->images[0].bitmap) m_context->images[0].bitmap->set_volatile(); } bool ICOImageDecoderPlugin::set_nonvolatile(bool& was_purged) { if (!m_context->images[0].bitmap) return false; return m_context->images[0].bitmap->set_nonvolatile(was_purged); } bool ICOImageDecoderPlugin::sniff() { InputMemoryStream stream { { m_context->data, m_context->data_size } }; return decode_ico_header(stream).has_value(); } bool ICOImageDecoderPlugin::is_animated() { return false; } size_t ICOImageDecoderPlugin::loop_count() { return 0; } size_t ICOImageDecoderPlugin::frame_count() { return 1; } ImageFrameDescriptor ICOImageDecoderPlugin::frame(size_t i) { if (i > 0) return {}; return { bitmap(), 0 }; } }
4,911
380
package org.gluu.oxauth.interop; import static org.gluu.oxauth.model.common.GrantType.AUTHORIZATION_CODE; import static org.gluu.oxauth.model.common.ResponseType.CODE; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertNotNull; import java.util.Arrays; import org.gluu.oxauth.BaseTest; import org.gluu.oxauth.client.RegisterClient; import org.gluu.oxauth.client.RegisterRequest; import org.gluu.oxauth.client.RegisterResponse; import org.gluu.oxauth.model.common.AuthenticationMethod; import org.gluu.oxauth.model.register.ApplicationType; import org.gluu.oxauth.model.util.StringUtils; import org.testng.annotations.Parameters; import org.testng.annotations.Test; /** * OP-3rd_party-init-login-nohttps * * @author <NAME> * @version October 22, 2019 */ public class Supports3rdPartyInitLoginNoHttps extends BaseTest { @Parameters({"redirectUri", "clientJwksUri", "postLogoutRedirectUri"}) @Test public void supports3rdPartyInitLoginNoHttps(final String redirectUri, final String clientJwksUri, final String postLogoutRedirectUri) throws Exception { showTitle("supports3rdPartyInitLoginNoHttps"); // 1. 
Register Client RegisterRequest registerRequest = new RegisterRequest(ApplicationType.WEB, "oxAuth test app", StringUtils.spaceSeparatedToList(redirectUri)); registerRequest.setContacts(Arrays.asList("<EMAIL>")); registerRequest.setGrantTypes(Arrays.asList(AUTHORIZATION_CODE)); registerRequest.setResponseTypes(Arrays.asList(CODE)); registerRequest.setInitiateLoginUri("http://client.example.com/start-3rd-party-initiated-sso"); registerRequest.setJwksUri(clientJwksUri); registerRequest.setPostLogoutRedirectUris(Arrays.asList(postLogoutRedirectUri)); registerRequest.setTokenEndpointAuthMethod(AuthenticationMethod.CLIENT_SECRET_BASIC); RegisterClient registerClient = new RegisterClient(registrationEndpoint); registerClient.setRequest(registerRequest); RegisterResponse registerResponse = registerClient.exec(); showClient(registerClient); assertEquals(registerResponse.getStatus(), 400, "Unexpected response code: " + registerResponse.getEntity()); assertNotNull(registerResponse.getEntity(), "The entity is null"); assertNotNull(registerResponse.getErrorType(), "The error type is null"); assertNotNull(registerResponse.getErrorDescription(), "The error description is null"); } }
884
350
/* * Copyright (C) 2005-2017 <NAME> (<EMAIL>). * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * See the file COPYING for License information. */ /* * Compressed 32bit integer keys */ #ifndef UPS_BTREE_KEYS_SIMDCOMP_H #define UPS_BTREE_KEYS_SIMDCOMP_H #ifdef HAVE_SSE2 #include <sstream> #include <iostream> #include <algorithm> #include "0root/root.h" #include "3rdparty/simdcomp/include/simdcomp.h" // Always verify that a file of level N does not include headers > N! #include "3btree/btree_zint32_block.h" #ifndef UPS_ROOT_H # error "root.h was not included" #endif namespace upscaledb { // // The template classes in this file are wrapped in a separate namespace // to avoid naming clashes with other KeyLists // namespace Zint32 { // This structure is an "index" entry which describes the location // of a variable-length block #include "1base/packstart.h" UPS_PACK_0 struct UPS_PACK_1 SimdCompIndex : IndexBase { enum { // Initial size of a new block (1 bit per key = 16 bytes) kInitialBlockSize = 16, // Maximum keys per block (a compressed block holds up to 128 keys, // and one key is stored in the index) kMaxKeysPerBlock = 128 + 1, }; // initialize this block index void initialize(uint32_t offset, uint8_t *block_data, uint32_t block_size) { IndexBase::initialize(offset, block_data, block_size); _bits = block_size / 16; _key_count = 0; } // returns the used block of the block uint32_t used_size() const { return block_size(); } // sets the used size; not required void 
set_used_size(uint32_t size) { // nop } // returns the total block size uint32_t block_size() const { return _bits * 128 / 8; } // sets the block size; not required void set_block_size(uint32_t new_size) { // nop } // returns the key count uint32_t key_count() const { return _key_count; } // sets the key count void set_key_count(uint32_t key_count) { _key_count = key_count; } // returns the bits used to encode the block uint32_t bits() const { return _bits; } // sets the bits used to encode the block void set_bits(uint32_t bits) { _bits = bits; } // copies this block to the |dest| block void copy_to(const uint8_t *block_data, SimdCompIndex *dest, uint8_t *dest_data) { assert(dest->bits() == bits()); dest->set_value(value()); dest->set_key_count(key_count()); dest->set_highest(highest()); ::memcpy(dest_data, block_data, block_size()); } // the number of keys in this block; max 129 (kMaxKeysPerBlock) unsigned short _key_count : 8; // stored bits per integer; max 32 unsigned short _bits : 6; } UPS_PACK_2; #include "1base/packstop.h" struct SimdCompCodecImpl : BlockCodecBase<SimdCompIndex> { enum { kHasCompressApi = 1, kHasFindLowerBoundApi = 1, kHasSelectApi = 1, kHasAppendApi = 1, kHasDelApi = 1, }; static uint32_t compress_block(SimdCompIndex *index, const uint32_t *in, uint32_t *out) { assert(index->key_count() > 0); simdpackwithoutmaskd1(index->value(), in, (__m128i *)out, index->bits()); return index->used_size(); } static uint32_t *uncompress_block(SimdCompIndex *index, const uint32_t *block_data, uint32_t *out) { simdunpackd1(index->value(), (__m128i *)block_data, out, index->bits()); return out; } static int find_lower_bound(SimdCompIndex *index, const uint32_t *block_data, uint32_t key, uint32_t *presult) { return simdsearchwithlengthd1(index->value(), (const __m128i *)block_data, index->bits(), (int)index->key_count() - 1, key, presult); } // Returns a decompressed value static uint32_t select(SimdCompIndex *index, uint32_t *block_data, int 
position_in_block) { return simdselectd1(index->value(), (const __m128i *)block_data, index->bits(), position_in_block); } static bool append(SimdCompIndex *index, uint32_t *in32, uint32_t key, int *pslot) { // 32 bits: don't store delta if (unlikely(index->bits() == 32)) simdfastset((__m128i *)in32, index->bits(), key, index->key_count() - 1); else simdfastset((__m128i *)in32, index->bits(), key - index->highest(), index->key_count() - 1); index->set_key_count(index->key_count() + 1); *pslot += index->key_count() - 1; return true; } template<typename GrowHandler> static void del(SimdCompIndex *index, uint32_t *block_data, int slot, GrowHandler *key_list) { // The key is now deleted from the block, and afterwards the block // is compressed again. The simdcomp algorithm is not delete-stable, // which means that after compression it might require more storage // than before. If this is the case an Exception is thrown and the // caller will provide more space. // // !! // Make sure that this code path is Exception Safe! Do not modify any // persistent data until we are 100% sure that no exception will be // thrown! // uncompress the block and remove the key uint32_t data[128]; uncompress_block(index, block_data, data); // delete the first value? if (slot == 0) { index->set_value(data[0]); slot++; } if (slot < (int)index->key_count() - 1) ::memmove(&data[slot - 1], &data[slot], sizeof(uint32_t) * (index->key_count() - slot - 1)); // grow the block? if (unlikely(index->bits() < 32 && slot < (int)index->key_count() - 1)) { uint32_t new_bits; assert(slot > 0); if (unlikely(slot == 1)) new_bits = bits(data[0] - index->value()); else new_bits = bits(data[slot - 1] - data[slot - 2]); if (new_bits > index->bits()) { // yes, try to grow; this will cause a split if it fails uint32_t new_size = new_bits * 128 / 8; key_list->grow_block_size(index, new_size); index->set_bits(new_bits); } } index->set_key_count(index->key_count() - 1); // update the cached highest block value? 
if (unlikely(index->key_count() <= 1)) index->set_highest(index->value()); else index->set_highest(data[index->key_count() - 2]); if (likely(index->key_count() > 1)) compress_block(index, data, block_data); } static uint32_t estimate_required_size(SimdCompIndex *index, uint8_t *block_data, uint32_t key) { /* not used */ assert(!"shouldn't be here"); return 0; } }; typedef Zint32Codec<SimdCompIndex, SimdCompCodecImpl> SimdCompCodec; class SimdCompKeyList : public BlockKeyList<SimdCompCodec> { public: // Constructor SimdCompKeyList(LocalDb *db, PBtreeNode *node) : BlockKeyList<SimdCompCodec>(db, node) { } // Copies all keys from this[sstart] to dest[dstart]; this method // is used to split and merge btree nodes. void copy_to(int sstart, size_t node_count, SimdCompKeyList &dest, size_t other_count, int dstart) { assert(check_integrity(0, node_count)); // if the destination node is empty (often the case when merging nodes) // then re-initialize it. if (other_count == 0) dest.initialize(); // find the start block int src_position_in_block; Index *srci = find_block_by_slot(sstart, &src_position_in_block); // find the destination block int dst_position_in_block; Index *dsti = dest.find_block_by_slot(dstart, &dst_position_in_block); bool initial_block_used = false; // If start offset or destination offset > 0: uncompress both blocks, // merge them if (src_position_in_block > 0 || dst_position_in_block > 0) { uint32_t sdata_buf[Index::kMaxKeysPerBlock]; uint32_t ddata_buf[Index::kMaxKeysPerBlock]; uint32_t *sdata = uncompress_block(srci, &sdata_buf[0]); uint32_t *ddata = dest.uncompress_block(dsti, &ddata_buf[0]); uint32_t *d = &ddata[srci->key_count()]; dsti->set_highest(srci->highest()); if (src_position_in_block == 0) { assert(dst_position_in_block != 0); srci->set_highest(srci->value()); *d = srci->value(); d++; } else { assert(dst_position_in_block == 0); dsti->set_value(sdata[src_position_in_block - 1]); if (src_position_in_block == 1) 
srci->set_highest(sdata[src_position_in_block - 1]); else srci->set_highest(sdata[src_position_in_block - 2]); src_position_in_block++; } dsti->set_key_count(dsti->key_count() + 1); for (int i = src_position_in_block; i < (int)srci->key_count(); i++) { ddata[dsti->key_count() - 1] = sdata[i - 1]; dsti->set_key_count(dsti->key_count() + 1); } srci->set_key_count(srci->key_count() - dsti->key_count()); if (srci->key_count() == 1) srci->set_highest(srci->value()); // grow destination block? if (dsti->bits() < 32) { uint32_t new_bits = calc_max_bits(dsti->value(), &ddata[0], dsti->key_count() - 1); if (new_bits > dsti->bits()) { uint32_t new_size = new_bits * 128 / 8; dest.grow_block_size(dsti, new_size); dsti->set_bits(new_bits); } } dest.compress_block(dsti, ddata); srci++; dsti++; initial_block_used = true; } // When merging nodes, check if we actually append to the other node if (dst_position_in_block == 0 && dstart > 0) initial_block_used = true; // forces loop below to create a new block // Now copy the remaining blocks (w/o uncompressing them) // TODO this could be sped up by adding multiple blocks in one step int copied_blocks = 0; for (; srci < block_index(block_count()); srci++, copied_blocks++) { if (initial_block_used == true) dsti = dest.add_block(dest.block_count(), srci->block_size()); else if (dsti->bits() < srci->bits()) { dest.grow_block_size(dsti, srci->block_size()); dsti->set_bits(srci->bits()); initial_block_used = true; } srci->copy_to(block_data(srci), dsti, dest.block_data(dsti)); } // remove the copied blocks uint8_t *pend = &data_[used_size()]; uint8_t *pold = (uint8_t *)block_index(block_count()); uint8_t *pnew = (uint8_t *)block_index(block_count() - copied_blocks); ::memmove(pnew, pold, pend - pold); set_block_count(block_count() - copied_blocks); reset_used_size(); // we need at least ONE empty block, otherwise a few functions will bail if (block_count() == 0) { initialize(); } assert(dest.check_integrity(0, other_count + (node_count - 
sstart))); assert(check_integrity(0, sstart)); } private: // Returns the number of bits required to store a block uint32_t calc_max_bits(uint32_t initial_value, uint32_t *data, uint32_t length) const { if (unlikely(length == 0)) return 1; return simdmaxbitsd1_length(initial_value, data, length); } // Implementation for insert() virtual PBtreeNode::InsertResult insert_impl(size_t node_count, uint32_t key, uint32_t flags) { int slot = 0; block_cache.is_active = false; // perform a linear search through the index and get the block // which will receive the new key Index *index = find_index(key, &slot); // first key in an empty block? then don't store a delta if (unlikely(index->key_count() == 0)) { index->set_key_count(1); index->set_value(key); index->set_highest(key); return PBtreeNode::InsertResult(0, slot); } // fail if the key already exists if (unlikely(key == index->value() || key == index->highest())) return PBtreeNode::InsertResult(UPS_DUPLICATE_KEY, slot); uint32_t new_data[Index::kMaxKeysPerBlock]; uint32_t datap[Index::kMaxKeysPerBlock]; // A split is required if the block maxxed out the keys or if // (used_size >= block_size and block_size >= max_size) bool requires_split = index->key_count() + 1 >= (SimdCompIndex::kMaxKeysPerBlock + 1); // grow the block if it is full if (unlikely(requires_split)) { int block = index - block_index(0); // if the new key is prepended then also prepend the new block if (key < index->value()) { Index *new_index = add_block(block + 1, SimdCompIndex::kInitialBlockSize); new_index->set_key_count(1); new_index->set_value(key); new_index->set_highest(key); // swap the indices, done std::swap(*index, *new_index); assert(check_integrity(0, node_count + 1)); return PBtreeNode::InsertResult(0, slot < 0 ? 
0 : slot); } // if the new key is appended then also append the new block if (key > index->highest()) { Index *new_index = add_block(block + 1, SimdCompIndex::kInitialBlockSize); new_index->set_key_count(1); new_index->set_value(key); new_index->set_highest(key); assert(check_integrity(0, node_count + 1)); return PBtreeNode::InsertResult(0, slot + index->key_count()); } // otherwise split the block in the middle and move half of the keys // to the new block. // // The pivot position is aligned to 4. uint32_t *data = uncompress_block(index, datap); uint32_t to_copy = (index->key_count() / 2) & ~0x03; assert(to_copy > 0); uint32_t new_key_count = index->key_count() - to_copy - 1; uint32_t new_value = data[to_copy]; // once more check if the key already exists if (unlikely(new_value == key)) return PBtreeNode::InsertResult(UPS_DUPLICATE_KEY, slot + to_copy); to_copy++; ::memmove(&new_data[0], &data[to_copy], sizeof(int32_t) * (index->key_count() - to_copy)); // calculate the required bits for the new block uint32_t required_bits = calc_max_bits(new_value, &new_data[0], new_key_count - 1); // Now create a new block. This can throw, but so far we have not // modified existing data. Index *new_index = add_block(block + 1, required_bits * 128 / 8); new_index->set_value(new_value); new_index->set_highest(index->highest()); new_index->set_key_count(new_key_count); // Adjust the size of the old block index->set_key_count(index->key_count() - new_key_count); index->set_highest(data[to_copy - 2]); // Now check if the new key will be inserted in the old or the new block if (key >= new_index->value()) { compress_block(index, data); slot += index->key_count(); // continue with the new block index = new_index; data = new_data; } else { new_index->set_used_size(compress_block(new_index, new_data)); assert(new_index->used_size() <= new_index->block_size()); } // the block was modified and needs to be compressed again, even if // the actual insert operation fails (i.e. 
b/c the key already exists) index->set_used_size(compress_block(index, data)); assert(index->used_size() <= index->block_size()); // fall through... } uint32_t *data = 0; uint32_t required_bits = 0; // check if the block needs to grow; this CAN be the case if the stored // bits are not large enough for the new delta if (key > index->highest()) { required_bits = bits(key - index->highest()); } else if (key < index->value()) { required_bits = bits(index->value() - key); } else if (index->key_count() == 1) { required_bits = bits(key - index->value()); } else { data = uncompress_block(index, datap); if (key < data[0]) required_bits = bits(key - index->value()); } bool resized = false; if (required_bits > index->bits()) { data = uncompress_block(index, datap); grow_block_size(index, required_bits * 128 / 8); index->set_bits(required_bits); resized = true; } // now append or insert the key, but only if the block was not resized; // otherwise the block has to be fully re-encoded if (key > index->highest() && !resized) { SimdCompCodecImpl::append(index, (uint32_t *)block_data(index), key, &slot); } else { if (!data) data = uncompress_block(index, datap); // swap |key| and |index->value| if (key < index->value()) { uint32_t tmp = index->value(); index->set_value(key); key = tmp; } // locate the position of the new key uint32_t *it = data; if (index->key_count() > 1) { uint32_t *end = &data[index->key_count() - 1]; it = std::lower_bound(&data[0], end, key); // if the new key already exists then throw an exception if (unlikely(it < end && *it == key)) return PBtreeNode::InsertResult(UPS_DUPLICATE_KEY, slot + (it - &data[0]) + 1); // insert the new key if (it < end) ::memmove(it + 1, it, (end - it) * sizeof(uint32_t)); } *it = key; slot += it - &data[0] + 1; index->set_key_count(index->key_count() + 1); // then compress and store the block compress_block(index, data); } if (key > index->highest()) index->set_highest(key); assert(check_integrity(0, node_count + 1)); return 
PBtreeNode::InsertResult(0, slot); } // Implementation of vacuumize() void vacuumize_weak() { // This is not implemented. Caller will abort the current operation and // perform a page split. throw Exception(UPS_LIMITS_REACHED); } // Implementation of vacuumize() void vacuumize_full() { block_cache.is_active = false; int capacity = block_count() * SimdCompIndex::kMaxKeysPerBlock; // iterate over all blocks, uncompress them into a big array uint32_t *p = (uint32_t *)::alloca(capacity * sizeof(uint32_t)); uint32_t *p_end = p; Index *index = block_index(0); Index *end = index + block_count(); for (; index < end; index++) { *p_end = index->value(); p_end++; uncompress_block(index, p_end); p_end += index->key_count() - 1; } // now re-build the page initialize(); index = block_index(0); // how many blocks are required? int required_blocks = (p_end - p) / SimdCompIndex::kMaxKeysPerBlock; if (required_blocks * SimdCompIndex::kMaxKeysPerBlock < p_end - p) required_blocks++; set_block_count(required_blocks); // Now create and fill all the blocks uint32_t offset = 0; while (p_end - p >= SimdCompIndex::kMaxKeysPerBlock) { uint32_t required_bits = calc_max_bits(*p, p + 1, 128); uint32_t required_size = required_bits * 128 / 8; index->set_bits(required_bits); index->set_offset(offset); index->set_value(*p); index->set_highest(*(p + 128)); index->set_key_count(SimdCompIndex::kMaxKeysPerBlock); compress_block(index, p + 1); offset += required_size; p += SimdCompIndex::kMaxKeysPerBlock; index++; } // only one key left? then create an empty block with an initial value if (p_end - p == 1) { index->set_value(*p); index->set_highest(*p); index->set_key_count(1); index->set_bits(1); index->set_offset(offset); offset += 16; // minimum block size for 1 bit } // more keys left? 
then create a new block and fill it else if (p_end - p > 1) { uint32_t value = *p; p++; uint32_t required_bits = calc_max_bits(value, p, p_end - p); uint32_t required_size = required_bits * 128 / 8; index->set_offset(offset); index->set_bits(required_bits); index->set_key_count(p_end - p + 1); index->set_value(value); index->set_highest(*(p_end - 1)); compress_block(index, p); offset += required_size; } set_used_size(2 * sizeof(uint32_t) + required_blocks * sizeof(Index) + offset); } }; } // namespace Zint32 } // namespace upscaledb #endif // HAVE_SSE2 #endif // UPS_BTREE_KEYS_SIMDCOMP_H
9,451
305
<filename>demos/real_time_stream_analysis/python/inference_manager.py
#
# Copyright (c) 2022 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import queue
from typing import Tuple
import numpy as np
import threading
import time
import multiprocessing

from logger import get_logger
from inference_executor import InferenceExecutor
from inference_pipeline_handler import InferencePipelineHandler


class InferenceManager:
    """Coordinates a pool of InferenceExecutor workers behind two bounded queues.

    Frames are pushed via schedule_inference() into `inputs_queue`; a pipeline
    thread distributes them round-robin to executor processes and collects
    (frame, result) pairs into `results_queue`, which callers drain with
    pull_result(). A monitoring thread propagates an abort signal raised by
    any executor to `abort_event` so the owner (Stream Analyzer) can react.
    """

    def __init__(self, ovms_url, model_name, model_version, num_inference_executors, binary_input, buffer_size):
        # exit_event: cooperative shutdown flag for both internal threads.
        self.exit_event = threading.Event()
        # abort_event: set when any executor reports a fatal error.
        self.abort_event = threading.Event()
        self.logger = get_logger(__name__)
        # model_version == 0 is the "latest version" convention (display only).
        model_version_str = "latest" if model_version == 0 else model_version
        self.logger.info(f"OVMS Endpoint spec - ovms_url: {ovms_url}; model_name: {model_name}; model_version: {model_version_str}")
        ovms_info = {
            "ovms_url": ovms_url,
            "model_name": model_name,
            "model_version": model_version
        }
        self.logger.info(f"Input buffer capacity set to: {buffer_size} frames")
        # Bounded queues provide back-pressure towards the frame producer.
        self.inputs_queue = queue.Queue(maxsize=buffer_size)
        self.results_queue = queue.Queue(maxsize=buffer_size)
        if binary_input:
            self.logger.info("Using binary input switched on")
        self.logger.info(f"Number of Inference Executors: {num_inference_executors}")
        # Each executor is a separate worker with its own multiprocessing
        # queues (assumes InferenceExecutor is a multiprocessing-based
        # worker — confirm against inference_executor.py).
        self.inference_executors = [InferenceExecutor(i, ovms_info, binary_input,
                                                      input_queue=multiprocessing.Queue(buffer_size),
                                                      result_queue=multiprocessing.Queue(buffer_size))
                                    for i in range(num_inference_executors)]

    def initialize(self) -> None:
        """Starts executor workers plus the pipeline and monitoring threads."""
        self.logger.info("Initializing Inference Manager...")
        self.logger.info("Starting Inference Executors...")
        for inference_executor in self.inference_executors:
            inference_executor.start()
        self.logger.info("Starting inference pipeline thread")
        self.inference_pipeline_thread = threading.Thread(target=self._inference_pipeline_thread)
        self.inference_pipeline_thread.start()
        self.logger.info("Starting inference executors monitoring thread")
        self.inference_executors_monitoring_thread = threading.Thread(target=self._inference_executors_monitoring_thread)
        self.inference_executors_monitoring_thread.start()
        self.logger.info("Inference Manager initialized successfully")

    def shutdown(self) -> None:
        """Stops internal threads first, then shuts down each executor.

        Blocking: joins every thread/worker before returning.
        """
        self.logger.info("Shutting down Inference Manager...")
        self.logger.info("Exiting Inference Manager thread...")
        self.exit_event.set()
        self.inference_pipeline_thread.join()
        self.logger.info("Inference pipeline thread exited successfully")
        self.inference_executors_monitoring_thread.join()
        self.logger.info("Inference executors monitoring thread exited successfully")
        self.logger.info("Shutting down inference executors...")
        for inference_executor in self.inference_executors:
            inference_executor.shutdown()
            inference_executor.join()
            self.logger.info(f"Inference-Executor-{inference_executor.id} shut down successfully")
        self.logger.info("Inference Executors shut down successfully")
        self.logger.info("Inference Manager shut down successfully")

    def schedule_inference(self, frame) -> bool:
        # Non blocking inference scheduling method. Returns True on success.
        # Returns False if buffer is full and new data cannot be scheduled for inference at that moment.
        try:
            self.inputs_queue.put_nowait(frame)
            return True
        except queue.Full:
            return False

    def pull_result(self) -> Tuple[bool, Tuple[np.ndarray, np.ndarray]]:
        # Non blocking results pull method. Returns tuple (status, (frame, result))
        # status == True informs that pull was successful
        # status == False informs that there are no results to be pulled
        # (frame, result) tuple is the actual element pulled from the results queue
        # For status == False, it's set to (None, None) as it's N/A
        try:
            return True, self.results_queue.get_nowait()
        except queue.Empty:
            return False, (None, None)

    def _inference_pipeline_thread(self):
        """Feeds executors round-robin and drives the inference pipeline.

        Runs until `exit_event` is set.
        """
        num_inference_executors = len(self.inference_executors)
        # In the first iteration only fill executors inputs
        i = 0
        while i < num_inference_executors:
            try:
                # 1s timeout keeps the loop responsive to exit_event.
                input = self.inputs_queue.get(timeout=1)
                self.inference_executors[i].input_queue.put(input)
            except queue.Empty:
                if self.exit_event.is_set():
                    return
                continue
            i += 1
        inference_pipeline_handler = InferencePipelineHandler(self.inputs_queue, self.results_queue)
        initial_pipeline_step = InferencePipelineHandler.PipelineStep.PULL_RESULT
        execution_result = InferencePipelineHandler.ExecutionResult(initial_pipeline_step, None)
        i = 0
        while not self.exit_event.is_set():
            inference_executor = self.inference_executors[i]
            previous_execution_result = execution_result
            execution_result = inference_pipeline_handler.run_inference_pipeline(inference_executor, previous_execution_result)
            # Advance to the next executor only once the full pull/push cycle
            # for the current one completed; otherwise retry the same step.
            if execution_result.pipeline_step == InferencePipelineHandler.PipelineStep.FINISHED:
                i = (i + 1) % num_inference_executors
                execution_result.pipeline_step = initial_pipeline_step

    def _inference_executors_monitoring_thread(self):
        """Polls executors once per second and latches their abort signal."""
        while not self.exit_event.is_set():
            time.sleep(1)
            # Only the first abort is reported; abort_event stays set.
            if not self.abort_event.is_set():
                for inference_executor in self.inference_executors:
                    if inference_executor.abort_event.is_set():
                        self.logger.info(f"Received abort signal from Inference-Executor-{inference_executor.id}. Notifying Stream Analyzer...")
                        self.abort_event.set()
2,034
855
<reponame>vmayoral/Vitis_Libraries
// Copyright 2018 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT.

#include "pik/status.h"

#include <stdarg.h>

#include <string>

namespace pik {

// Formats a printf-style failure message (truncated to 2000 bytes), prints
// it with the failing file/line to stderr and terminates the process via
// exit(1). The trailing `return false` is unreachable; it only exists so
// callers can write `return Abort(...)` in boolean contexts.
bool Abort(const char* f, int l, const char* format, ...) {
  char buf[2000];
  va_list args;
  va_start(args, format);
  vsnprintf(buf, sizeof(buf), format, args);
  va_end(args);

  // NOTE(review): call-stack capture appears unimplemented — this string is
  // always empty, so the second "%s" prints nothing.
  const std::string call_stack;
  fprintf(stderr, "Abort at %s:%d: %s\n%s\n", f, l, buf, call_stack.c_str());
  exit(1);
  return false;
}

// Formats a printf-style warning (truncated to 2000 bytes) and writes it,
// newline-terminated, to stderr. Does not abort.
void Warning(const char* format, ...) {
  char buf[2000];
  va_list args;
  va_start(args, format);
  vsnprintf(buf, sizeof(buf), format, args);
  va_end(args);
  fprintf(stderr, "%s\n", buf);
}

}  // namespace pik
327
942
#include "../src/tmongocursor.h"
14
4,512
<gh_stars>1000+ """ weasyprint.tests ---------------- The Weasyprint test suite. """
38
2,761
#include <clog.h> #ifdef BUILDING_TRACEPOINT_PROVIDER #define TRACEPOINT_CREATE_PROBES #else #define TRACEPOINT_DEFINE #endif #include "stream.c.clog.h"
70
2,611
<gh_stars>1000+
# Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for metrics."""

import lingvo.compat as tf
from lingvo.core import ml_perf_bleu_metric
from lingvo.core import test_utils


class MlPerfMetricsTest(test_utils.TestCase):
  """Unit test for the MLPerf BLEU metric wrapper."""

  def testMlPerfBleuMetric(self):
    # Accumulate two (hypothesis, reference) pairs, then check the corpus
    # BLEU value. The second pair includes a unicode line separator
    # (\u2028) to exercise tokenization of non-ASCII whitespace.
    m = ml_perf_bleu_metric.MlPerfBleuMetric()
    m.Update(u"a b a z", u"a b a c")
    m.Update(u"y f g d k l m", u"e f \u2028 d")
    # Golden value; loose tolerance since BLEU is a float computation.
    self.assertAllClose(0.2638, m.value, atol=1e-03)


if __name__ == "__main__":
  tf.test.main()
378
445
// SPDX-License-Identifier: Apache-2.0

#include "kompute/Sequence.hpp"

namespace kp {

// Builds a sequence on top of an already-created physical device / device /
// compute queue triple. When totalTimestamps > 0, a timestamp query pool is
// created with one extra slot (+1) for the baseline timestamp written in
// begin() before any operation is recorded.
Sequence::Sequence(std::shared_ptr<vk::PhysicalDevice> physicalDevice,
                   std::shared_ptr<vk::Device> device,
                   std::shared_ptr<vk::Queue> computeQueue,
                   uint32_t queueIndex,
                   uint32_t totalTimestamps)
{
    KP_LOG_DEBUG("Kompute Sequence Constructor with existing device & queue");

    this->mPhysicalDevice = physicalDevice;
    this->mDevice = device;
    this->mComputeQueue = computeQueue;
    this->mQueueIndex = queueIndex;

    this->createCommandPool();
    this->createCommandBuffer();
    if (totalTimestamps > 0)
        this->createTimestampQueryPool(totalTimestamps +
                                       1); //+1 for the first one
}

// Destroys owned Vulkan objects via destroy() if the device is still alive.
Sequence::~Sequence()
{
    KP_LOG_DEBUG("Kompute Sequence Destructor started");

    if (this->mDevice) {
        this->destroy();
    }
}

// Starts command-buffer recording. No-op when already recording; throws if
// a previous submission is still in flight.
void
Sequence::begin()
{
    KP_LOG_DEBUG("Kompute sequence called BEGIN");

    if (this->isRecording()) {
        KP_LOG_DEBUG("Kompute Sequence begin called when already recording");
        return;
    }

    if (this->isRunning()) {
        throw std::runtime_error(
          "Kompute Sequence begin called when sequence still running");
    }

    KP_LOG_INFO("Kompute Sequence command now started recording");
    this->mCommandBuffer->begin(vk::CommandBufferBeginInfo());
    this->mRecording = true;

    // latch the first timestamp before any commands are submitted
    if (this->timestampQueryPool)
        this->mCommandBuffer->writeTimestamp(
          vk::PipelineStageFlagBits::eAllCommands,
          *this->timestampQueryPool,
          0);
}

// Finishes command-buffer recording. Warns (no-op) when not recording;
// throws if the sequence is still running.
void
Sequence::end()
{
    KP_LOG_DEBUG("Kompute Sequence calling END");

    if (this->isRunning()) {
        throw std::runtime_error(
          "Kompute Sequence begin called when sequence still running");
    }

    if (!this->isRecording()) {
        KP_LOG_WARN("Kompute Sequence end called when not recording");
        return;
    } else {
        KP_LOG_INFO("Kompute Sequence command recording END");
        this->mCommandBuffer->end();
        this->mRecording = false;
    }
}

// Closes any open recording. NOTE(review): despite the name this does not
// clear mOperations here; recorded ops are cleared in destroy()/rerecord().
void
Sequence::clear()
{
    KP_LOG_DEBUG("Kompute Sequence calling clear");
    if (this->isRecording()) {
        this->end();
    }
}

// Synchronous evaluation: submit asynchronously, then block on the fence.
std::shared_ptr<Sequence>
Sequence::eval()
{
    KP_LOG_DEBUG("Kompute sequence EVAL BEGIN");

    return this->evalAsync()->evalAwait();
}

// Convenience overload: clears, records a single op and evaluates it.
std::shared_ptr<Sequence>
Sequence::eval(std::shared_ptr<OpBase> op)
{
    this->clear();
    return this->record(op)->eval();
}

// Submits the recorded command buffer to the compute queue without waiting.
// Runs each op's preEval hook first; creates a fresh fence for this
// submission. Throws if a prior async eval was not awaited.
std::shared_ptr<Sequence>
Sequence::evalAsync()
{
    if (this->isRecording()) {
        this->end();
    }

    if (this->mIsRunning) {
        throw std::runtime_error(
          "Kompute Sequence evalAsync called when an eval async was "
          "called without successful wait");
    }

    this->mIsRunning = true;

    for (size_t i = 0; i < this->mOperations.size(); i++) {
        this->mOperations[i]->preEval(*this->mCommandBuffer);
    }

    vk::SubmitInfo submitInfo(
      0, nullptr, nullptr, 1, this->mCommandBuffer.get());

    this->mFence = this->mDevice->createFence(vk::FenceCreateInfo());

    KP_LOG_DEBUG(
      "Kompute sequence submitting command buffer into compute queue");

    this->mComputeQueue->submit(1, &submitInfo, this->mFence);

    return shared_from_this();
}

// Convenience overload: clears, records a single op and submits it.
std::shared_ptr<Sequence>
Sequence::evalAsync(std::shared_ptr<OpBase> op)
{
    this->clear();
    this->record(op);
    this->evalAsync();
    return shared_from_this();
}

// Waits (up to waitFor nanoseconds) for the submitted work, destroys the
// fence, and runs each op's postEval hook. On timeout the postEval hooks
// are skipped. Warns (no-op) if nothing was submitted.
std::shared_ptr<Sequence>
Sequence::evalAwait(uint64_t waitFor)
{
    if (!this->mIsRunning) {
        KP_LOG_WARN("Kompute Sequence evalAwait called without existing eval");
        return shared_from_this();
    }

    vk::Result result =
      this->mDevice->waitForFences(1, &this->mFence, VK_TRUE, waitFor);
    this->mDevice->destroy(
      this->mFence, (vk::Optional<const vk::AllocationCallbacks>)nullptr);

    this->mIsRunning = false;

    if (result == vk::Result::eTimeout) {
        KP_LOG_WARN("Kompute Sequence evalAwait reached timeout of {}",
                    waitFor);
        return shared_from_this();
    }

    for (size_t i = 0; i < this->mOperations.size(); i++) {
        this->mOperations[i]->postEval(*this->mCommandBuffer);
    }

    return shared_from_this();
}

// True while a submission has not been successfully awaited.
bool
Sequence::isRunning()
{
    return this->mIsRunning;
}

// True while the command buffer is open for recording.
bool
Sequence::isRecording()
{
    return this->mRecording;
}

// True when all core Vulkan handles this sequence needs are present.
bool
Sequence::isInit()
{
    return this->mDevice && this->mCommandPool && this->mCommandBuffer &&
           this->mComputeQueue;
}

// Re-records all previously recorded operations into a fresh command
// buffer state (e.g. after tensors were re-created).
void
Sequence::rerecord()
{
    this->end();
    std::vector<std::shared_ptr<OpBase>> ops = this->mOperations;
    this->mOperations.clear();
    for (const std::shared_ptr<kp::OpBase>& op : ops) {
        this->record(op);
    }
}

// Frees the command buffer, command pool and query pool (in that order) and
// drops the device/queue references. Safe to call more than once.
void
Sequence::destroy()
{
    KP_LOG_DEBUG("Kompute Sequence destroy called");

    if (!this->mDevice) {
        KP_LOG_WARN("Kompute Sequence destroy called "
                    "with null Device pointer");
        return;
    }

    if (this->mFreeCommandBuffer) {
        KP_LOG_INFO("Freeing CommandBuffer");
        if (!this->mCommandBuffer) {
            // NOTE(review): early return here skips pool/query-pool cleanup;
            // relies on this state being unreachable in practice.
            KP_LOG_WARN("Kompute Sequence destroy called with null "
                        "CommandPool pointer");
            return;
        }
        this->mDevice->freeCommandBuffers(
          *this->mCommandPool, 1, this->mCommandBuffer.get());

        this->mCommandBuffer = nullptr;
        this->mFreeCommandBuffer = false;

        KP_LOG_DEBUG("Kompute Sequence Freed CommandBuffer");
    }

    if (this->mFreeCommandPool) {
        KP_LOG_INFO("Destroying CommandPool");
        if (this->mCommandPool == nullptr) {
            KP_LOG_WARN("Kompute Sequence destroy called with null "
                        "CommandPool pointer");
            return;
        }
        this->mDevice->destroy(
          *this->mCommandPool,
          (vk::Optional<const vk::AllocationCallbacks>)nullptr);

        this->mCommandPool = nullptr;
        this->mFreeCommandPool = false;

        KP_LOG_DEBUG("Kompute Sequence Destroyed CommandPool");
    }

    if (this->mOperations.size()) {
        KP_LOG_INFO("Kompute Sequence clearing operations buffer");
        this->mOperations.clear();
    }

    if (this->timestampQueryPool) {
        KP_LOG_INFO("Destroying QueryPool");
        this->mDevice->destroy(
          *this->timestampQueryPool,
          (vk::Optional<const vk::AllocationCallbacks>)nullptr);

        this->timestampQueryPool = nullptr;

        KP_LOG_DEBUG("Kompute Sequence Destroyed QueryPool");
    }

    if (this->mDevice) {
        this->mDevice = nullptr;
    }
    if (this->mPhysicalDevice) {
        this->mPhysicalDevice = nullptr;
    }
    if (this->mComputeQueue) {
        this->mComputeQueue = nullptr;
    }
}

// Records one operation into the command buffer (opening a recording if
// needed) and, when profiling is enabled, writes a timestamp after it at
// query index == number of recorded ops.
std::shared_ptr<Sequence>
Sequence::record(std::shared_ptr<OpBase> op)
{
    KP_LOG_DEBUG("Kompute Sequence record function started");

    this->begin();

    KP_LOG_DEBUG(
      "Kompute Sequence running record on OpBase derived class instance");

    op->record(*this->mCommandBuffer);

    this->mOperations.push_back(op);

    if (this->timestampQueryPool)
        this->mCommandBuffer->writeTimestamp(
          vk::PipelineStageFlagBits::eAllCommands,
          *this->timestampQueryPool,
          this->mOperations.size());

    return shared_from_this();
}

// Creates the command pool on the stored queue family index.
void
Sequence::createCommandPool()
{
    KP_LOG_DEBUG("Kompute Sequence creating command pool");

    if (!this->mDevice) {
        throw std::runtime_error("Kompute Sequence device is null");
    }
    if (this->mQueueIndex < 0) {
        throw std::runtime_error("Kompute Sequence queue index not provided");
    }

    this->mFreeCommandPool = true;

    vk::CommandPoolCreateInfo commandPoolInfo(vk::CommandPoolCreateFlags(),
                                              this->mQueueIndex);
    this->mCommandPool = std::make_shared<vk::CommandPool>();
    this->mDevice->createCommandPool(
      &commandPoolInfo, nullptr, this->mCommandPool.get());
    KP_LOG_DEBUG("Kompute Sequence Command Pool Created");
}

// Allocates a single primary command buffer out of the command pool.
void
Sequence::createCommandBuffer()
{
    KP_LOG_DEBUG("Kompute Sequence creating command buffer");
    if (!this->mDevice) {
        throw std::runtime_error("Kompute Sequence device is null");
    }
    if (!this->mCommandPool) {
        throw std::runtime_error("Kompute Sequence command pool is null");
    }

    this->mFreeCommandBuffer = true;

    vk::CommandBufferAllocateInfo commandBufferAllocateInfo(
      *this->mCommandPool, vk::CommandBufferLevel::ePrimary, 1);

    this->mCommandBuffer = std::make_shared<vk::CommandBuffer>();
    this->mDevice->allocateCommandBuffers(&commandBufferAllocateInfo,
                                          this->mCommandBuffer.get());
    KP_LOG_DEBUG("Kompute Sequence Command Buffer Created");
}

// Creates the timestamp query pool; requires device support for timestamps
// on compute/graphics queues (throws otherwise).
void
Sequence::createTimestampQueryPool(uint32_t totalTimestamps)
{
    KP_LOG_DEBUG("Kompute Sequence creating query pool");
    if (!this->isInit()) {
        throw std::runtime_error(
          "createTimestampQueryPool() called on uninitialized Sequence");
    }
    if (!this->mPhysicalDevice) {
        throw std::runtime_error("Kompute Sequence physical device is null");
    }

    vk::PhysicalDeviceProperties physicalDeviceProperties =
      this->mPhysicalDevice->getProperties();

    if (physicalDeviceProperties.limits.timestampComputeAndGraphics) {
        vk::QueryPoolCreateInfo queryPoolInfo;
        queryPoolInfo.setQueryCount(totalTimestamps);
        queryPoolInfo.setQueryType(vk::QueryType::eTimestamp);
        this->timestampQueryPool = std::make_shared<vk::QueryPool>(
          this->mDevice->createQueryPool(queryPoolInfo));

        KP_LOG_DEBUG("Query pool for timestamps created");
    } else {
        throw std::runtime_error("Device does not support timestamps");
    }
}

// Reads back all latched timestamps (baseline + one per recorded op),
// blocking until results are available (eWait).
std::vector<std::uint64_t>
Sequence::getTimestamps()
{
    if (!this->timestampQueryPool)
        throw std::runtime_error("Timestamp latching not enabled");

    const auto n = this->mOperations.size() + 1;
    std::vector<std::uint64_t> timestamps(n, 0);
    this->mDevice->getQueryPoolResults(
      *this->timestampQueryPool,
      0,
      n,
      timestamps.size() * sizeof(std::uint64_t),
      timestamps.data(),
      sizeof(uint64_t),
      vk::QueryResultFlagBits::e64 | vk::QueryResultFlagBits::eWait);

    return timestamps;
}

}
4,300
743
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.guacamole.auth.jdbc.permission;

import java.util.Collection;
import org.apache.guacamole.auth.jdbc.base.EntityModel;
import org.apache.ibatis.annotations.Param;
import org.apache.guacamole.net.auth.permission.SystemPermission;

/**
 * Mapper for system-level permissions.
 *
 * <p>Presumably a MyBatis mapper whose SQL lives in a companion XML mapping
 * file keyed by the {@code @Param} names below — verify against the
 * corresponding mapper XML.
 */
public interface SystemPermissionMapper extends PermissionMapper<SystemPermissionModel> {

    /**
     * Retrieve the permission of the given type associated with the given
     * entity, if it exists. If no such permission exists, null is returned.
     *
     * @param entity
     *     The entity to retrieve permissions for.
     *
     * @param type
     *     The type of permission to return.
     *
     * @param effectiveGroups
     *     The identifiers of all groups that should be taken into account
     *     when determining the permissions effectively granted to the user. If
     *     no groups are given, only permissions directly granted to the user
     *     will be used.
     *
     * @return
     *     The requested permission, or null if no such permission is granted
     *     to the given entity.
     */
    SystemPermissionModel selectOne(@Param("entity") EntityModel entity,
            @Param("type") SystemPermission.Type type,
            @Param("effectiveGroups") Collection<String> effectiveGroups);

}
654
526
<reponame>rychagova/egeria
/* SPDX-License-Identifier: Apache-2.0 */
/* Copyright Contributors to the ODPi Egeria project. */
package org.odpi.openmetadata.commonservices.ffdc.rest;

import java.util.Collection;
import java.util.List;
import java.util.Optional;

/**
 * If an OMAS uses Generic types to implement its Java API, then the responses can implement this interface.
 */
public interface GenericResponse<R> extends FFDCResponse
{
    /**
     * Add several results for the response
     * @param results collection with results
     * */
    void addAllResults(Collection<? extends R> results);

    /**
     * Add single result for the response
     * @param result - one result
     * */
    void addResult(R result);

    /**
     * Get all results of the response
     *
     * @return results
     **/
    List<R> results();

    /**
     * Get head element from result array.
     * Needed when we know for sure that the answer is single object
     *
     * @return result, or {@link Optional#empty()} when the response holds no results
     **/
    default Optional<R> head()
    {
        // Guard against both a null backing list and an empty one.
        final List<R> results = results();
        if (results != null && !results.isEmpty()) {
            return Optional.of(results.get(0));
        }

        return Optional.empty();
    }
}
440
2,151
// Copyright 2013 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef UI_BASE_COCOA_FOCUS_WINDOW_SET_H_ #define UI_BASE_COCOA_FOCUS_WINDOW_SET_H_ #include <set> #include "ui/base/ui_base_export.h" #include "ui/gfx/native_widget_types.h" namespace ui { // Brings a group of windows to the front without changing their order, and // makes the frontmost one key and main. If none are visible, the frontmost // miniaturized window is deminiaturized. UI_BASE_EXPORT void FocusWindowSet(const std::set<gfx::NativeWindow>& windows); // Brings a group of windows to the front without changing their // order, and makes the frontmost one key and main. If none are // visible, the frontmost miniaturized window is deminiaturized. This // variant is meant to clean up after the system-default Dock icon // behavior. Unlike FocusWindowSet, only windows on the current space // are considered. It also ignores the hidden state of windows; the // window system may be in the middle of unhiding the application. UI_BASE_EXPORT void FocusWindowSetOnCurrentSpace( const std::set<gfx::NativeWindow>& windows); } // namespace ui #endif // UI_BASE_COCOA_FOCUS_WINDOW_SET_H_
392
1,025
//================================================================================== // Copyright (c) 2016 , Advanced Micro Devices, Inc. All rights reserved. // /// \author AMD Developer Tools Team /// \file gwApplicationCommands.cpp /// //================================================================================== //------------------------------ gwApplicationCommands.cpp ------------------------------ // Warnings: #include <AMDTBaseTools/Include/gtIgnoreCompilerWarnings.h> // Qt: #include <QtWidgets> #include <QObject> #include <AMDTApplicationFramework/Include/views/afSourceCodeView.h> #include <AMDTGpuDebuggingComponents/Include/gdHTMLProperties.h> #include <AMDTApplicationComponents/Include/acFunctions.h> // Infra: #include <AMDTBaseTools/Include/gtASCIIStringTokenizer.h> #include <AMDTBaseTools/Include/gtAssert.h> #include <AMDTApiFunctions/Include/gaGRApiFunctions.h> #include <AMDTAPIClasses/Include/Events/apEventsHandler.h> #include <AMDTAPIClasses/Include/Events/apMDIViewCreateEvent.h> #include <AMDTAPIClasses/Include/apExecutionMode.h> #include <AMDTApplicationComponents/Include/acMessageBox.h> #include <AMDTOSWrappers/Include/osGeneralFunctions.h> // AMDTApplicationFramework: #include <AMDTApplicationFramework/Include/afSourceCodeViewsManager.h> #include <AMDTApplicationFramework/Include/afQMdiSubWindow.h> #include <AMDTApplicationFramework/Include/afAidFunctions.h> #include <AMDTApplicationFramework/Include/afMainAppWindow.h> #include <AMDTApplicationFramework/Include/afProjectManager.h> #include <AMDTApplicationFramework/Include/afExecutionModeManager.h> // AMDTGpuDebuggingComponents: #include <AMDTGpuDebuggingComponents/Include/gdAidFunctions.h> #include <AMDTGpuDebuggingComponents/Include/gdCommandIDs.h> #include <AMDTGpuDebuggingComponents/Include/gdGDebuggerGlobalVariablesManager.h> #include <AMDTGpuDebuggingComponents/Include/gdImagesAndBuffersManager.h> #include <AMDTGpuDebuggingComponents/Include/gdStatisticsPanel.h> #include 
<AMDTGpuDebuggingComponents/Include/gdStringConstants.h> #include <AMDTGpuDebuggingComponents/Include/commands/gdSaveProjectCommand.h> #include <AMDTGpuDebuggingComponents/Include/dialogs/gdBreakpointsDialog.h> #include <AMDTGpuDebuggingComponents/Include/views/gdCallsStackListCtrl.h> #include <AMDTGpuDebuggingComponents/Include/views/gdMemoryView.h> #include <AMDTGpuDebuggingComponents/Include/gdDebugApplicationTreeData.h> // Local: #include <AMDTGpuDebugging/Include/gwgDEBuggerAppWrapperDLLBuild.h> #include <AMDTGpuDebugging/Include/gwgDEBuggerAppWrapper.h> #include <AMDTGpuDebugging/Include/gwStringConstants.h> #include <src/gwApplicationCommands.h> #include <src/gwImagesAndBuffersMDIViewCreator.h> // --------------------------------------------------------------------------- // Name: gwApplicationCommands::gwApplicationCommands // Description: // Return Val: // Author: <NAME> // Date: 7/2/2011 // --------------------------------------------------------------------------- gwApplicationCommands::gwApplicationCommands() { } // --------------------------------------------------------------------------- // Name: gwApplicationCommands::~gwApplicationCommands // Description: // Return Val: // Author: <NAME> // Date: 7/2/2011 // --------------------------------------------------------------------------- gwApplicationCommands::~gwApplicationCommands() { } // --------------------------------------------------------------------------- // Name: gwApplicationCommands::openBreakpointsDialog // Description: Open the breakpoints dialog // Return Val: bool - Success / failure. 
// Author: <NAME> // Date: 7/2/2011 // --------------------------------------------------------------------------- bool gwApplicationCommands::openBreakpointsDialog() { bool retVal = false; // Perform the command only if it is enabled: if (isBreakpointsDialogCommandEnabled()) { afApplicationCommands* pApplicationCommands = afApplicationCommands::instance(); GT_IF_WITH_ASSERT(NULL != pApplicationCommands) { gdBreakpointsDialog dialog(NULL); int rc = dialog.exec(); if (QDialog::Accepted == rc) { retVal = true; } } } return retVal; } // --------------------------------------------------------------------------- // Name: gwApplicationCommands::isBreakpointsDialogCommandEnabled // Description: Return true iff the breakpoints dialog command is enabled // Return Val: bool - Success / failure. // Author: <NAME> // Date: 7/2/2011 // --------------------------------------------------------------------------- bool gwApplicationCommands::isBreakpointsDialogCommandEnabled() { bool retVal = false; if (afExecutionModeManager::instance().isActiveMode(GD_STR_executionMode)) { // Get current execution mode; apExecutionMode currentExecMode = AP_DEBUGGING_MODE; gaGetDebuggedProcessExecutionMode(currentExecMode); retVal = true; } return retVal; } // --------------------------------------------------------------------------- // Name: gwApplicationCommands::propertiesEventObserver // Description: Return the application properties view // Return Val: gdPropertiesEventObserver* // Author: <NAME> // Date: 21/7/2011 // --------------------------------------------------------------------------- gdPropertiesEventObserver* gwApplicationCommands::propertiesEventObserver() { gdPropertiesEventObserver* pRetVal = NULL; pRetVal = gwgDEBuggerAppWrapper::propertiesView(); return pRetVal; } // --------------------------------------------------------------------------- // Name: gwApplicationCommands::callsHistoryPanel // Description: Return the application calls history panel // Return Val: 
gdAPICallsHistoryPanel* // Author: <NAME> // Date: 21/7/2011 // --------------------------------------------------------------------------- gdAPICallsHistoryPanel* gwApplicationCommands::callsHistoryPanel() { gdAPICallsHistoryPanel* pRetVal = NULL; pRetVal = gwgDEBuggerAppWrapper::callsHistoryPanel(); return pRetVal; } // --------------------------------------------------------------------------- // Name: gwApplicationCommands::callStackView // Description: Return the application call stack view // Return Val: gdCallStackView* // Author: <NAME> // Date: 21/7/2011 // --------------------------------------------------------------------------- gdCallStackView* gwApplicationCommands::callStackView() { gdCallStackView* pRetVal = NULL; pRetVal = gwgDEBuggerAppWrapper::callStackView(); return pRetVal; } // --------------------------------------------------------------------------- // Name: gwApplicationCommands::debuggedProcessEventsView // Description: Return the application debugged process events view // Return Val: gdDebuggedProcessEventsView* // Author: <NAME> // Date: 21/7/2011 // --------------------------------------------------------------------------- gdDebuggedProcessEventsView* gwApplicationCommands::debuggedProcessEventsView() { gdDebuggedProcessEventsView* pRetVal = NULL; pRetVal = gwgDEBuggerAppWrapper::debuggedProcessEventsView(); return pRetVal; } // --------------------------------------------------------------------------- // Name: gwApplicationCommands::stateVariablesView // Description: Return the application state variable view // Return Val: gdStateVariablesView* // Author: <NAME> // Date: 21/7/2011 // --------------------------------------------------------------------------- gdStateVariablesView* gwApplicationCommands::stateVariablesView() { gdStateVariablesView* pRetVal = NULL; pRetVal = gwgDEBuggerAppWrapper::stateVariablesView(); return pRetVal; } // --------------------------------------------------------------------------- // Name: 
gwApplicationCommands::commandQueuesView
// Description: Return the application command queues view
// Return Val:  gdCommandQueuesView*
// Author:      <NAME>
// Date:        15/12/2011
// ---------------------------------------------------------------------------
gdCommandQueuesView* gwApplicationCommands::commandQueuesView()
{
    gdCommandQueuesView* pRetVal = NULL;

    // NOTE(review): the wrapper call below is commented out, so this accessor
    // currently always returns NULL — confirm whether the view is intentionally
    // disabled:
    // pRetVal = gwgDEBuggerAppWrapper::commandQueuesView();
    return pRetVal;
}

// ---------------------------------------------------------------------------
// Name:        gwApplicationCommands::statisticsView
// Description: Return the application statistics view
// Return Val:  gdStatisticsPanel*
// Author:      <NAME>
// Date:        18/9/2011
// ---------------------------------------------------------------------------
gdStatisticsPanel* gwApplicationCommands::statisticsPanel()
{
    // Fetched from the application wrapper singleton; may be NULL if the
    // wrapper has not created the panel.
    gdStatisticsPanel* pRetVal = NULL;
    pRetVal = gwgDEBuggerAppWrapper::statisticsPanel();
    return pRetVal;
}

// ---------------------------------------------------------------------------
// Name:        gwApplicationCommands::memoryView
// Description: Return the application memory view
// Return Val:  gdMemoryView*
// Author:      <NAME>
// Date:        18/9/2011
// ---------------------------------------------------------------------------
gdMemoryView* gwApplicationCommands::memoryView()
{
    gdMemoryView* pRetVal = NULL;
    pRetVal = gwgDEBuggerAppWrapper::memoryView();
    return pRetVal;
}

// ---------------------------------------------------------------------------
// Name:        gwApplicationCommands::breakpointsView
// Description: Get the application breakpoints view
// Return Val:  gdBreakpointsView*
// Author:      <NAME>
// Date:        25/9/2011
// ---------------------------------------------------------------------------
gdBreakpointsView* gwApplicationCommands::breakpointsView()
{
    gdBreakpointsView* pRetVal = NULL;
    pRetVal = gwgDEBuggerAppWrapper::breakpointsView();
    return pRetVal;
}

// ---------------------------------------------------------------------------
// Name:        gwApplicationCommands::watchView
// Description: Get the application watch view
// Return Val:  gdWatchView*
// Author:      <NAME>
// Date:        25/9/2011
// ---------------------------------------------------------------------------
gdWatchView* gwApplicationCommands::watchView()
{
    gdWatchView* pRetVal = NULL;
    pRetVal = gwgDEBuggerAppWrapper::watchView();
    return pRetVal;
}

// ---------------------------------------------------------------------------
// ---------------------------------------------------------------------------
// Name:        gwApplicationCommands::localsView
// Description: Get the application locals view
// Return Val:  gdLocalsView*
// Author:      <NAME>
// Date:        25/9/2011
// ---------------------------------------------------------------------------
gdLocalsView* gwApplicationCommands::localsView()
{
    gdLocalsView* pRetVal = NULL;
    pRetVal = gwgDEBuggerAppWrapper::localsView();
    return pRetVal;
}

// ---------------------------------------------------------------------------
// Name:        vspApplicationCommands::displayImageBufferObject
// Description: Display an image / buffer object in VS
// Arguments:   afApplicationTreeItemData* pItemData
// Return Val:  bool - Success / failure.
// Author:      <NAME>
// Date:        27/7/2011
// ---------------------------------------------------------------------------
bool gwApplicationCommands::displayImageBufferObject(afApplicationTreeItemData* pItemData, const gtString& itemText)
{
    bool retVal = false;

    // Sanity check:
    GT_IF_WITH_ASSERT(pItemData != NULL)
    {
        if (pItemData->m_itemType != AF_TREE_ITEM_ITEM_NONE)
        {
            // Get the main application window:
            afMainAppWindow* pApplicationWindow = afMainAppWindow::instance();
            GT_IF_WITH_ASSERT(pApplicationWindow != NULL)
            {
                // Derive the cache file name from the tree item's HTML link data:
                gtString fileName;
                bool rcFileName = gdHTMLProperties::objectDataToHTMLLink(*pItemData, -1, fileName);
                GT_IF_WITH_ASSERT(rcFileName)
                {
                    // Build the file path:
                    // Get the User AppData directory
                    osFilePath imageObjectsFilePath;
                    afGetUserDataFolderPath(imageObjectsFilePath);

                    // Add the VS_Cache files directory:
                    imageObjectsFilePath.appendSubDirectory(GW_STR_VSCacheFolderName);

                    // Get the project name:
                    gdGDebuggerGlobalVariablesManager& globalVarsManager = gdGDebuggerGlobalVariablesManager::instance();

                    // Get the debugged application name:
                    gtString projectName;
                    osFilePath currentProject = globalVarsManager.currentDebugProjectSettings().executablePath();
                    currentProject.getFileName(projectName);

                    // Create the folder if not created:
                    osDirectory directoryPath;
                    directoryPath.setDirectoryPath(imageObjectsFilePath);
                    bool rcCreateDir = directoryPath.create();
                    GT_IF_WITH_ASSERT(rcCreateDir)
                    {
                        // Add the VS_Cache files directory:
                        imageObjectsFilePath.appendSubDirectory(projectName);
                        directoryPath.setDirectoryPath(imageObjectsFilePath);
                        directoryPath.create();

                        // Create a text file with the description of the current buffer / image object:
                        // Write the files to the cache folder:
                        imageObjectsFilePath.setFileName(fileName);
                        imageObjectsFilePath.setFileExtension(AF_STR_CodeXMLImageBuffersFilesExtension);

                        // Just save the file:
                        osFile objectfile;
                        bool rc = objectfile.open(imageObjectsFilePath, osChannel::OS_BINARY_CHANNEL, osFile::OS_OPEN_TO_WRITE);
                        GT_IF_WITH_ASSERT(rc)
                        {
                            gdDebugApplicationTreeData* pGDData = qobject_cast<gdDebugApplicationTreeData*>(pItemData->extendedItemData());
                            GT_IF_WITH_ASSERT(pGDData != NULL)
                            {
                                // Trigger a source code view creation event:
                                gtString viewTitle;
                                pGDData->_contextId.toString(viewTitle);
                                viewTitle.appendFormattedString(L" %ls", itemText.asCharArray());
                                apMDIViewCreateEvent imageBufferViewEvent(AF_STR_ImageBuffersViewsCreatorID, imageObjectsFilePath, viewTitle, 0, -1);
                                apEventsHandler::instance().registerPendingDebugEvent(imageBufferViewEvent);
                            }
                            // NOTE(review): retVal becomes true once the cache file was
                            // opened, even if pGDData was NULL above — confirm intended.
                            retVal = true;
                        }
                    }
                }
            }
        }
    }
    return retVal;
}

// ---------------------------------------------------------------------------
// Name:        gwApplicationCommands::displayOpenCLProgramSourceCode
// Description: Display an OpenCL program source code
// Arguments:   afApplicationTreeItemData* pProgramItemData
// Author:      <NAME>
// Date:        2/8/2011
// ---------------------------------------------------------------------------
void gwApplicationCommands::displayOpenCLProgramSourceCode(afApplicationTreeItemData* pProgramItemData)
{
    // Sanity check:
    GT_IF_WITH_ASSERT(pProgramItemData != NULL)
    {
        if (pProgramItemData->m_itemType != AF_TREE_ITEM_ITEM_NONE)
        {
            // Get the monitored object tree:
            gdDebugApplicationTreeHandler* pMonitoredObjectsTree = gdDebugApplicationTreeHandler::instance();
            GT_IF_WITH_ASSERT(pMonitoredObjectsTree != NULL)
            {
                gtString viewTitle;
                // Get the item text from the tree:
                gtString itemText = acQStringToGTString(pMonitoredObjectsTree->GetTreeItemText(pProgramItemData->m_pTreeWidgetItem));
                gdDebugApplicationTreeData* pGDData = qobject_cast<gdDebugApplicationTreeData*>(pProgramItemData->extendedItemData());
                GT_IF_WITH_ASSERT(pGDData != NULL)
                {
                    // Append the context string to the item string:
                    gtString itemNameWithContext;
                    pGDData->_contextId.toString(viewTitle);

                    // Append the context as string to the item name:
                    viewTitle.appendFormattedString(L"%ls ", itemText.asCharArray());

                    // Find the item file path:
                    osFilePath filePath;
                    int lineNumber = -1;
                    bool rc = gdFindObjectFilePath(pProgramItemData, filePath, lineNumber);
                    GT_IF_WITH_ASSERT(rc)
                    {
                        // Check if the file should be displayed with program counter:
                        int displayedLineNumber = -1, displayedPCCounter = -1;
                        (void) afSourceCodeViewsManager::instance().getLineNumberAndProgramCounter(filePath, displayedLineNumber, displayedPCCounter);

                        // Check if the file is opened:
                        bool isFileOpen = afSourceCodeViewsManager::instance().isFileOpen(filePath);

                        if (!isFileOpen)
                        {
                            // Display the file with the original line number and program counter, and then display again with the new line number and no pc:
                            apMDIViewCreateEvent sourceCodeViewEvent(AF_STR_GenericMDIViewsCreatorID, filePath, viewTitle, 0, displayedLineNumber, displayedPCCounter);
                            apEventsHandler::instance().registerPendingDebugEvent(sourceCodeViewEvent);
                        }

                        // Trigger a source code view creation event:
                        apMDIViewCreateEvent sourceCodeViewEvent(AF_STR_GenericMDIViewsCreatorID, filePath, viewTitle, 0, lineNumber, -1);
                        apEventsHandler::instance().registerPendingDebugEvent(sourceCodeViewEvent);
                    }
                }
            }
        }
    }
}

// ---------------------------------------------------------------------------
// Name:        gwApplicationCommands::displayOpenGLSLShaderCode
// Description: Display an OpenGL shader source code
// Arguments:   afApplicationTreeItemData* pShaderItemData
// Author:      <NAME>
// Date:        2/8/2011
// ---------------------------------------------------------------------------
void gwApplicationCommands::displayOpenGLSLShaderCode(afApplicationTreeItemData* pShaderItemData)
{
    // NOTE(review): unlike displayOpenCLProgramSourceCode, pShaderItemData is
    // dereferenced without a NULL sanity check — confirm callers guarantee it.
    if (pShaderItemData->m_itemType != AF_TREE_ITEM_ITEM_NONE)
    {
        // Get the monitored object tree:
        gdDebugApplicationTreeHandler* pMonitoredObjectsTree = gdDebugApplicationTreeHandler::instance();
        GT_IF_WITH_ASSERT(pMonitoredObjectsTree != NULL)
        {
            gtString viewTitle;
            // Get the item text from the tree:
            gtString itemText = acQStringToGTString(pMonitoredObjectsTree->GetTreeItemText(pShaderItemData->m_pTreeWidgetItem));
            gdDebugApplicationTreeData* pGDData = qobject_cast<gdDebugApplicationTreeData*>(pShaderItemData->extendedItemData());
            GT_IF_WITH_ASSERT(pGDData != NULL)
            {
                // Append the context string to the item string:
                gtString itemNameWithContext;
                pGDData->_contextId.toString(viewTitle);

                // Append the context as string to the item name:
                viewTitle.appendFormattedString(L"%ls ", itemText.asCharArray());

                // Find the item file path:
                osFilePath filePath;
                int lineNumber = -1;
                bool rc = gdFindObjectFilePath(pShaderItemData, filePath, lineNumber);
                GT_IF_WITH_ASSERT(rc)
                {
                    // Trigger a source code view creation event:
                    apMDIViewCreateEvent sourceCodeViewEvent(AF_STR_GenericMDIViewsCreatorID, filePath, viewTitle, 0, lineNumber, -1);
                    apEventsHandler::instance().registerPendingDebugEvent(sourceCodeViewEvent);
                }
            }
        }
    }
}

// ---------------------------------------------------------------------------
// Name:        gwApplicationCommands::updateToolbarCommands
// Description: Update the main frame toolbar commands
// Author:      <NAME>
// Date:        1/8/2011
// ---------------------------------------------------------------------------
void gwApplicationCommands::updateToolbarCommands()
{
    // Get the main frame:
    afMainAppWindow* pApplicationWindow = afMainAppWindow::instance();

    // NOTICE: Some of the actions are initialized before the main application
    // initialization is over. Do not assert this if:
    if (pApplicationWindow != NULL)
    {
        pApplicationWindow->updateToolbarsCommands();
    }
}

// ---------------------------------------------------------------------------
// Name:        gwApplicationCommands::updateToolbarCommands
// Description: Update the main frame toolbars
// Author:      <NAME>
// Date:        1/8/2011
// ---------------------------------------------------------------------------
void gwApplicationCommands::updateToolbars()
{
    // Get the main frame:
    afMainAppWindow* pApplicationWindow = afMainAppWindow::instance();
    GT_IF_WITH_ASSERT(pApplicationWindow != NULL)
    {
        pApplicationWindow->updateToolbars();
    }
}

// ---------------------------------------------------------------------------
// Name:        gwApplicationCommands::raiseStatisticsView
// Description: Raise statistics view through the package commands
// Return Val:  bool - Success / failure.
// Author:      <NAME>
// Date:        31/7/2011
// ---------------------------------------------------------------------------
bool gwApplicationCommands::raiseStatisticsView()
{
    // NOTE(review): retVal is never set to true, so this function reports
    // failure even when the panel was shown and raised — confirm intended.
    bool retVal = false;

    // Get the main window:
    afMainAppWindow* pMainWindow = afMainAppWindow::instance();
    GT_IF_WITH_ASSERT(pMainWindow != NULL)
    {
        gdStatisticsPanel* pStatisticsPanel = statisticsPanel();
        GT_IF_WITH_ASSERT(pStatisticsPanel != NULL)
        {
            // Get the widget parent:
            QDockWidget* pDockWidgetParent = qobject_cast<QDockWidget*>(pStatisticsPanel->parent());
            GT_IF_WITH_ASSERT(pDockWidgetParent != NULL)
            {
                pDockWidgetParent->show();
                pDockWidgetParent->setFocus();
                pDockWidgetParent->raise();
            }
        }
    }
    return retVal;
}

// ---------------------------------------------------------------------------
// Name:        gwApplicationCommands::raiseCommandQueuesView
// Description: Raise command queues view through the application main window
// Return Val:  bool - Success / failure.
// Author:      <NAME>
// Date:        4/1/2012
// ---------------------------------------------------------------------------
bool gwApplicationCommands::raiseCommandQueuesView()
{
    // The command queues view is currently disabled (see commandQueuesView()),
    // so the raising logic below is commented out and this always returns false.
    bool retVal = false;

    // Get the main window:
    afMainAppWindow* pMainWindow = afMainAppWindow::instance();
    GT_IF_WITH_ASSERT(pMainWindow != NULL)
    {
        /*      gdCommandQueuesView* pCommandQueuesView = commandQueuesView();
                GT_IF_WITH_ASSERT (pCommandQueuesView != NULL)
                {
                    // Get the widget parent:
                    QDockWidget* pDockWidgetParent = qobject_cast<QDockWidget*>(pCommandQueuesView->parent());
                    GT_IF_WITH_ASSERT(pDockWidgetParent != NULL)
                    {
                        pDockWidgetParent->show();
                        pDockWidgetParent->setFocus();
                        pDockWidgetParent->raise();
                    }
                }*/
    }
    return retVal;
}

// ---------------------------------------------------------------------------
// Name:        gwApplicationCommands::raiseMemoryView
// Description: Raise memory view through the main window interface
// Return Val:  bool - Success / failure.
// Author:      <NAME>
// Date:        31/7/2011
// ---------------------------------------------------------------------------
bool gwApplicationCommands::raiseMemoryView()
{
    // NOTE(review): retVal is never set to true here either — the success /
    // failure contract is not actually honored; confirm intended.
    bool retVal = false;

    // Get the main window:
    afMainAppWindow* pMainWindow = afMainAppWindow::instance();
    GT_IF_WITH_ASSERT(pMainWindow != NULL)
    {
        gdMemoryView* pMemoryView = memoryView();
        GT_IF_WITH_ASSERT(pMemoryView != NULL)
        {
            // Get the widget parent:
            QDockWidget* pDockWidgetParent = qobject_cast<QDockWidget*>(pMemoryView->parent());
            GT_IF_WITH_ASSERT(pDockWidgetParent != NULL)
            {
                pDockWidgetParent->show();
                pDockWidgetParent->setFocus();
                pDockWidgetParent->raise();
            }

            // Update the view:
            bool rc = pMemoryView->updateView(true);
            GT_ASSERT(rc);
        }
    }
    return retVal;
}
9,317
14,668
<filename>components/payments/core/test_payment_manifest_downloader.h
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef COMPONENTS_PAYMENTS_CORE_TEST_PAYMENT_MANIFEST_DOWNLOADER_H_
#define COMPONENTS_PAYMENTS_CORE_TEST_PAYMENT_MANIFEST_DOWNLOADER_H_

#include <map>
#include <memory>
#include <string>

#include "components/payments/core/payment_manifest_downloader.h"

class GURL;

template <class T>
class scoped_refptr;

namespace network {
class SharedURLLoaderFactory;
}

namespace payments {

// Downloads payment method manifests from the test server.
//
// Sample usage #1:
//
//   TestDownloader downloader(context);
//   downloader.AddTestServerURL("https://", "https://127.0.0.1:7070");
//   // Actual URL downloaded is https://127.0.0.1:7070/alicepay.com/webpay.
//   downloader.DownloadPaymentMethodManifest(
//       "https://alicepay.com/webpay", callback);
//
// Sample usage #2:
//
//   TestDownloader downloader(context);
//   downloader.AddTestServerURL(
//       "https://alicepay.com", "https://127.0.0.1:8080");
//   downloader.AddTestServerURL(
//       "https://bobpay.com", "https://127.0.0.1:9090");
//   // Actual URL downloaded is https://127.0.0.1:8080/webpay.
//   downloader.DownloadPaymentMethodManifest(
//       "https://alicepay.com/webpay", callback);
//   // Actual URL downloaded is https://127.0.0.1:9090/webpay.
//   downloader.DownloadPaymentMethodManifest(
//       "https://bobpay.com/webpay", callback);
class TestDownloader : public PaymentManifestDownloader {
 public:
  explicit TestDownloader(
      scoped_refptr<network::SharedURLLoaderFactory> url_loader_factory);

  // Copying is disallowed: this class represents a stateful downloader.
  TestDownloader(const TestDownloader&) = delete;
  TestDownloader& operator=(const TestDownloader&) = delete;

  ~TestDownloader() override;

  // Modifies the downloader to replace all instances of |prefix| with
  // |test_server_url| when downloading payment method manifests and web app
  // manifests.
  //
  // For example, if AddTestServerURL("https://", "https://127.0.0.1:7070") is
  // called, then all calls to DownloadPaymentMethodManifest(some_url, callback)
  // will replace the "https://" prefix of some_url with
  // "https://127.0.0.1:7070". This is useful when running a single test server
  // that serves files in components/test/data/payments/, which has
  // subdirectories that look like hostnames. So, downloading
  // "https://alicepay.com/webpay" would actually download
  // https://127.0.0.1:7070/alicepay.com/webpay, which is a file located at
  // components/test/data/payments/alicepay.com/webpay.
  //
  // For another example, if AddTestServerURL("https://alicepay.com",
  // "https://127.0.0.1:8080") is called, then all calls to
  // DownloadPaymentMethodManifest(some_url, callback) will replace the
  // "https://alicepay.com" prefix of some_url with "https://127.0.0.1:8080".
  // This is useful when running multiple test servers, each one serving file
  // from individual subdirectories for components/test/data/payments/. So,
  // downloading "https://alicepay.com/webpay" would actually download
  // https://127.0.0.1:8080/webpay, which is a file located at
  // components/test/data/payments/alicepay.com/webpay. Multiple test servers
  // are useful for testing where the RFC6454 origins should be considered.
  //
  // Any call to DownloadPaymentMethodManifest(some_url, callback) where
  // some_url does not have a previously added prefix will use the original
  // some_url without modifications.
  //
  // If you call this method multiple times, avoid |prefix| parameters that are
  // prefixes of each other, as that will cause undefined confusion. That is,
  // AddTestServerURL("x");AddTestServerURL("y"); is OK, but
  // AddTestServerURL("x");AddTestServerURL("xy"); is not.
  void AddTestServerURL(const std::string& prefix, const GURL& test_server_url);

  // PaymentManifestDownloader:
  //
  // The reverse operation as AddTestServerURL: converts |url| back to a test
  // server URL so it can be fetched as a normal resource outside of this class.
  GURL FindTestServerURL(const GURL& url) const override;

 private:
  // PaymentManifestDownloader implementation.
  void InitiateDownload(const url::Origin& request_initiator,
                        const GURL& url,
                        Download::Type download_type,
                        int allowed_number_of_redirects,
                        PaymentManifestDownloadCallback callback) override;

  // The mapping from the URL prefix to the URL of the test server to be used.
  // Example 1:
  //
  // {"https://": "https://127.0.0.1:7070"}
  //
  // Example 2:
  //
  // {
  //   "https://alicepay.com": "https://127.0.0.1:8080",
  //   "https://bobpay.com": "https://127.0.0.1:9090"
  // }
  std::map<std::string, GURL> test_server_url_;
};

}  // namespace payments

#endif  // COMPONENTS_PAYMENTS_CORE_TEST_PAYMENT_MANIFEST_DOWNLOADER_H_
1,705
1,658
<reponame>SergeyPetrachkov/DownloadButton
//
//  PKViewController.h
//  DownloadButton
//
//  Created by <NAME> on 06/01/2015.
//  Copyright (c) 2014 <NAME>. All rights reserved.
//

@import UIKit;

// Example-app view controller for the DownloadButton demo.
// Declares no public API beyond what UIViewController already provides.
@interface PKViewController : UIViewController

@end
87
1,049
package me.devilsen.czxing.thread;

import java.util.Deque;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.LinkedBlockingDeque;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

import me.devilsen.czxing.util.BarCodeUtil;

/**
 * desc : Task dispatcher — queues barcode frame-decoding jobs onto a small
 * thread pool and drops the oldest pending job when the backlog exceeds
 * {@link #MAX_RUNNABLE}.
 * date : 2019-06-29 11:32
 *
 * @author : dongSen
 */
public final class Dispatcher {

    private static final String TAG = Dispatcher.class.getSimpleName();
    // Maximum number of queued jobs before the oldest pending one is dropped.
    private static final int MAX_RUNNABLE = 10;

    private ExecutorService executorService;
    // NOTE(review): this deque is handed to the ThreadPoolExecutor as its work
    // queue AND mutated directly below (remove/getFirst/clear). Confirm that
    // this double use of the queue is intentional — the executor also
    // dequeues from it on its own.
    private final LinkedBlockingDeque<Runnable> blockingDeque;

    public Dispatcher() {
        blockingDeque = new LinkedBlockingDeque<>();
        // Core 1 / max 2 threads, 10 s keep-alive, shared deque as work queue.
        executorService = new ThreadPoolExecutor(1, 2, 10, TimeUnit.SECONDS,
                blockingDeque,
                ExecutorUtil.threadFactory("decode dispatcher", false));
    }

    /** Wrap a prepared frame into a runnable bound to this dispatcher. */
    public ProcessRunnable newRunnable(FrameData frameData, Callback callback) {
        return new ProcessRunnable(this, frameData, callback);
    }

    /** Convenience overload: builds the FrameData from raw camera bytes. */
    public ProcessRunnable newRunnable(byte[] data, int left, int top, int width, int height,
                                       int rowWidth, int rowHeight, Callback callback) {
        return newRunnable(new FrameData(data, left, top, width, height, rowWidth, rowHeight), callback);
    }

    /**
     * Submit a job; if the backlog is over MAX_RUNNABLE, the oldest pending
     * job is removed first. Returns the current queue depth.
     */
    synchronized int enqueue(ProcessRunnable runnable) {
        if (blockingDeque.size() > MAX_RUNNABLE) {
            blockingDeque.remove();
        }
        execute(runnable);
        BarCodeUtil.d("blockingDeque: " + blockingDeque.size());
        return blockingDeque.size();
    }

    private synchronized void execute(Runnable runnable) {
        executorService.execute(runnable);
    }

    /** Called by a ProcessRunnable when it completes. */
    public void finished(ProcessRunnable runnable) {
        finish(blockingDeque, runnable);
    }

    // Remove the completed runnable and promote the next queued one, if any.
    private void finish(Deque<Runnable> decodeDeque, ProcessRunnable runnable) {
        synchronized (this) {
            if (decodeDeque.size() > 0) {
                decodeDeque.remove(runnable);
                promoteCalls();
            }
        }
    }

    // Re-submit the head of the queue to the executor.
    private synchronized void promoteCalls() {
        if (blockingDeque.isEmpty()) {
            return;
        }
        Runnable first = blockingDeque.getFirst();
        execute(first);
    }

    /** Cancel every pending job and empty the queue. */
    public synchronized void cancelAll() {
        for (Runnable runnable : blockingDeque) {
            ((ProcessRunnable) runnable).cancel();
        }
        blockingDeque.clear();
    }
}
1,050
348
<gh_stars>100-1000 {"nom":"Vismes","circ":"3ème circonscription","dpt":"Somme","inscrits":390,"abs":208,"votants":182,"blancs":16,"nuls":6,"exp":160,"res":[{"nuance":"LR","nom":"<NAME>","voix":106},{"nuance":"REM","nom":"M. <NAME>","voix":54}]}
100
3,066
/*
 * Licensed to Crate.io GmbH ("Crate") under one or more contributor
 * license agreements.  See the NOTICE file distributed with this work for
 * additional information regarding copyright ownership.  Crate licenses
 * this file to you under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.  You may
 * obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * However, if you have executed another commercial license agreement
 * with Crate these terms will supersede the license and you may use the
 * software solely pursuant to the terms of the relevant commercial agreement.
 */

package org.elasticsearch.node;

import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsException;
import org.hamcrest.Matchers;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;

import java.nio.file.Path;
import java.util.HashMap;

import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.endsWith;
import static org.hamcrest.Matchers.is;

/**
 * Tests for settings resolution in InternalSettingsPreparer: command-line
 * overrides, custom config directories, and duplicate-key detection.
 */
public class InternalSettingsPreparerTest {

    @Rule
    public ExpectedException expectedException = ExpectedException.none();

    @Test
    public void testThatCommandLineArgumentsOverrideSettingsFromConfigFile() throws Exception {
        HashMap<String, String> settings = new HashMap<>();
        settings.put("path.home", ".");
        Path config = PathUtils.get(getClass().getResource("config").toURI());
        settings.put("path.conf", config.toString());
        settings.put("stats.enabled", "false");
        settings.put("cluster.name", "clusterNameOverridden");
        settings.put("path.logs", "/some/other/path");

        Settings finalSettings = InternalSettingsPreparer
            .prepareEnvironment(Settings.EMPTY, settings, config, () -> "node1").settings();

        // Overriding value from crate.yml
        assertThat(finalSettings.getAsBoolean("stats.enabled", null), is(false));

        // Value kept from crate.yml
        assertThat(finalSettings.getAsBoolean("psql.enabled", null), is(false));

        // Overriding value from crate.yml
        assertThat(finalSettings.get("cluster.name"), is("clusterNameOverridden"));

        // Value kept from crate.yml
        // (two alternatives accepted so the test passes on both Unix and Windows)
        assertThat(finalSettings.get("path.logs"), Matchers.anyOf(
            is("/some/other/path"),
            is("D:\\some\\other\\path")
        ));
    }

    @Test
    public void testCustomConfigMustNotContainSettingsFromDefaultCrateYml() throws Exception {
        HashMap<String, String> settings = new HashMap<>();
        Path home = PathUtils.get(getClass().getResource(".").toURI());
        settings.put("path.home", home.toString());
        Path config = PathUtils.get(getClass().getResource("config_custom").toURI());
        settings.put("path.conf", config.toString());

        Settings finalSettings = InternalSettingsPreparer
            .prepareEnvironment(Settings.EMPTY, settings, config, () -> "node1").settings();

        // Values from crate.yml
        assertThat(finalSettings.get("cluster.name"), is("custom"));

        // path.logs is not set in config_custom/crate.yml
        // so it needs to use default value and not the value set in config/crate.yml
        assertThat(finalSettings.get("path.logs"), Matchers.anyOf(
            endsWith("org/elasticsearch/node/logs"),
            endsWith("org\\elasticsearch\\node\\logs")
        ));
    }

    @Test
    public void testClusterNameMissingFromConfigFile() throws Exception {
        HashMap<String, String> settings = new HashMap<>();
        settings.put("path.home", ".");
        settings.put("cluster.name", "clusterName");
        Path config = PathUtils.get(getClass().getResource("config").toURI());

        Settings finalSettings = InternalSettingsPreparer
            .prepareEnvironment(Settings.EMPTY, settings, config, () -> "node1").settings();

        assertThat(finalSettings.get("cluster.name"), is("clusterName"));
    }

    @Test
    public void testErrorWithDuplicateSettingInConfigFile() throws Exception {
        HashMap<String, String> settings = new HashMap<>();
        settings.put("path.home", ".");
        Path config = PathUtils.get(getClass().getResource("config_invalid").toURI());
        settings.put("path.conf", config.toString());

        // config_invalid/crate.yml contains 'stats.enabled' twice; loading must fail.
        expectedException.expect(SettingsException.class);
        expectedException.expectMessage("Failed to load settings from");
        expectedException.expectCause(Matchers.hasProperty("message", containsString("Duplicate field 'stats.enabled'")));

        InternalSettingsPreparer.prepareEnvironment(Settings.EMPTY, settings, config, () -> "node1");
    }
}
1,757
751
/* $NetBSD: radix.c,v 1.47 2016/12/12 03:55:57 ozaki-r Exp $ */ /* * Copyright (c) 1988, 1989, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)radix.c 8.6 (Berkeley) 10/17/95 */ /* * Routines to build and maintain radix trees for routing lookups. 
 */

#include <vnet/util/radix.h>

/* Callback type used by the tree-walk / debug-print helpers below. */
typedef void (*rn_printer_t)(void *, const char *fmt, ...);

static int max_keylen = 33;    // me
struct radix_mask *rn_mkfreelist;
struct radix_node_head *mask_rnhead;
static char *addmask_key;
/* Bit patterns for the 8 possible normal (contiguous) masks within a byte. */
static const char normal_chars[] =
    {0, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe, -1};
static char *rn_zeros, *rn_ones;

#define rn_masktop (mask_rnhead->rnh_treetop)

static int rn_satisfies_leaf(const char *, struct radix_node *, int);
static int rn_lexobetter(const void *, const void *);
static struct radix_mask *rn_new_radix_mask(struct radix_node *,
    struct radix_mask *);
static struct radix_node *rn_walknext(struct radix_node *, rn_printer_t,
    void *);
static struct radix_node *rn_walkfirst(struct radix_node *, rn_printer_t,
    void *);
static void rn_nodeprint(struct radix_node *, rn_printer_t, void *,
    const char *);

#define SUBTREE_OPEN  "[ "
#define SUBTREE_CLOSE " ]"

#ifdef RN_DEBUG
static void rn_treeprint(struct radix_node_head *, rn_printer_t, void *);
#endif /* RN_DEBUG */

#define MIN(x,y) (((x)<(y))?(x):(y))

/* Allocate a zeroed radix_mask via the clib allocator. */
static struct radix_mask*
rm_alloc (void)
{
    struct radix_mask *rm = clib_mem_alloc(sizeof(struct radix_mask));

    clib_memset(rm, 0, sizeof(*rm));

    return (rm);
}

/* Release a radix_mask previously obtained from rm_alloc(). */
static void
rm_free (struct radix_mask *rm)
{
    clib_mem_free(rm);
}

/* BSD-compat allocation macros mapped onto the clib allocator. */
#define R_Malloc(p, t, n) \
{ \
    p = (t) clib_mem_alloc((unsigned int)(n)); \
    clib_memset(p, 0, n); \
}
#define Free(p) clib_mem_free((p))

#define log(a,b, c...)
#define bool i32

/*
 * The data structure for the keys is a radix tree with one way
 * branching removed.  The index rn_b at an internal node n represents a bit
 * position to be tested.  The tree is arranged so that all descendants
 * of a node n have keys whose bits all agree up to position rn_b - 1.
 * (We say the index of n is rn_b.)
 *
 * There is at least one descendant which has a one bit at position rn_b,
 * and at least one with a zero there.
 *
 * A route is determined by a pair of key and mask.
We require that the * bit-wise logical and of the key and mask to be the key. * We define the index of a route to associated with the mask to be * the first bit number in the mask where 0 occurs (with bit number 0 * representing the highest order bit). * * We say a mask is normal if every bit is 0, past the index of the mask. * If a node n has a descendant (k, m) with index(m) == index(n) == rn_b, * and m is a normal mask, then the route applies to every descendant of n. * If the index(m) < rn_b, this implies the trailing last few bits of k * before bit b are all 0, (and hence consequently true of every descendant * of n), so the route applies to all descendants of the node as well. * * Similar logic shows that a non-normal mask m such that * index(m) <= index(n) could potentially apply to many children of n. * Thus, for each non-host route, we attach its mask to a list at an internal * node as high in the tree as we can go. * * The present version of the code makes use of normal routes in short- * circuiting an explicit mask and compare operation when testing whether * a key satisfies a normal route, and also in remembering the unique leaf * that governs a subtree. 
 */

/*
 * Descend from `head` following the bit tests at each internal node until a
 * leaf (rn_b < 0) is reached.  Returns that leaf; no key comparison is done.
 */
struct radix_node *
rn_search(
    const void *v_arg,
    struct radix_node *head)
{
    const u8 * const v = v_arg;
    struct radix_node *x;

    for (x = head; x->rn_b >= 0;) {
        if (x->rn_bmask & v[x->rn_off])
            x = x->rn_r;
        else
            x = x->rn_l;
    }
    return x;
}

/*
 * Like rn_search(), but only follows a right branch when the corresponding
 * bit is also set in the mask `m_arg`.
 */
struct radix_node *
rn_search_m(
    const void *v_arg,
    struct radix_node *head,
    const void *m_arg)
{
    struct radix_node *x;
    const u8 * const v = v_arg;
    const u8 * const m = m_arg;

    for (x = head; x->rn_b >= 0;) {
        if ((x->rn_bmask & m[x->rn_off]) &&
            (x->rn_bmask & v[x->rn_off]))
            x = x->rn_r;
        else
            x = x->rn_l;
    }
    return x;
}

/*
 * Return non-zero iff mask `m_arg` is strictly more specific than (refines)
 * mask `n_arg`.  Both arguments are length-prefixed byte strings.
 */
int
rn_refines(
    const void *m_arg,
    const void *n_arg)
{
    const char *m = m_arg;
    const char *n = n_arg;
    const char *lim = n + *(const u8 *)n;
    const char *lim2 = lim;
    int longer = (*(const u8 *)n++) - (int)(*(const u8 *)m++);
    int masks_are_equal = 1;

    if (longer > 0)
        lim -= longer;
    while (n < lim) {
        /* n has a bit that m lacks -> m cannot refine n. */
        if (*n & ~(*m))
            return 0;
        if (*n++ != *m++)
            masks_are_equal = 0;
    }
    while (n < lim2)
        if (*n++)
            return 0;
    if (masks_are_equal && (longer < 0))
        for (lim2 = m - longer; m < lim2; )
            if (*m++)
                return 1;
    return !masks_are_equal;
}

/*
 * Exact lookup: find the leaf matching key `v_arg`; when `m_arg` is given,
 * additionally require the leaf's mask to be that (canonicalized) netmask.
 * Returns NULL when no such entry exists.
 */
struct radix_node *
rn_lookup(
    const void *v_arg,
    const void *m_arg,
    struct radix_node_head *head)
{
    struct radix_node *x;
    const char *netmask = NULL;

    if (m_arg) {
        /* Canonicalize the netmask via the shared mask tree. */
        if ((x = rn_addmask(m_arg, 1, head->rnh_treetop->rn_off)) == 0)
            return NULL;
        netmask = x->rn_key;
    }
    x = rn_match(v_arg, head);
    if (x != NULL && netmask != NULL) {
        /* Walk the duped-key chain looking for the exact mask. */
        while (x != NULL && x->rn_mask != netmask)
            x = x->rn_dupedkey;
    }
    return x;
}

/*
 * Return non-zero iff `trial`, masked by the leaf's mask, equals the leaf's
 * key (skipping the first `skip` bytes, i.e. the length/offset prefix).
 */
static int
rn_satisfies_leaf(
    const char *trial,
    struct radix_node *leaf,
    int skip)
{
    const char *cp = trial;
    const char *cp2 = leaf->rn_key;
    const char *cp3 = leaf->rn_mask;
    const char *cplim;
    int length = MIN(*(const u8 *)cp, *(const u8 *)cp2);

    if (cp3 == 0)
        cp3 = rn_ones;
    else
        length = MIN(length, *(const u8 *)cp3);
    cplim = cp + length;
    cp3 += skip;
    cp2 += skip;
    for (cp += skip; cp < cplim; cp++, cp2++, cp3++)
        if ((*cp ^ *cp2) & *cp3)
            return 0;
    return 1;
}

/*
 * Longest-prefix match for key `v_arg`: descend to a leaf, then back up the
 * tree consulting duped-key chains and attached mask lists until a leaf whose
 * (key, mask) pair covers v_arg is found.  Returns NULL on no match.
 */
struct radix_node *
rn_match(
    const void *v_arg,
    struct radix_node_head *head)
{
    const char * const v = v_arg;
    struct radix_node *t = head->rnh_treetop;
    struct radix_node *top = t;
    struct radix_node *x;
    struct radix_node *saved_t;
    const char *cp = v;
    const char *cp2;
    const char *cplim;
    int off = t->rn_off;
    int vlen = *(const u8 *)cp;
    int matched_off;
    int test, b, rn_b;

    /*
     * Open code rn_search(v, top) to avoid overhead of extra
     * subroutine call.
     */
    for (; t->rn_b >= 0; ) {
        if (t->rn_bmask & cp[t->rn_off])
            t = t->rn_r;
        else
            t = t->rn_l;
    }
    /*
     * See if we match exactly as a host destination
     * or at least learn how many bits match, for normal mask finesse.
     *
     * It doesn't hurt us to limit how many bytes to check
     * to the length of the mask, since if it matches we had a genuine
     * match and the leaf we have is the most specific one anyway;
     * if it didn't match with a shorter length it would fail
     * with a long one.  This wins big for class B&C netmasks which
     * are probably the most common case...
     */
    if (t->rn_mask)
        vlen = *(const u8 *)t->rn_mask;
    cp += off;
    cp2 = t->rn_key + off;
    cplim = v + vlen;
    for (; cp < cplim; cp++, cp2++)
        if (*cp != *cp2)
            goto on1;
    /*
     * This extra grot is in case we are explicitly asked
     * to look up the default.  Ugh!
     */
    if ((t->rn_flags & RNF_ROOT) && t->rn_dupedkey)
        t = t->rn_dupedkey;
    return t;
on1:
    test = (*cp ^ *cp2) & 0xff; /* find first bit that differs */
    for (b = 7; (test >>= 1) > 0;)
        b--;
    matched_off = cp - v;
    b += matched_off << 3;
    rn_b = -1 - b;
    /*
     * If there is a host route in a duped-key chain, it will be first.
     */
    if ((saved_t = t)->rn_mask == 0)
        t = t->rn_dupedkey;
    for (; t; t = t->rn_dupedkey)
        /*
         * Even if we don't match exactly as a host,
         * we may match if the leaf we wound up at is
         * a route to a net.
         */
        if (t->rn_flags & RNF_NORMAL) {
            if (rn_b <= t->rn_b)
                return t;
        } else if (rn_satisfies_leaf(v, t, matched_off))
            return t;
    t = saved_t;
    /* start searching up the tree */
    do {
        struct radix_mask *m;
        t = t->rn_p;
        m = t->rn_mklist;
        if (m) {
            /*
             * If non-contiguous masks ever become important
             * we can restore the masking and open coding of
             * the search and satisfaction test and put the
             * calculation of "off" back before the "do".
             */
            do {
                if (m->rm_flags & RNF_NORMAL) {
                    if (rn_b <= m->rm_b)
                        return m->rm_leaf;
                } else {
                    off = MIN(t->rn_off, matched_off);
                    x = rn_search_m(v, t, m->rm_mask);
                    while (x && x->rn_mask != m->rm_mask)
                        x = x->rn_dupedkey;
                    if (x && rn_satisfies_leaf(v, x, off))
                        return x;
                }
                m = m->rm_mklist;
            } while (m);
        }
    } while (t != top);
    return NULL;
}

/* Print one node (pointer, parent, children) via the supplied printer. */
static void
rn_nodeprint(struct radix_node *rn, rn_printer_t printer, void *arg,
    const char *delim)
{
    (*printer)(arg, "%s(%s%p: p<%p> l<%p> r<%p>)",
        delim, ((void *)rn == arg) ? "*" : "", rn, rn->rn_p, rn->rn_l,
        rn->rn_r);
}

#ifdef RN_DEBUG
int rn_debug = 1;

static void
rn_dbg_print(void *arg, const char *fmt, ...)
{ va_list ap; va_start(ap, fmt); vlog(LOG_DEBUG, fmt, ap); va_end(ap); } static void rn_treeprint(struct radix_node_head *h, rn_printer_t printer, void *arg) { struct radix_node *dup, *rn; const char *delim; if (printer == NULL) return; rn = rn_walkfirst(h->rnh_treetop, printer, arg); for (;;) { /* Process leaves */ delim = ""; for (dup = rn; dup != NULL; dup = dup->rn_dupedkey) { if ((dup->rn_flags & RNF_ROOT) != 0) continue; rn_nodeprint(dup, printer, arg, delim); delim = ", "; } rn = rn_walknext(rn, printer, arg); if (rn->rn_flags & RNF_ROOT) return; } /* NOTREACHED */ } #define traverse(__head, __rn) rn_treeprint((__head), rn_dbg_print, (__rn)) #endif /* RN_DEBUG */ struct radix_node * rn_newpair( const void *v, int b, struct radix_node nodes[2]) { struct radix_node *tt = nodes; struct radix_node *t = tt + 1; t->rn_b = b; t->rn_bmask = 0x80 >> (b & 7); t->rn_l = tt; t->rn_off = b >> 3; tt->rn_b = -1; tt->rn_key = v; tt->rn_p = t; tt->rn_flags = t->rn_flags = RNF_ACTIVE; return t; } struct radix_node * rn_insert( const void *v_arg, struct radix_node_head *head, int *dupentry, struct radix_node nodes[2]) { struct radix_node *top = head->rnh_treetop; struct radix_node *t = rn_search(v_arg, top); struct radix_node *tt; const char *v = v_arg; int head_off = top->rn_off; int vlen = *((const u8 *)v); const char *cp = v + head_off; int b; /* * Find first bit at which v and t->rn_key differ */ { const char *cp2 = t->rn_key + head_off; const char *cplim = v + vlen; int cmp_res; while (cp < cplim) if (*cp2++ != *cp++) goto on1; *dupentry = 1; return t; on1: *dupentry = 0; cmp_res = (cp[-1] ^ cp2[-1]) & 0xff; for (b = (cp - v) << 3; cmp_res; b--) cmp_res >>= 1; } { struct radix_node *p, *x = top; cp = v; do { p = x; if (cp[x->rn_off] & x->rn_bmask) x = x->rn_r; else x = x->rn_l; } while (b > (unsigned) x->rn_b); /* x->rn_b < b && x->rn_b >= 0 */ #ifdef RN_DEBUG if (rn_debug) log(LOG_DEBUG, "%s: Going In:\n", __func__), traverse(head, p); #endif t = rn_newpair(v_arg, b, 
nodes); tt = t->rn_l; if ((cp[p->rn_off] & p->rn_bmask) == 0) p->rn_l = t; else p->rn_r = t; x->rn_p = t; t->rn_p = p; /* frees x, p as temp vars below */ if ((cp[t->rn_off] & t->rn_bmask) == 0) { t->rn_r = x; } else { t->rn_r = tt; t->rn_l = x; } #ifdef RN_DEBUG if (rn_debug) { log(LOG_DEBUG, "%s: Coming Out:\n", __func__), traverse(head, p); } #endif /* RN_DEBUG */ } return tt; } struct radix_node * rn_addmask( const void *n_arg, int search, int skip) { const char *netmask = n_arg; const char *cp; const char *cplim; struct radix_node *x; struct radix_node *saved_x; int b = 0, mlen, j; int maskduplicated, m0, isnormal; static int last_zeroed = 0; if ((mlen = *(const u8 *)netmask) > max_keylen) mlen = max_keylen; if (skip == 0) skip = 1; if (mlen <= skip) return mask_rnhead->rnh_nodes; if (skip > 1) memmove(addmask_key + 1, rn_ones + 1, skip - 1); if ((m0 = mlen) > skip) memmove(addmask_key + skip, netmask + skip, mlen - skip); /* * Trim trailing zeroes. */ for (cp = addmask_key + mlen; (cp > addmask_key) && cp[-1] == 0;) cp--; mlen = cp - addmask_key; if (mlen <= skip) { if (m0 >= last_zeroed) last_zeroed = mlen; return mask_rnhead->rnh_nodes; } if (m0 < last_zeroed) clib_memset(addmask_key + m0, 0, last_zeroed - m0); *addmask_key = last_zeroed = mlen; x = rn_search(addmask_key, rn_masktop); if (memcmp(addmask_key, x->rn_key, mlen) != 0) x = 0; if (x || search) return x; R_Malloc(x, struct radix_node *, max_keylen + 2 * sizeof (*x)); if ((saved_x = x) == NULL) return NULL; clib_memset(x, 0, max_keylen + 2 * sizeof (*x)); cp = netmask = (void *)(x + 2); memmove(x + 2, addmask_key, mlen); x = rn_insert(cp, mask_rnhead, &maskduplicated, x); if (maskduplicated) { log(LOG_ERR, "rn_addmask: mask impossibly already in tree\n"); Free(saved_x); return x; } /* * Calculate index of mask, and check for normalcy. 
*/ cplim = netmask + mlen; isnormal = 1; for (cp = netmask + skip; (cp < cplim) && *(const u8 *)cp == 0xff;) cp++; if (cp != cplim) { for (j = 0x80; (j & *cp) != 0; j >>= 1) b++; if (*cp != normal_chars[b] || cp != (cplim - 1)) isnormal = 0; } b += (cp - netmask) << 3; x->rn_b = -1 - b; if (isnormal) x->rn_flags |= RNF_NORMAL; return x; } static int /* XXX: arbitrary ordering for non-contiguous masks */ rn_lexobetter( const void *m_arg, const void *n_arg) { const u8 *mp = m_arg; const u8 *np = n_arg; const u8 *lim; if (*mp > *np) return 1; /* not really, but need to check longer one first */ if (*mp == *np) for (lim = mp + *mp; mp < lim;) if (*mp++ > *np++) return 1; return 0; } static struct radix_mask * rn_new_radix_mask( struct radix_node *tt, struct radix_mask *next) { struct radix_mask *m; m = rm_alloc(); if (m == NULL) { log(LOG_ERR, "Mask for route not entered\n"); return NULL; } clib_memset(m, 0, sizeof(*m)); m->rm_b = tt->rn_b; m->rm_flags = tt->rn_flags; if (tt->rn_flags & RNF_NORMAL) m->rm_leaf = tt; else m->rm_mask = tt->rn_mask; m->rm_mklist = next; tt->rn_mklist = m; return m; } struct radix_node * rn_addroute( const void *v_arg, const void *n_arg, struct radix_node_head *head, struct radix_node treenodes[2]) { const char *v = v_arg, *netmask = n_arg; struct radix_node *t, *x = NULL, *tt; struct radix_node *saved_tt, *top = head->rnh_treetop; short b = 0, b_leaf = 0; int keyduplicated; const char *mmask; struct radix_mask *m, **mp; /* * In dealing with non-contiguous masks, there may be * many different routes which have the same mask. * We will find it useful to have a unique pointer to * the mask to speed avoiding duplicate references at * nodes and possibly save time in calculating indices. 
*/ if (netmask != NULL) { if ((x = rn_addmask(netmask, 0, top->rn_off)) == NULL) return NULL; b_leaf = x->rn_b; b = -1 - x->rn_b; netmask = x->rn_key; } /* * Deal with duplicated keys: attach node to previous instance */ saved_tt = tt = rn_insert(v, head, &keyduplicated, treenodes); if (keyduplicated) { for (t = tt; tt != NULL; t = tt, tt = tt->rn_dupedkey) { if (tt->rn_mask == netmask) return NULL; if (netmask == NULL || (tt->rn_mask != NULL && (b_leaf < tt->rn_b || /* index(netmask) > node */ rn_refines(netmask, tt->rn_mask) || rn_lexobetter(netmask, tt->rn_mask)))) break; } /* * If the mask is not duplicated, we wouldn't * find it among possible duplicate key entries * anyway, so the above test doesn't hurt. * * We sort the masks for a duplicated key the same way as * in a masklist -- most specific to least specific. * This may require the unfortunate nuisance of relocating * the head of the list. * * We also reverse, or doubly link the list through the * parent pointer. */ if (tt == saved_tt) { struct radix_node *xx = x; /* link in at head of list */ (tt = treenodes)->rn_dupedkey = t; tt->rn_flags = t->rn_flags; tt->rn_p = x = t->rn_p; t->rn_p = tt; if (x->rn_l == t) x->rn_l = tt; else x->rn_r = tt; saved_tt = tt; x = xx; } else { (tt = treenodes)->rn_dupedkey = t->rn_dupedkey; t->rn_dupedkey = tt; tt->rn_p = t; if (tt->rn_dupedkey) tt->rn_dupedkey->rn_p = tt; } tt->rn_key = v; tt->rn_b = -1; tt->rn_flags = RNF_ACTIVE; } /* * Put mask in tree. 
*/ if (netmask != NULL) { tt->rn_mask = netmask; tt->rn_b = x->rn_b; tt->rn_flags |= x->rn_flags & RNF_NORMAL; } t = saved_tt->rn_p; if (keyduplicated) goto on2; b_leaf = -1 - t->rn_b; if (t->rn_r == saved_tt) x = t->rn_l; else x = t->rn_r; /* Promote general routes from below */ if (x->rn_b < 0) { for (mp = &t->rn_mklist; x != NULL; x = x->rn_dupedkey) { if (x->rn_mask != NULL && x->rn_b >= b_leaf && x->rn_mklist == NULL) { *mp = m = rn_new_radix_mask(x, NULL); if (m != NULL) mp = &m->rm_mklist; } } } else if (x->rn_mklist != NULL) { /* * Skip over masks whose index is > that of new node */ for (mp = &x->rn_mklist; (m = *mp) != NULL; mp = &m->rm_mklist) if (m->rm_b >= b_leaf) break; t->rn_mklist = m; *mp = NULL; } on2: /* Add new route to highest possible ancestor's list */ if (netmask == NULL || b > t->rn_b) return tt; /* can't lift at all */ b_leaf = tt->rn_b; do { x = t; t = t->rn_p; } while (b <= t->rn_b && x != top); /* * Search through routes associated with node to * insert new route according to index. * Need same criteria as when sorting dupedkeys to avoid * double loop on deletion. 
*/ for (mp = &x->rn_mklist; (m = *mp) != NULL; mp = &m->rm_mklist) { if (m->rm_b < b_leaf) continue; if (m->rm_b > b_leaf) break; if (m->rm_flags & RNF_NORMAL) { mmask = m->rm_leaf->rn_mask; if (tt->rn_flags & RNF_NORMAL) { log(LOG_ERR, "Non-unique normal route," " mask not entered\n"); return tt; } } else mmask = m->rm_mask; if (mmask == netmask) { m->rm_refs++; tt->rn_mklist = m; return tt; } if (rn_refines(netmask, mmask) || rn_lexobetter(netmask, mmask)) break; } *mp = rn_new_radix_mask(tt, *mp); return tt; } struct radix_node * rn_delete1( const void *v_arg, const void *netmask_arg, struct radix_node_head *head, struct radix_node *rn) { struct radix_node *t, *p, *x, *tt; struct radix_mask *m, *saved_m, **mp; struct radix_node *dupedkey, *saved_tt, *top; const char *v, *netmask; int b, head_off, vlen; v = v_arg; netmask = netmask_arg; x = head->rnh_treetop; tt = rn_search(v, x); head_off = x->rn_off; vlen = *(const u8 *)v; saved_tt = tt; top = x; if (tt == NULL || memcmp(v + head_off, tt->rn_key + head_off, vlen - head_off) != 0) return NULL; /* * Delete our route from mask lists. 
*/ if (netmask != NULL) { if ((x = rn_addmask(netmask, 1, head_off)) == NULL) return NULL; netmask = x->rn_key; while (tt->rn_mask != netmask) if ((tt = tt->rn_dupedkey) == NULL) return NULL; } if (tt->rn_mask == NULL || (saved_m = m = tt->rn_mklist) == NULL) goto on1; if (tt->rn_flags & RNF_NORMAL) { if (m->rm_leaf != tt || m->rm_refs > 0) { log(LOG_ERR, "rn_delete: inconsistent annotation\n"); return NULL; /* dangling ref could cause disaster */ } } else { if (m->rm_mask != tt->rn_mask) { log(LOG_ERR, "rn_delete: inconsistent annotation\n"); goto on1; } if (--m->rm_refs >= 0) goto on1; } b = -1 - tt->rn_b; t = saved_tt->rn_p; if (b > t->rn_b) goto on1; /* Wasn't lifted at all */ do { x = t; t = t->rn_p; } while (b <= t->rn_b && x != top); for (mp = &x->rn_mklist; (m = *mp) != NULL; mp = &m->rm_mklist) { if (m == saved_m) { *mp = m->rm_mklist; rm_free(m); break; } } if (m == NULL) { log(LOG_ERR, "rn_delete: couldn't find our annotation\n"); if (tt->rn_flags & RNF_NORMAL) return NULL; /* Dangling ref to us */ } on1: /* * Eliminate us from tree */ if (tt->rn_flags & RNF_ROOT) return NULL; #ifdef RN_DEBUG if (rn_debug) log(LOG_DEBUG, "%s: Going In:\n", __func__), traverse(head, tt); #endif t = tt->rn_p; dupedkey = saved_tt->rn_dupedkey; if (dupedkey != NULL) { /* * Here, tt is the deletion target, and * saved_tt is the head of the dupedkey chain. 
*/ if (tt == saved_tt) { x = dupedkey; x->rn_p = t; if (t->rn_l == tt) t->rn_l = x; else t->rn_r = x; } else { /* find node in front of tt on the chain */ for (x = p = saved_tt; p != NULL && p->rn_dupedkey != tt;) p = p->rn_dupedkey; if (p != NULL) { p->rn_dupedkey = tt->rn_dupedkey; if (tt->rn_dupedkey != NULL) tt->rn_dupedkey->rn_p = p; } else log(LOG_ERR, "rn_delete: couldn't find us\n"); } t = tt + 1; if (t->rn_flags & RNF_ACTIVE) { *++x = *t; p = t->rn_p; if (p->rn_l == t) p->rn_l = x; else p->rn_r = x; x->rn_l->rn_p = x; x->rn_r->rn_p = x; } goto out; } if (t->rn_l == tt) x = t->rn_r; else x = t->rn_l; p = t->rn_p; if (p->rn_r == t) p->rn_r = x; else p->rn_l = x; x->rn_p = p; /* * Demote routes attached to us. */ if (t->rn_mklist == NULL) ; else if (x->rn_b >= 0) { for (mp = &x->rn_mklist; (m = *mp) != NULL; mp = &m->rm_mklist) ; *mp = t->rn_mklist; } else { /* If there are any key,mask pairs in a sibling duped-key chain, some subset will appear sorted in the same order attached to our mklist */ for (m = t->rn_mklist; m != NULL && x != NULL; x = x->rn_dupedkey) { if (m == x->rn_mklist) { struct radix_mask *mm = m->rm_mklist; x->rn_mklist = NULL; if (--(m->rm_refs) < 0) rm_free(m); m = mm; } } if (m != NULL) { log(LOG_ERR, "rn_delete: Orphaned Mask %p at %p\n", m, x); } } /* * We may be holding an active internal node in the tree. 
*/ x = tt + 1; if (t != x) { *t = *x; t->rn_l->rn_p = t; t->rn_r->rn_p = t; p = x->rn_p; if (p->rn_l == x) p->rn_l = t; else p->rn_r = t; } out: #ifdef RN_DEBUG if (rn_debug) { log(LOG_DEBUG, "%s: Coming Out:\n", __func__), traverse(head, tt); } #endif /* RN_DEBUG */ tt->rn_flags &= ~RNF_ACTIVE; tt[1].rn_flags &= ~RNF_ACTIVE; return tt; } struct radix_node * rn_delete( const void *v_arg, const void *netmask_arg, struct radix_node_head *head) { return rn_delete1(v_arg, netmask_arg, head, NULL); } static struct radix_node * rn_walknext(struct radix_node *rn, rn_printer_t printer, void *arg) { /* If at right child go back up, otherwise, go right */ while (rn->rn_p->rn_r == rn && (rn->rn_flags & RNF_ROOT) == 0) { if (printer != NULL) (*printer)(arg, SUBTREE_CLOSE); rn = rn->rn_p; } if (printer) rn_nodeprint(rn->rn_p, printer, arg, ""); /* Find the next *leaf* since next node might vanish, too */ for (rn = rn->rn_p->rn_r; rn->rn_b >= 0;) { if (printer != NULL) (*printer)(arg, SUBTREE_OPEN); rn = rn->rn_l; } return rn; } static struct radix_node * rn_walkfirst(struct radix_node *rn, rn_printer_t printer, void *arg) { /* First time through node, go left */ while (rn->rn_b >= 0) { if (printer != NULL) (*printer)(arg, SUBTREE_OPEN); rn = rn->rn_l; } return rn; } int rn_walktree( struct radix_node_head *h, int (*f)(struct radix_node *, void *), void *w) { int error; struct radix_node *base, *next, *rn; /* * This gets complicated because we may delete the node * while applying the function f to it, so we need to calculate * the successor node in advance. 
*/ rn = rn_walkfirst(h->rnh_treetop, NULL, NULL); for (;;) { base = rn; next = rn_walknext(rn, NULL, NULL); /* Process leaves */ while ((rn = base) != NULL) { base = rn->rn_dupedkey; if (!(rn->rn_flags & RNF_ROOT) && (error = (*f)(rn, w))) return error; } rn = next; if (rn->rn_flags & RNF_ROOT) return 0; } /* NOTREACHED */ } struct radix_node * rn_search_matched(struct radix_node_head *h, int (*matcher)(struct radix_node *, void *), void *w) { bool matched; struct radix_node *base, *next, *rn; /* * This gets complicated because we may delete the node * while applying the function f to it, so we need to calculate * the successor node in advance. */ rn = rn_walkfirst(h->rnh_treetop, NULL, NULL); for (;;) { base = rn; next = rn_walknext(rn, NULL, NULL); /* Process leaves */ while ((rn = base) != NULL) { base = rn->rn_dupedkey; if (!(rn->rn_flags & RNF_ROOT)) { matched = (*matcher)(rn, w); if (matched) return rn; } } rn = next; if (rn->rn_flags & RNF_ROOT) return NULL; } /* NOTREACHED */ } int rn_inithead(void **head, int off) { struct radix_node_head *rnh; if (*head != NULL) return 1; R_Malloc(rnh, struct radix_node_head *, sizeof (*rnh)); if (rnh == NULL) return 0; *head = rnh; return rn_inithead0(rnh, off); } int rn_inithead0(struct radix_node_head *rnh, int off) { struct radix_node *t; struct radix_node *tt; struct radix_node *ttt; clib_memset(rnh, 0, sizeof(*rnh)); t = rn_newpair(rn_zeros, off, rnh->rnh_nodes); ttt = rnh->rnh_nodes + 2; t->rn_r = ttt; t->rn_p = t; tt = t->rn_l; tt->rn_flags = t->rn_flags = RNF_ROOT | RNF_ACTIVE; tt->rn_b = -1 - off; *ttt = *tt; ttt->rn_key = rn_ones; rnh->rnh_addaddr = rn_addroute; rnh->rnh_deladdr = rn_delete; rnh->rnh_matchaddr = rn_match; rnh->rnh_lookup = rn_lookup; rnh->rnh_treetop = t; return 1; } static clib_error_t * rn_module_init (vlib_main_t * vm) { char *cp, *cplim; R_Malloc(rn_zeros, char *, 3 * max_keylen); if (rn_zeros == NULL) return (clib_error_return (0, "RN Zeros...")); clib_memset(rn_zeros, 0, 3 * max_keylen); 
rn_ones = cp = rn_zeros + max_keylen; addmask_key = cplim = rn_ones + max_keylen; while (cp < cplim) *cp++ = -1; if (rn_inithead((void *)&mask_rnhead, 0) == 0) return (clib_error_return (0, "RN Init 2")); return (NULL); } VLIB_INIT_FUNCTION(rn_module_init);
12,394
428
<gh_stars>100-1000 #ifndef __STRING_UTILS_H__ #define __STRING_UTILS_H__ #include <stddef.h> #include <string.h> char *xstrdup(const char *string); char *xstrdupn(const char *str, size_t n); char *trim(char **str); char *randomstr(char *buf, int len); #endif
113
329
package kr.dogfoot.hwplib.object.docinfo; /** * 아이디 매핑 헤더를 나타내는 레코드. "DocInfo" stream 안에 있는 다른 객체들의 개수를 저전한다. * * @author neolord */ public class IDMappings { /** * 바이너리 데이터의 개수 */ private int binDataCount; /** * 한글 글꼴의 개수 */ private int hangulFaceNameCount; /** * 영어 글꼴의 개수 */ private int englishFaceNameCount; /** * 한자 글꼴의 개수 */ private int hanjaFaceNameCount; /** * 일본어 글꼴의 개수 */ private int japaneseFaceNameCount; /** * 기타 글꼴의 개수 */ private int etcFaceNameCount; /** * 기호 글꼴의 개수 */ private int symbolFaceNameCount; /** * 사용자 글꼴의 개수 */ private int userFaceNameCount; /** * 테두리/배경의 개수 */ private int borderFillCount; /** * 글자 모양의 개수 */ private int charShapeCount; /** * 탭 정의의 개수 */ private int tabDefCount; /** * 문단 번호의 개수 */ private int numberingCount; /** * 글머리표의 개수 */ private int bulletCount; /** * 문단 모양의 개수 */ private int paraShapeCount; /** * 스타일의 개수 */ private int styleCount; /** * 메모 모양의 개수(5.0.2.1 이상) */ private int memoShapeCount; /** * 변경 추적의 개수(5.0.3.2 이상) */ private int trackChangeCount; /** * 변경추적 사용자의 개수 (5.0.3.2 이상) */ private int trackChangeAuthorCount; /** * 생성자 */ public IDMappings() { } /** * 바이너리 데이터 객체의 개수를 반환한다. * * @return 바이너리 데이터 객체의 개수 */ public int getBinDataCount() { return binDataCount; } /** * 바이너리 데이터 객체의 개수를 설정한다. * * @param binDataCount 바이너리 데이터 객체의 개수 */ public void setBinDataCount(int binDataCount) { this.binDataCount = binDataCount; } /** * 한글 글꼴 객체의 개수를 반환한다. * * @return 한글 글꼴 객체의 개수 */ public int getHangulFaceNameCount() { return hangulFaceNameCount; } /** * 한글 글꼴 객체의 개수를 설정한다. * * @param hangulFaceNameCount 한글 글꼴 객체의 개수 */ public void setHangulFaceNameCount(int hangulFaceNameCount) { this.hangulFaceNameCount = hangulFaceNameCount; } /** * 영어 글꼴 객체의 개수를 반환한다. * * @return 영어 글꼴 객체의 개수 */ public int getEnglishFaceNameCount() { return englishFaceNameCount; } /** * 영어 글꼴 객체의 개수를 설정한다. 
* * @param englishFaceNameCount 영어 글꼴 객체의 개수 */ public void setEnglishFaceNameCount(int englishFaceNameCount) { this.englishFaceNameCount = englishFaceNameCount; } /** * 한자 글꼴 객체의 개수를 반환한다. * * @return 한자 글꼴 객체의 개수 */ public int getHanjaFaceNameCount() { return hanjaFaceNameCount; } /** * 한자 글꼴 객체의 개수를 설정한다. * * @param hanjaFaceNameCount 한자 글꼴 객체의 개수 */ public void setHanjaFaceNameCount(int hanjaFaceNameCount) { this.hanjaFaceNameCount = hanjaFaceNameCount; } /** * 일본어 글꼴 객체의 개수를 반환한다. * * @return 일본어 글꼴 객체의 개수 */ public int getJapaneseFaceNameCount() { return japaneseFaceNameCount; } /** * 일본어 글꼴 객체의 개수를 설정한다. * * @param japaneseFaceNameCount 일본어 글꼴 객체의 개수 */ public void setJapaneseFaceNameCount(int japaneseFaceNameCount) { this.japaneseFaceNameCount = japaneseFaceNameCount; } /** * 기타 글꼴 객체의 개수를 반환한다. * * @return 기타 글꼴 객체의 개수 */ public int getEtcFaceNameCount() { return etcFaceNameCount; } /** * 기타 글꼴 객체의 개수를 설정한다. * * @param etcFaceNameCount 기타 글꼴 객체의 개수 */ public void setEtcFaceNameCount(int etcFaceNameCount) { this.etcFaceNameCount = etcFaceNameCount; } /** * 기호 글꼴 객체의 개수를 반환한다. * * @return 기호 글꼴 객체의 개수 */ public int getSymbolFaceNameCount() { return symbolFaceNameCount; } /** * 기호 글꼴 객체의 개수를 설정한다. * * @param symbolFaceNameCount 기호 글꼴 객체의 개수 */ public void setSymbolFaceNameCount(int symbolFaceNameCount) { this.symbolFaceNameCount = symbolFaceNameCount; } /** * 사용자 글꼴 객체의 개수를 반환한다. * * @return 사용자 글꼴 객체의 개수 */ public int getUserFaceNameCount() { return userFaceNameCount; } /** * 사용자 글꼴 객체의 개수를 설정한다. * * @param userFaceNameCount 사용자 글꼴 객체의 개수 */ public void setUserFaceNameCount(int userFaceNameCount) { this.userFaceNameCount = userFaceNameCount; } /** * 배경/테두리 객체의 개수를 반환한다. * * @return 배경/테두리 객체의 개수 */ public int getBorderFillCount() { return borderFillCount; } /** * 배경/테두리 객체의 개수를 설정한다. * * @param borderFillCount 배경/테두리 객체의 개수 */ public void setBorderFillCount(int borderFillCount) { this.borderFillCount = borderFillCount; } /** * 글자 모양 객체의 개수를 반환한다. 
* * @return 글자 모양 객체의 개수 */ public int getCharShapeCount() { return charShapeCount; } /** * 글자 모양 객체의 개수를 설정한다. * * @param charShapeCount 글자 모양 객체의 개수 */ public void setCharShapeCount(int charShapeCount) { this.charShapeCount = charShapeCount; } /** * 탭 정의 객체의 개수를 반환한다. * * @return 탭 정의 객체의 개수 */ public int getTabDefCount() { return tabDefCount; } /** * 탭 정의 객체의 개수를 설정한다. * * @param tabDefCount 탭 정의 객체의 개수 */ public void setTabDefCount(int tabDefCount) { this.tabDefCount = tabDefCount; } /** * 문단 번호 객체의 개수를 반환한다. * * @return 문단 번호 객체의 개수 */ public int getNumberingCount() { return numberingCount; } /** * 문단 번호 객체의 개수를 설정한다. * * @param numberingCount 문단 번호 객체의 개수 */ public void setNumberingCount(int numberingCount) { this.numberingCount = numberingCount; } /** * 글머리표 객체의 개수를 반환한다. * * @return 글머리표 객체의 개수 */ public int getBulletCount() { return bulletCount; } /** * 글머리표 객체의 개수를 설정한다. * * @param bulletCount 글머리표 객체의 개수 */ public void setBulletCount(int bulletCount) { this.bulletCount = bulletCount; } /** * 믄단 모양 객체의 개수를 반환한다. * * @return 믄단 모양 객체의 개수 */ public int getParaShapeCount() { return paraShapeCount; } /** * 믄단 모양 객체의 개수를 설정한다. * * @param paraShapeCount 믄단 모양 객체의 개수 */ public void setParaShapeCount(int paraShapeCount) { this.paraShapeCount = paraShapeCount; } /** * 스타일 객체의 개수를 반환한다. * * @return 스타일 객체의 개수 */ public int getStyleCount() { return styleCount; } /** * 스타일 객체의 개수를 설정한다. * * @param styleCount 스타일 객체의 개수 */ public void setStyleCount(int styleCount) { this.styleCount = styleCount; } /** * 메모 모양 객체의 개수를 반환한다. (5.0.2.1 이상) * * @return 메모 모양 객체의 개수 */ public int getMemoShapeCount() { return memoShapeCount; } /** * 메모 모양 객체의 개수를 설정한다. (5.0.2.1 이상) * * @param memoShapeCount 메모 모양 객체의 개수 */ public void setMemoShapeCount(int memoShapeCount) { this.memoShapeCount = memoShapeCount; } /** * 변경 추적 객체의 개수를 반환한다. (5.0.3.2 이상) * * @return 변경 추적 객체의 개수 */ public int getTrackChangeCount() { return trackChangeCount; } /** * 변경 추적 객체의 개수를 설정한다. 
(5.0.3.2 이상) * * @param trackChangeCount 변경 추적 객체의 개수 */ public void setTrackChangeCount(int trackChangeCount) { this.trackChangeCount = trackChangeCount; } /** * 변경추적 사용자 객체의 개수를 반환한다. (5.0.3.2 이상) * * @return 변경추적 사용자 객체의 개수 */ public int getTrackChangeAuthorCount() { return trackChangeAuthorCount; } /** * 변경추적 사용자 객체의 개수를 설정한다. (5.0.3.2 이상) * * @param trackChangeAuthorCount 변경추적 사용자 객체의 개수 */ public void setTrackChangeAuthorCount(int trackChangeAuthorCount) { this.trackChangeAuthorCount = trackChangeAuthorCount; } public void copy(IDMappings from) { binDataCount = from.binDataCount; hangulFaceNameCount = from.hangulFaceNameCount; englishFaceNameCount = from.englishFaceNameCount; hanjaFaceNameCount = from.hanjaFaceNameCount; japaneseFaceNameCount = from.japaneseFaceNameCount; etcFaceNameCount = from.etcFaceNameCount; symbolFaceNameCount = from.symbolFaceNameCount; userFaceNameCount = from.userFaceNameCount; borderFillCount = from.borderFillCount; charShapeCount = from.charShapeCount; tabDefCount = from.tabDefCount; numberingCount = from.numberingCount; bulletCount = from.bulletCount; paraShapeCount = from.paraShapeCount; styleCount = from.styleCount; memoShapeCount = from.memoShapeCount; trackChangeCount = from.trackChangeCount; trackChangeAuthorCount = from.trackChangeAuthorCount; } }
6,726
5,038
/*
 * Copyright 2019 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *        http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.restassured.module.spring.commons.config;

import io.restassured.config.ParamConfig;
import io.restassured.config.RestAssuredConfig;

import java.lang.reflect.Field;

/**
 * Converts a module-specific {@link SpecificationConfig} into the core
 * {@link RestAssuredConfig} by carrying over each sub-configuration.
 */
public class ConfigConverter {

    /**
     * Builds a {@link RestAssuredConfig} whose sub-configs mirror those of
     * the given {@link SpecificationConfig}.
     *
     * @param specificationConfig the source configuration
     * @return an equivalent {@link RestAssuredConfig}
     */
    public static RestAssuredConfig convertToRestAssuredConfig(SpecificationConfig specificationConfig) {
        // Each call returns a new immutable config; re-assign step by step.
        RestAssuredConfig converted = new RestAssuredConfig();
        converted = converted.jsonConfig(specificationConfig.getJsonConfig());
        converted = converted.xmlConfig(specificationConfig.getXmlConfig());
        converted = converted.sessionConfig(specificationConfig.getSessionConfig());
        converted = converted.objectMapperConfig(specificationConfig.getObjectMapperConfig());
        converted = converted.logConfig(specificationConfig.getLogConfig());
        converted = converted.encoderConfig(specificationConfig.getEncoderConfig());
        converted = converted.decoderConfig(specificationConfig.getDecoderConfig());
        converted = converted.multiPartConfig(specificationConfig.getMultiPartConfig());
        converted = converted.paramConfig(toParamConfig(specificationConfig.getParamConfig()));
        converted = converted.matcherConfig(specificationConfig.getMatcherConfig());
        return converted;
    }

    /**
     * Copies the update strategies of {@code baseConfig} into a fresh
     * {@link ParamConfig}, preserving the "user configured" flag of the
     * original (the public constructor always marks the instance as user
     * configured, so the flag is reset reflectively when needed).
     *
     * @param baseConfig the param config to copy
     * @return a new {@link ParamConfig} equivalent to {@code baseConfig}
     */
    private static ParamConfig toParamConfig(ParamConfig baseConfig) {
        ParamConfig copy = new ParamConfig(baseConfig.queryParamsUpdateStrategy(),
                baseConfig.formParamsUpdateStrategy(), baseConfig.requestParamsUpdateStrategy());
        if (baseConfig.isUserConfigured()) {
            // Constructor already marked the copy as user configured; done.
            return copy;
        }
        // Reset the private flag so the copy reports "not user configured".
        Field userConfiguredField = null;
        try {
            userConfiguredField = copy.getClass().getDeclaredField("userConfigured");
            userConfiguredField.setAccessible(true);
            userConfiguredField.set(copy, false);
        } catch (Exception e) {
            throw new RuntimeException("Internal error in REST Assured, please report an issue!", e);
        } finally {
            if (userConfiguredField != null) {
                userConfiguredField.setAccessible(false);
            }
        }
        return copy;
    }
}
875
680
package org.ff4j.store;

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

/*
 * #%L
 * ff4j-store-redis
 * %%
 * Copyright (C) 2013 - 2014 Ff4J
 * %%
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * #L%
 */

import org.ff4j.core.Feature;
import org.ff4j.core.FeatureStore;
import org.ff4j.exception.FeatureAlreadyExistException;
import org.ff4j.exception.FeatureNotFoundException;
import org.ff4j.exception.GroupNotFoundException;
import org.ff4j.redis.RedisConnection;
import org.ff4j.redis.RedisKeysBuilder;
import org.ff4j.utils.Util;
import org.ff4j.utils.json.FeatureJsonParser;

import redis.clients.jedis.Jedis;

/**
 * {@link FeatureStore} implementation persisting features into a Redis store
 * through the Jedis client.
 *
 * <p>Each feature is serialized to JSON under its own key (built by
 * {@link RedisKeysBuilder#getKeyFeature(String)}), while the set of known
 * feature identifiers is tracked in a Redis SET (the "feature map") so that
 * {@link #readAll()} can enumerate them.
 *
 * @author <a href="mailto:<EMAIL>"><NAME></a>
 * @author <NAME>
 */
public class FeatureStoreRedis extends AbstractFeatureStore {

    /** Wrapping of redis connection (isolation). */
    private RedisConnection redisConnection;

    /** Default key builder. */
    private RedisKeysBuilder keyBuilder = new RedisKeysBuilder();

    /** Default constructor: default connection and key builder. */
    public FeatureStoreRedis() {
        this(new RedisConnection(), new RedisKeysBuilder());
    }

    /**
     * Constructor with a custom key builder.
     *
     * @param builder key naming strategy
     */
    public FeatureStoreRedis(RedisKeysBuilder builder) {
        this(new RedisConnection(), builder);
    }

    /**
     * Constructor with a custom connection.
     *
     * @param pRedisConnection wrapped Redis connection
     */
    public FeatureStoreRedis(RedisConnection pRedisConnection) {
        this(pRedisConnection, new RedisKeysBuilder());
    }

    /**
     * Full constructor.
     *
     * @param pRedisConnection wrapped Redis connection
     * @param builder          key naming strategy
     */
    public FeatureStoreRedis(RedisConnection pRedisConnection, RedisKeysBuilder builder) {
        this.redisConnection = pRedisConnection;
        this.keyBuilder = builder;
    }

    /** {@inheritDoc} */
    @Override
    public boolean exist(String uid) {
        Util.assertParamHasLength(uid, "Feature identifier");
        Jedis jedis = null;
        try {
            jedis = getJedis();
            return jedis.exists(keyBuilder.getKeyFeature(uid));
        } finally {
            if (jedis != null) {
                jedis.close();
            }
        }
    }

    /** {@inheritDoc} */
    @Override
    public Feature read(String uid) {
        if (!exist(uid)) {
            throw new FeatureNotFoundException(uid);
        }
        Jedis jedis = null;
        try {
            jedis = getJedis();
            return FeatureJsonParser.parseFeature(jedis.get(keyBuilder.getKeyFeature(uid)));
        } finally {
            if (jedis != null) {
                jedis.close();
            }
        }
    }

    /** {@inheritDoc} */
    @Override
    public void update(Feature fp) {
        Util.assertNotNull("Feature", fp);
        if (!exist(fp.getUid())) {
            throw new FeatureNotFoundException(fp.getUid());
        }
        Jedis jedis = null;
        try {
            jedis = getJedis();
            jedis.set(keyBuilder.getKeyFeature(fp.getUid()), fp.toJson());
            // Remove any TTL so the updated feature never silently expires
            jedis.persist(keyBuilder.getKeyFeature(fp.getUid()));
        } finally {
            if (jedis != null) {
                jedis.close();
            }
        }
    }

    /** {@inheritDoc} */
    @Override
    public void enable(String uid) {
        // Read from redis, feature not found if no present
        Feature f = read(uid);
        // Update within Object
        f.enable();
        // Serialization and update key, update TTL
        update(f);
    }

    /** {@inheritDoc} */
    @Override
    public void disable(String uid) {
        // Read from redis, feature not found if no present
        Feature f = read(uid);
        // Update within Object
        f.disable();
        // Serialization and update key, update TTL
        update(f);
    }

    /** {@inheritDoc} */
    @Override
    public void create(Feature fp) {
        Util.assertNotNull("Feature", fp);
        if (exist(fp.getUid())) {
            throw new FeatureAlreadyExistException(fp.getUid());
        }
        Jedis jedis = null;
        try {
            String id = fp.getUid();
            jedis = getJedis();
            // Register the feature uid in the mapping bucket.
            jedis.sadd(keyBuilder.getKeyFeatureMap(), id);
            jedis.set(keyBuilder.getKeyFeature(id), fp.toJson());
            // FIX: persist the key that was actually written; the previous code
            // appended the uid a second time ("...getKeyFeature(id) + id") and
            // therefore persisted a key that does not exist.
            jedis.persist(keyBuilder.getKeyFeature(id));
        } finally {
            if (jedis != null) {
                jedis.close();
            }
        }
    }

    /** {@inheritDoc} */
    @Override
    public Map<String, Feature> readAll() {
        Jedis jedis = null;
        try {
            jedis = getJedis();
            Set<String> features = jedis.smembers(keyBuilder.getKeyFeatureMap());
            Map<String, Feature> featuresMap = new HashMap<>();
            if (features != null) {
                for (String key : features) {
                    featuresMap.put(key, read(key));
                }
            }
            return featuresMap;
        } finally {
            if (jedis != null) {
                jedis.close();
            }
        }
    }

    /** {@inheritDoc} */
    @Override
    public void delete(String fpId) {
        if (!exist(fpId)) {
            throw new FeatureNotFoundException(fpId);
        }
        Jedis jedis = null;
        try {
            jedis = getJedis();
            // Remove the feature from the mapping bucket, then drop its key.
            jedis.srem(keyBuilder.getKeyFeatureMap(), fpId);
            jedis.del(keyBuilder.getKeyFeature(fpId));
        } finally {
            if (jedis != null) {
                jedis.close();
            }
        }
    }

    /** {@inheritDoc} */
    @Override
    public void grantRoleOnFeature(String flipId, String roleName) {
        Util.assertParamHasLength(roleName, "roleName (#2)");
        // retrieve
        Feature f = read(flipId);
        // modify
        f.getPermissions().add(roleName);
        // persist modification
        update(f);
    }

    /** {@inheritDoc} */
    @Override
    public void removeRoleFromFeature(String flipId, String roleName) {
        Util.assertParamHasLength(roleName, "roleName (#2)");
        // retrieve
        Feature f = read(flipId);
        f.getPermissions().remove(roleName);
        // persist modification
        update(f);
    }

    /** {@inheritDoc} */
    @Override
    public Map<String, Feature> readGroup(String groupName) {
        Util.assertParamHasLength(groupName, "groupName");
        Map<String, Feature> features = readAll();
        Map<String, Feature> group = new HashMap<String, Feature>();
        for (Map.Entry<String, Feature> uid : features.entrySet()) {
            if (groupName.equals(uid.getValue().getGroup())) {
                group.put(uid.getKey(), uid.getValue());
            }
        }
        if (group.isEmpty()) {
            throw new GroupNotFoundException(groupName);
        }
        return group;
    }

    /** {@inheritDoc} */
    @Override
    public boolean existGroup(String groupName) {
        Util.assertParamHasLength(groupName, "groupName");
        // Early-exit scan: no need to materialize the whole group map
        for (Map.Entry<String, Feature> uid : readAll().entrySet()) {
            if (groupName.equals(uid.getValue().getGroup())) {
                return true;
            }
        }
        return false;
    }

    /** {@inheritDoc} */
    @Override
    public void enableGroup(String groupName) {
        Map<String, Feature> features = readGroup(groupName);
        for (Map.Entry<String, Feature> uid : features.entrySet()) {
            uid.getValue().enable();
            update(uid.getValue());
        }
    }

    /** {@inheritDoc} */
    @Override
    public void disableGroup(String groupName) {
        Map<String, Feature> features = readGroup(groupName);
        for (Map.Entry<String, Feature> uid : features.entrySet()) {
            uid.getValue().disable();
            update(uid.getValue());
        }
    }

    /** {@inheritDoc} */
    @Override
    public void addToGroup(String featureId, String groupName) {
        Util.assertParamHasLength(groupName, "groupName (#2)");
        // retrieve
        Feature f = read(featureId);
        f.setGroup(groupName);
        // persist modification
        update(f);
    }

    /** {@inheritDoc} */
    @Override
    public void removeFromGroup(String featureId, String groupName) {
        Util.assertParamHasLength(groupName, "groupName (#2)");
        if (!existGroup(groupName)) {
            throw new GroupNotFoundException(groupName);
        }
        // retrieve
        Feature f = read(featureId);
        f.setGroup(null);
        // persist modification
        update(f);
    }

    /** {@inheritDoc} */
    @Override
    public Set<String> readAllGroups() {
        Map<String, Feature> features = readAll();
        Set<String> groups = new HashSet<String>();
        for (Map.Entry<String, Feature> uid : features.entrySet()) {
            groups.add(uid.getValue().getGroup());
        }
        // Features without a group contribute a null entry; drop it.
        groups.remove(null);
        return groups;
    }

    /** {@inheritDoc} */
    @Override
    public void clear() {
        Jedis jedis = null;
        try {
            jedis = getJedis();
            Set<String> myKeys = jedis.smembers(keyBuilder.getKeyFeatureMap());
            for (String key : myKeys) {
                delete(key);
            }
        } finally {
            if (jedis != null) {
                jedis.close();
            }
        }
    }

    /**
     * Getter accessor for attribute 'redisConnection'.
     *
     * @return current value of 'redisConnection'
     */
    public RedisConnection getRedisConnection() {
        return redisConnection;
    }

    /**
     * Setter accessor for attribute 'redisConnection'.
     *
     * @param redisConnection new value for 'redisConnection'
     */
    public void setRedisConnection(RedisConnection redisConnection) {
        this.redisConnection = redisConnection;
    }

    /**
     * Safe access to Jedis, avoid NPE.
     *
     * @return access jedis
     */
    public Jedis getJedis() {
        if (redisConnection == null) {
            throw new IllegalArgumentException("Cannot found any redisConnection");
        }
        Jedis jedis = redisConnection.getJedis();
        if (jedis == null) {
            throw new IllegalArgumentException("Cannot found any jedis connection, please build connection");
        }
        return jedis;
    }
}
5,365
739
<gh_stars>100-1000 #include <windows.h> #include "ImportHandler.h" #include "HunkList.h" #include "Hunk.h" #include "StringMisc.h" #include "Log.h" #include "Symbol.h" #include "data.h" #include <vector> #include <set> #include <ppl.h> #include <cassert> using namespace std; const char *LoadDLL(const char *name); static unsigned int RVAToFileOffset(const char* module, unsigned int rva) { const IMAGE_DOS_HEADER* pDH = (const PIMAGE_DOS_HEADER)module; const IMAGE_NT_HEADERS32* pNTH = (const PIMAGE_NT_HEADERS32)(module + pDH->e_lfanew); int numSections = pNTH->FileHeader.NumberOfSections; int numDataDirectories = pNTH->OptionalHeader.NumberOfRvaAndSizes; const IMAGE_SECTION_HEADER* sectionHeaders = (const IMAGE_SECTION_HEADER*)&pNTH->OptionalHeader.DataDirectory[numDataDirectories]; for(int i = 0; i < numSections; i++) { if(rva >= sectionHeaders[i].VirtualAddress && rva < sectionHeaders[i].VirtualAddress + sectionHeaders[i].SizeOfRawData) { return rva - sectionHeaders[i].VirtualAddress + sectionHeaders[i].PointerToRawData; } } return rva; } static int GetOrdinal(const char* function, const char* dll) { const char* module = LoadDLL(dll); const IMAGE_DOS_HEADER* dh = (const IMAGE_DOS_HEADER*)module; const IMAGE_FILE_HEADER* coffHeader = (const IMAGE_FILE_HEADER*)(module + dh->e_lfanew + 4); const IMAGE_OPTIONAL_HEADER32* pe = (const IMAGE_OPTIONAL_HEADER32*)(coffHeader + 1); const IMAGE_EXPORT_DIRECTORY* exportdir = (const IMAGE_EXPORT_DIRECTORY*) (module + RVAToFileOffset(module, pe->DataDirectory[IMAGE_DIRECTORY_ENTRY_EXPORT].VirtualAddress)); const short* ordinalTable = (const short*) (module + RVAToFileOffset(module, exportdir->AddressOfNameOrdinals)); const int* nameTable = (const int*)(module + RVAToFileOffset(module, exportdir->AddressOfNames)); for(int i = 0; i < (int)exportdir->NumberOfNames; i++) { int ordinal = ordinalTable[i] + exportdir->Base; const char* name = module + RVAToFileOffset(module, nameTable[i]); if(strcmp(name, function) == 0) { return 
ordinal; } } Log::Error("", "Import '%s' cannot be found in '%s'", function, dll); return -1; } void ForEachExportInDLL(const char *dll, std::function<void (const char*)> fun) { const char* module = LoadDLL(dll); const IMAGE_DOS_HEADER* dh = (const IMAGE_DOS_HEADER*)module; const IMAGE_FILE_HEADER* coffHeader = (const IMAGE_FILE_HEADER*)(module + dh->e_lfanew + 4); const IMAGE_OPTIONAL_HEADER32* pe = (const IMAGE_OPTIONAL_HEADER32*)(coffHeader + 1); const IMAGE_EXPORT_DIRECTORY* exportdir = (const IMAGE_EXPORT_DIRECTORY*)(module + RVAToFileOffset(module, pe->DataDirectory[IMAGE_DIRECTORY_ENTRY_EXPORT].VirtualAddress)); const int* nameTable = (const int*)(module + RVAToFileOffset(module, exportdir->AddressOfNames)); for (int i = 0; i < (int)exportdir->NumberOfNames; i++) { const char* name = module + RVAToFileOffset(module, nameTable[i]); fun(name); } } static const char *GetForwardRVA(const char* dll, const char* function) { const char* module = LoadDLL(dll); const IMAGE_DOS_HEADER* pDH = (const PIMAGE_DOS_HEADER)module; const IMAGE_NT_HEADERS32* pNTH = (const PIMAGE_NT_HEADERS32)(module + pDH->e_lfanew); const DWORD exportRVA = pNTH->OptionalHeader.DataDirectory[0].VirtualAddress; if (exportRVA == 0) { Log::Error("", "Missing export table in '%s'\n\n" "If running under Wine, copy all imported DLL files from a real Windows to your Wine path.", dll); } const IMAGE_EXPORT_DIRECTORY* pIED = (const PIMAGE_EXPORT_DIRECTORY)(module + RVAToFileOffset(module, exportRVA)); const short* ordinalTable = (const short*)(module + RVAToFileOffset(module, pIED->AddressOfNameOrdinals)); const DWORD* namePointerTable = (const DWORD*)(module + RVAToFileOffset(module, pIED->AddressOfNames)); const DWORD* addressTableRVAOffset = (const DWORD*)(module + RVAToFileOffset(module, pIED->AddressOfFunctions)); for(unsigned int i = 0; i < pIED->NumberOfNames; i++) { short ordinal = ordinalTable[i]; const char* name = (const char*)(module + RVAToFileOffset(module, namePointerTable[i])); 
if(strcmp(name, function) == 0) { DWORD address = addressTableRVAOffset[ordinal]; if(address >= pNTH->OptionalHeader.DataDirectory[0].VirtualAddress && address < pNTH->OptionalHeader.DataDirectory[0].VirtualAddress + pNTH->OptionalHeader.DataDirectory[0].Size) return module + RVAToFileOffset(module, address); return NULL; } } Log::Error("", "Import '%s' cannot be found in '%s'", function, dll); return false; } static bool ImportHunkRelation(const Hunk* h1, const Hunk* h2) { // Sort by DLL name if(strcmp(h1->GetImportDll(), h2->GetImportDll()) != 0) { // kernel32 always first if(strcmp(h1->GetImportDll(), "kernel32") == 0) return true; if(strcmp(h2->GetImportDll(), "kernel32") == 0) return false; // Then user32, to ensure MessageBoxA@16 is ready when we need it if(strcmp(h1->GetImportDll(), "user32") == 0) return true; if(strcmp(h2->GetImportDll(), "user32") == 0) return false; return strcmp(h1->GetImportDll(), h2->GetImportDll()) < 0; } // Sort by ordinal return GetOrdinal(h1->GetImportName(), h1->GetImportDll()) < GetOrdinal(h2->GetImportName(), h2->GetImportDll()); } static const int HashCode(const char* str) { int code = 0; char eax; do { code = _rotl(code, 6); eax = *str++; code ^= eax; } while(eax); return code; } __forceinline unsigned int HashCode1K(const char* str, int hash_multiplier, int hash_bits) { int eax = 0; unsigned char c; do { c = *str++; eax = ((eax & 0xFFFFFF00) + c) * hash_multiplier; } while(c & 0x7F); eax = (eax & 0xFFFFFF00) | (unsigned char)(c + c); return ((unsigned int)eax) >> (32 - hash_bits); } static bool SolveDllOrderConstraints(std::vector<unsigned int>& constraints, unsigned int* new_order) { if(constraints[0] > 1) // kernel32 must be first. 
it can't have dependencies on anything else { return false; } std::vector<unsigned int> constraints2 = constraints; unsigned int used_mask = 0; int num = (int)constraints.size(); for(int i = 0; i < num; i++) { int selected = -1; for(int j = 0; j < num; j++) { if(((used_mask >> j) & 1) == 0 && (constraints[j] == 0)) { selected = j; break; } } if(selected == -1) { return false; } *new_order++ = selected; used_mask |= (1u<<selected); for(int j = 0; j < num; j++) { constraints[j] &= ~(1u<<selected); } } return true; } static void AddKnownExportsForDll(std::vector<string>& exports, const char* dll_name) { struct s_known_exports_header { int num_dlls; struct { int name_offset; int num_exports; int export_name_offset_table; } dll_infos[1]; }; const s_known_exports_header* known_exports_header = (const s_known_exports_header*)knownDllExports; int num_known_dlls = known_exports_header->num_dlls; for(int known_dll_index = 0; known_dll_index < num_known_dlls; known_dll_index++) { const char* known_dll_name = knownDllExports + known_exports_header->dll_infos[known_dll_index].name_offset; if(strcmp(dll_name, known_dll_name) == 0) { int num_exports = known_exports_header->dll_infos[known_dll_index].num_exports; const int* offset_table = (const int*) ((const char*)knownDllExports + known_exports_header->dll_infos[known_dll_index].export_name_offset_table); for(int i = 0; i < num_exports; i++) { const char* name = knownDllExports + offset_table[i]; exports.push_back(name); } break; } } } static bool FindCollisionFreeHash(vector<string>& dll_names, const vector<Hunk*>& importHunks, int& hash_multiplier, int& hash_bits) { assert(dll_names.size() <= 32); dll_names.erase(std::find(dll_names.begin(), dll_names.end(), string("kernel32"))); dll_names.insert(dll_names.begin(), "kernel32"); struct SDllInfo { std::vector<std::string> exports; std::vector<char> used; }; int num_dlls = (int)dll_names.size(); std::vector<unsigned int> best_dll_order(num_dlls); // Load DLLs and mark functions 
that are imported vector<SDllInfo> dllinfos(num_dlls); for(int dll_index = 0; dll_index < num_dlls; dll_index++) { const char* dllname = dll_names[dll_index].c_str(); SDllInfo& info = dllinfos[dll_index]; { // Scrape exports from DLL on this machine const char* module = LoadDLL(dllname); const IMAGE_DOS_HEADER* dh = (const IMAGE_DOS_HEADER*)module; const IMAGE_FILE_HEADER* coffHeader = (const IMAGE_FILE_HEADER*)(module + dh->e_lfanew + 4); const IMAGE_OPTIONAL_HEADER32* pe = (const IMAGE_OPTIONAL_HEADER32*)(coffHeader + 1); const IMAGE_EXPORT_DIRECTORY* exportdir = (const IMAGE_EXPORT_DIRECTORY*)(module + RVAToFileOffset(module, pe->DataDirectory[IMAGE_DIRECTORY_ENTRY_EXPORT].VirtualAddress)); int num_names = exportdir->NumberOfNames; const int* name_table = (const int*)(module + RVAToFileOffset(module, exportdir->AddressOfNames)); for(int i = 0; i < num_names; i++) { const char* name = module + RVAToFileOffset(module, name_table[i]); info.exports.push_back(name); } } // Combine with list of known exports for this DLL AddKnownExportsForDll(info.exports, dllname); std::sort(info.exports.begin(), info.exports.end()); info.exports.erase(std::unique(info.exports.begin(), info.exports.end()), info.exports.end()); int num_exports = (int)info.exports.size(); info.used.resize(num_exports); for(Hunk* importHunk : importHunks) { if(strcmp(dllname, importHunk->GetImportDll()) == 0) { // Mark those that are used auto it = std::find(info.exports.begin(), info.exports.end(), importHunk->GetImportName()); if(it != info.exports.end()) { int idx = (int)std::distance(info.exports.begin(), it); info.used[idx] = 1; } else { assert(false); Log::Error("", "Could not find '%s' in '%s'", importHunk->GetImportName(), importHunk->GetImportDll()); } } } } const int MAX_BITS = 16; int best_num_bits = INT_MAX; // Find hash function that works // We do however allow hash overlaps from separate DLLs. 
// To exploit this we sort the dlls to avoid collisions when possible struct SBucket { unsigned int unreferenced_functions_dll_mask; unsigned char referenced_function_dll_index; // dll_index + 1 }; int best_low_byte = INT_MAX; int best_high_byte = INT_MAX; concurrency::critical_section cs; for(int num_bits = MAX_BITS; num_bits >= 1; num_bits--) { concurrency::parallel_for(0, 256, [&](int high_byte) { { Concurrency::critical_section::scoped_lock l(cs); if(num_bits == best_num_bits && high_byte > best_high_byte) { return; } } std::vector<unsigned int> dll_constraints(num_dlls); std::vector<unsigned int> new_dll_order(num_dlls); SBucket* buckets = new SBucket[(size_t)1 << num_bits]; for(int low_byte = 0; low_byte < 256; low_byte++) { for(int dll_index = 0; dll_index < num_dlls; dll_index++) { dll_constraints[dll_index] = 0; } int hash_multiplier = (high_byte << 16) | (low_byte << 8) | 1; memset(buckets, 0, sizeof(SBucket) << num_bits); bool has_collisions = false; unsigned int dll_index = 0; for(SDllInfo& dllinfo : dllinfos) { unsigned int dll_mask = (1u << dll_index); int num_names = (int)dllinfo.exports.size(); for(int i = 0; i < num_names; i++) { unsigned int hashcode = HashCode1K(dllinfo.exports[i].c_str(), hash_multiplier, num_bits); bool new_referenced = dllinfo.used[i]; bool old_referenced = buckets[hashcode].referenced_function_dll_index > 0; if(new_referenced) { if(old_referenced) { has_collisions = true; break; } else { buckets[hashcode].referenced_function_dll_index = dll_index + 1; buckets[hashcode].unreferenced_functions_dll_mask &= ~dll_mask; // Clear unreferenced before this dll_constraints[dll_index] |= buckets[hashcode].unreferenced_functions_dll_mask; } } else { buckets[hashcode].unreferenced_functions_dll_mask |= dll_mask; if(old_referenced) { int old_dll_index = buckets[hashcode].referenced_function_dll_index - 1; if(old_dll_index == dll_index) { has_collisions = true; break; } dll_constraints[old_dll_index] |= dll_mask; } } } dll_index++; 
if(has_collisions) { break; } } if(!has_collisions && SolveDllOrderConstraints(dll_constraints, &new_dll_order[0])) { Concurrency::critical_section::scoped_lock l(cs); if(num_bits < best_num_bits || high_byte < best_high_byte) { best_low_byte = low_byte; best_high_byte = high_byte; best_num_bits = num_bits; best_dll_order = new_dll_order; } break; } } delete[] buckets; }); int best_hash_multiplier = (best_high_byte << 16) | (best_low_byte << 8) | 1; if(best_num_bits > num_bits) { break; } } int best_hash_multiplier = (best_high_byte << 16) | (best_low_byte << 8) | 1; if(best_num_bits == INT_MAX) { return false; } // Reorder DLLs std::vector<std::string> new_dlls(num_dlls); for(int i = 0; i < num_dlls; i++) { new_dlls[i] = dll_names[best_dll_order[i]]; } dll_names = new_dlls; hash_multiplier = best_hash_multiplier; hash_bits = best_num_bits; return true; } static Hunk* ForwardImport(Hunk* hunk) { do { const char *forward = GetForwardRVA(hunk->GetImportDll(), hunk->GetImportName()); if (forward == NULL) break; string dllName, functionName; int sep = int(strstr(forward, ".") - forward); dllName.append(forward, sep); dllName = ToLower(dllName); functionName.append(&forward[sep + 1], strlen(forward) - (sep + 1)); Log::Warning("", "Import '%s' from '%s' uses forwarded RVA. 
Replaced by '%s' from '%s'", hunk->GetImportName(), hunk->GetImportDll(), functionName.c_str(), dllName.c_str()); hunk = new Hunk(hunk->GetName(), functionName.c_str(), dllName.c_str()); } while (true); return hunk; } HunkList* ImportHandler::CreateImportHunks(HunkList* hunklist, Hunk*& hashHunk, map<string, string>& fallbackDlls, const vector<string>& rangeDlls, bool verbose, bool& enableRangeImport) { if(verbose) printf("\n-Imports----------------------------------\n"); vector<Hunk*> importHunks; vector<bool> usedRangeDlls(rangeDlls.size()); set<string> usedFallbackDlls; // Fill list for import hunks enableRangeImport = false; for(int i = 0; i <hunklist->GetNumHunks(); i++) { Hunk* hunk = (*hunklist)[i]; if(hunk->GetFlags() & HUNK_IS_IMPORT) { hunk = ForwardImport(hunk); // Is the DLL a range DLL? for(int i = 0; i < (int)rangeDlls.size(); i++) { if(ToUpper(rangeDlls[i]) == ToUpper(hunk->GetImportDll())) { usedRangeDlls[i] = true; enableRangeImport = true; break; } } importHunks.push_back(hunk); } } // Sort import hunks sort(importHunks.begin(), importHunks.end(), ImportHunkRelation); vector<unsigned int> hashes; Hunk* importList = new Hunk("ImportListHunk", 0, HUNK_IS_WRITEABLE, 16, 0, 0); char dllNames[1024] = {0}; char* dllNamesPtr = dllNames+1; char* hashCounter = dllNames; string currentDllName; int pos = 0; for(vector<Hunk*>::const_iterator it = importHunks.begin(); it != importHunks.end();) { Hunk* importHunk = *it; bool useRange = false; // Is the DLL a range DLL? 
for(int i = 0; i < (int)rangeDlls.size(); i++) { if(ToUpper(rangeDlls[i]) == ToUpper(importHunk->GetImportDll())) { usedRangeDlls[i] = true; useRange = true; break; } } // Skip non hashes if(currentDllName.compare(importHunk->GetImportDll())) { if(strcmp(importHunk->GetImportDll(), "kernel32") != 0) { set<string> seen; string dll = importHunk->GetImportDll(); strcpy_s(dllNamesPtr, sizeof(dllNames)-(dllNamesPtr-dllNames), dll.c_str()); dllNamesPtr += dll.size() + 1; while (fallbackDlls.count(dll) != 0) { usedFallbackDlls.insert(dll); seen.insert(dll); *dllNamesPtr = 0; dllNamesPtr += 1; dll = fallbackDlls[dll]; strcpy_s(dllNamesPtr, sizeof(dllNames) - (dllNamesPtr - dllNames), dll.c_str()); dllNamesPtr += dll.size() + 1; if (seen.count(dll) != 0) Log::Error("", "Cyclic DLL fallback"); } hashCounter = dllNamesPtr; *hashCounter = 0; dllNamesPtr += 1; } currentDllName = importHunk->GetImportDll(); if(verbose) printf("%s\n", currentDllName.c_str()); } (*hashCounter)++; int hashcode = HashCode(importHunk->GetImportName()); hashes.push_back(hashcode); int startOrdinal = GetOrdinal(importHunk->GetImportName(), importHunk->GetImportDll()); int ordinal = startOrdinal; // Add import if(verbose) { if(useRange) printf(" ordinal range {\n "); printf(" %s (ordinal %d, hash %08X)\n", (*it)->GetImportName(), startOrdinal, hashcode); } importList->AddSymbol(new Symbol(importHunk->GetName(), pos*4, SYMBOL_IS_RELOCATEABLE, importList)); it++; while(useRange && it != importHunks.end() && currentDllName.compare((*it)->GetImportDll()) == 0) // Import the rest of the range { int o = GetOrdinal((*it)->GetImportName(), (*it)->GetImportDll()); if(o - startOrdinal >= 254) break; if(verbose) { printf(" %s (ordinal %d)\n", (*it)->GetImportName(), o); } ordinal = o; importList->AddSymbol(new Symbol((*it)->GetName(), (pos+ordinal-startOrdinal)*4, SYMBOL_IS_RELOCATEABLE, importList)); it++; } if(verbose && useRange) printf(" }\n"); if(enableRangeImport) *dllNamesPtr++ = ordinal - startOrdinal + 1; 
pos += ordinal - startOrdinal + 1; } *dllNamesPtr++ = -1; // Warn about unused range DLLs for (int i = 0; i < (int)rangeDlls.size(); i++) { if (!usedRangeDlls[i]) { Log::Warning("", "No functions were imported from range DLL '%s'", rangeDlls[i].c_str()); } } // Warn about unused fallback DLLs for (auto fallback : fallbackDlls) { if (usedFallbackDlls.count(fallback.first) == 0) { Log::Warning("", "No functions were imported from fallback DLL '%s'", fallback.first.c_str()); } } importList->SetVirtualSize(pos*4); importList->AddSymbol(new Symbol("_ImportList", 0, SYMBOL_IS_RELOCATEABLE, importList)); importList->AddSymbol(new Symbol(".bss", 0, SYMBOL_IS_RELOCATEABLE|SYMBOL_IS_SECTION, importList, "crinkler import")); hashHunk = new Hunk("HashHunk", (char*)hashes.data(), 0, 0, int(hashes.size()*sizeof(unsigned int)), int(hashes.size()*sizeof(unsigned int))); // Create new hunklist HunkList* newHunks = new HunkList; newHunks->AddHunkBack(importList); Hunk* dllNamesHunk = new Hunk("DllNames", dllNames, HUNK_IS_WRITEABLE | HUNK_IS_LEADING, 0, int(dllNamesPtr - dllNames), int(dllNamesPtr - dllNames)); dllNamesHunk->AddSymbol(new Symbol(".data", 0, SYMBOL_IS_RELOCATEABLE|SYMBOL_IS_SECTION, dllNamesHunk, "crinkler import")); dllNamesHunk->AddSymbol(new Symbol("_DLLNames", 0, SYMBOL_IS_RELOCATEABLE, dllNamesHunk)); newHunks->AddHunkBack(dllNamesHunk); return newHunks; } HunkList* ImportHandler::CreateImportHunks1K(HunkList* hunklist, bool verbose, int& hash_bits, int& max_dll_name_length) { if (verbose) { printf("\n-Imports----------------------------------\n"); } vector<Hunk*> importHunks; set<string> dll_set; bool found_kernel32 = false; // Fill list for import hunks for(int i = 0; i < hunklist->GetNumHunks(); i++) { Hunk* hunk = (*hunklist)[i]; if(hunk->GetFlags() & HUNK_IS_IMPORT) { hunk = ForwardImport(hunk); if(strcmp(hunk->GetImportDll(), "kernel32") == 0) { found_kernel32 = true; } dll_set.insert(hunk->GetImportDll()); importHunks.push_back(hunk); } } 
if(!found_kernel32) { Log::Error("", "Kernel32 needs to be linked for import code to function."); } int hash_multiplier; vector<string> dlls(dll_set.begin(), dll_set.end()); if (!FindCollisionFreeHash(dlls, importHunks, hash_multiplier, hash_bits)) { Log::Error("", "Could not find collision-free hash function"); } string dllnames; int max_name_length = 0; for(string name : dlls) { max_name_length = max(max_name_length, (int)name.size() + 1); } for(string name : dlls) { while (dllnames.size() % max_name_length) { dllnames.push_back(0); } if(name.compare("kernel32") != 0) { dllnames += name; } } Hunk* importList = new Hunk("ImportListHunk", 0, HUNK_IS_WRITEABLE, 8, 0, 65536*256); importList->AddSymbol(new Symbol("_HashMultiplier", hash_multiplier, 0, importList)); importList->AddSymbol(new Symbol("_ImportList", 0, SYMBOL_IS_RELOCATEABLE, importList)); for(Hunk* importHunk : importHunks) { unsigned int hashcode = HashCode1K(importHunk->GetImportName(), hash_multiplier, hash_bits); importList->AddSymbol(new Symbol(importHunk->GetName(), hashcode*4, SYMBOL_IS_RELOCATEABLE, importList)); } if(verbose) { for(string dllname : dlls) { printf("%s\n", dllname.c_str()); for(Hunk* importHunk : importHunks) { if(strcmp(importHunk->GetImportDll(), dllname.c_str()) == 0) { int ordinal = GetOrdinal(importHunk->GetImportName(), importHunk->GetImportDll()); printf(" %s (ordinal %d)\n", importHunk->GetImportName(), ordinal); } } } } HunkList* newHunks = new HunkList; Hunk* dllNamesHunk = new Hunk("DllNames", dllnames.c_str(), HUNK_IS_WRITEABLE | HUNK_IS_LEADING, 0, (int)dllnames.size() + 1, (int)dllnames.size() + 1); dllNamesHunk->AddSymbol(new Symbol("_DLLNames", 0, SYMBOL_IS_RELOCATEABLE, dllNamesHunk)); newHunks->AddHunkBack(dllNamesHunk); newHunks->AddHunkBack(importList); max_dll_name_length = max_name_length; printf( "\n" "Note: Programs linked using the TINYIMPORT option may break if a future Windows\n" "version adds functions to one of the imported DLLs. 
Such breakage cannot be\n" "fixed by using the RECOMPRESS feature. When using this option, it is strongly\n" "recommended to also distribute a version of your program linked using the\n" "normal import mechanism (without the TINYIMPORT option).\n" ); return newHunks; }
9,999
14,668
// Copyright 2021 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CHROME_BROWSER_BREADCRUMBS_BREADCRUMB_MANAGER_KEYED_SERVICE_FACTORY_H_ #define CHROME_BROWSER_BREADCRUMBS_BREADCRUMB_MANAGER_KEYED_SERVICE_FACTORY_H_ #include "base/no_destructor.h" #include "components/keyed_service/content/browser_context_keyed_service_factory.h" namespace breadcrumbs { class BreadcrumbManagerKeyedService; } // namespace breadcrumbs namespace content { class BrowserContext; } // namespace content class BreadcrumbManagerKeyedServiceFactory : public BrowserContextKeyedServiceFactory { public: static BreadcrumbManagerKeyedServiceFactory* GetInstance(); static breadcrumbs::BreadcrumbManagerKeyedService* GetForBrowserContext( content::BrowserContext* context); BreadcrumbManagerKeyedServiceFactory( const BreadcrumbManagerKeyedServiceFactory&) = delete; private: friend class base::NoDestructor<BreadcrumbManagerKeyedServiceFactory>; BreadcrumbManagerKeyedServiceFactory(); ~BreadcrumbManagerKeyedServiceFactory() override; // BrowserContextKeyedServiceFactory implementation. KeyedService* BuildServiceInstanceFor( content::BrowserContext* context) const override; content::BrowserContext* GetBrowserContextToUse( content::BrowserContext* context) const override; }; #endif // CHROME_BROWSER_BREADCRUMBS_BREADCRUMB_MANAGER_KEYED_SERVICE_FACTORY_H_
473
2,023
# -*- coding: utf-8 -*-
import sys
import os
import random
import pypyodbc
import savReaderWriter

__version__ = "1.0.0"
__author__ = "<NAME>"
__email__ = "@".join(["fomcl", "yahoo" + ".com"])

""" sav2mdb.py: convert SPSS system files (codepage) to Microsoft Access files """


def get_table_name(filename):
    """Derive an Access table name from a file path: the base name without
    extension, capitalized, with spaces replaced by underscores."""
    tbl = os.path.splitext(os.path.basename(filename))[0]
    return tbl.capitalize().replace(" ", "_")


def get_metadata(savFilename):
    """Gets variable names (list), variable types and formats (dict)"""
    with savReaderWriter.SavHeaderReader(savFilename) as header:
        varNames, varTypes = header.varNames, header.varTypes
        formats = header.formats
    return varNames, varTypes, formats


def sql_create_table(savFilename):
    """Generate SQL 'CREATE TABLE' statement on the basis of <savFilename>
    SPSS-to-SQL datatype translation:
    numeric, except date/time --> FLOAT
    date or time              --> CHAR(26) (iso dates where applicable)
    string < 256 bytes        --> CHAR of that length
    string >= 256 bytes       --> TEXT
    $sysmis                   --> NULL
    """
    varNames, varTypes, formats = get_metadata(savFilename)
    tbl = get_table_name(savFilename)
    # if "id" happens to be an existing varname, then suffix the primary key
    suffix = "_%04d" % random.randint(1000, 9999) if "id" in varNames else ""
    sql = "CREATE TABLE %(tbl)s (id%(suffix)s COUNTER PRIMARY KEY,\n    "
    sql = [sql % locals()]
    for varName in varNames:
        varType = varTypes[varName]
        format_ = formats[varName].lower()
        # Numeric -> FLOAT; short string -> CHAR(n); long string -> TEXT
        dataType = "FLOAT" if varType == 0 else \
                   "CHAR(%d)" % varType if varType < 256 else "TEXT"
        # Dates/times are stored as fixed-width ISO strings
        dataType = "CHAR(26)" if "time" in format_ or \
                   "date" in format_ else dataType
        sql.append("%(varName)s %(dataType)s, \n    " % locals())
    return "".join(sql).rstrip(", \n    ") + "\n);"


def sql_insert_template(savFilename):
    """Generate SQL 'INSERT INTO' template, suitable for sql quote escaping"""
    varNames, varTypes, formats = get_metadata(savFilename)
    tbl = get_table_name(savFilename)
    varNames_ = ", ".join(varNames)
    insert = "INSERT INTO %(tbl)s (%(varNames_)s) VALUES " % locals()
    template = ", ".join(["?"] * len(varNames))
    return insert + "(" + template + ");\n"


def write_ms_access_file(savFilename, mdbFilename=None, overwrite=True):
    """Write the actual MS Access file.

    savFilename -- source SPSS .sav file
    mdbFilename -- target .mdb file (default: same name as savFilename)
    overwrite   -- if truthy, delete a pre-existing target file first
    """
    if not sys.platform.startswith("win"):
        raise EnvironmentError("Sorry, Windows only")
    if not mdbFilename:
        mdbFilename = os.path.splitext(savFilename)[0] + ".mdb"
    mdbFilename = mdbFilename.replace(" ", "_")
    if os.path.exists(mdbFilename) and overwrite:
        os.remove(mdbFilename)
    create_table = sql_create_table(savFilename)
    insert_table = sql_insert_template(savFilename)
    pypyodbc.win_create_mdb(mdbFilename)
    # FIX: initialize before the try block so the finally clause cannot raise
    # NameError when pypyodbc.connect itself fails.
    connection = None
    try:
        conn_string = 'Driver={Microsoft Access Driver (*.mdb)};DBQ=%s'
        connection = pypyodbc.connect(conn_string % mdbFilename)
        cursor = connection.cursor()
        cursor.execute(create_table)
        with savReaderWriter.SavReader(savFilename) as reader:
            for record in reader:
                cursor.execute(insert_table, tuple(record))
        cursor.commit()
    finally:
        if connection is not None:
            connection.close()


if __name__ == "__main__":
    args = sys.argv[1:]
    if len(args) == 1:
        write_ms_access_file(args[0])
    elif len(args) == 2:
        write_ms_access_file(args[0], args[1])
    elif len(args) == 3:
        # FIX: argv values are strings, and the non-empty string "False" is
        # truthy; interpret the flag explicitly instead of passing it raw.
        overwrite = args[2].strip().lower() not in ("false", "0", "no")
        write_ms_access_file(args[0], args[1], overwrite)
    else:
        # FIX: previously the usage message was unreachable when the script
        # was invoked with no arguments at all.
        print ("Usage: sav2mdb savFilename[[, mdbFilename], overwrite]\n"
               "If overwrite (True/False) is specified, mdbFilename must\n"
               "also be specified")
1,610
531
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.karaf.config.command;

import java.io.IOException;
import java.io.StringReader;
import java.io.StringWriter;
import java.util.Map;
import java.util.TreeMap;

import org.apache.felix.utils.properties.TypedProperties;
import org.apache.karaf.shell.api.action.Command;
import org.apache.karaf.shell.api.action.Option;
import org.apache.karaf.shell.api.action.lifecycle.Service;

@Command(scope = "config", name = "property-list", description = "Lists properties from the currently edited configuration.")
@Service
public class PropListCommand extends ConfigPropertyCommandSupport {

    @Option(name = "--raw")
    boolean raw;

    @Override
    public void propertyAction(TypedProperties props) {
        if (raw) {
            // Raw mode: serialize the properties verbatim and print them.
            try {
                StringWriter writer = new StringWriter();
                props.save(writer);
                System.out.print(writer.toString());
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
            return;
        }

        // Round-trip through a save/load cycle to normalize the properties
        // before display; on failure, fall back to the original instance.
        try {
            StringWriter writer = new StringWriter();
            props.save(writer);
            TypedProperties reloaded = new TypedProperties();
            reloaded.load(new StringReader(writer.toString()));
            props = reloaded;
        } catch (IOException e) {
            // Ignore
        }

        // Print the entries sorted by key for stable, readable output.
        Map<String, Object> ordered = new TreeMap<>(props);
        for (Map.Entry<String, Object> property : ordered.entrySet()) {
            System.out.println(" " + property.getKey() + " = " + displayValue(property.getValue()));
        }
    }

    /**
     * Check if a configuration (identified by PID) requires an update or not.
     *
     * @param pid the configuration PID.
     * @return true if the configuration requires an update, false else (always returns false).
     */
    @Override
    protected boolean requiresUpdate(String pid) {
        return false;
    }

}
1,048
2,958
<gh_stars>1000+ package com.blade.kit.json; import com.blade.kit.DateKit; import com.blade.kit.ReflectKit; import java.lang.reflect.Array; import java.math.BigDecimal; import java.time.LocalDate; import java.time.LocalDateTime; import java.util.ArrayList; import java.util.Collection; import java.util.Date; import java.util.Map; public class SampleJsonSerializer { private int position; private final char[] buffer; public static String serialize(Object object) throws IllegalArgumentException { if (object == null) { return "null"; } if (object instanceof String) { return '\"' + object.toString().replace("\b", "\\b") .replace("\t", "\\t").replace("\r", "\\r") .replace("\f", "\\f").replace("\n", "\\n") + '\"'; } if (ReflectKit.isBasicType(object)) { return object.toString(); } if (object instanceof BigDecimal) { return serialize(object.toString()); } if(object instanceof Date){ return DateKit.toString((Date) object, "yyyy-MM-dd HH:mm:ss"); } if(object instanceof LocalDate){ return DateKit.toString((LocalDate) object, "yyyy-MM-dd"); } if(object instanceof LocalDateTime){ return DateKit.toString((LocalDateTime) object, "yyyy-MM-dd HH:mm:ss"); } if (object instanceof Map) { StringBuilder sb = new StringBuilder(); sb.append('{'); Map map = (Map) object; for (Object key : map.keySet()) { Object value = map.get(key); sb.append(serialize(key)).append(':').append(serialize(value)).append(','); } int last = sb.length() - 1; if (sb.charAt(last) == ',') sb.deleteCharAt(last); sb.append('}'); return sb.toString(); } if (object instanceof Collection) { return serialize(((Collection) object).toArray()); } if (object.getClass().isArray()) { StringBuilder sb = new StringBuilder(); sb.append('['); int last = Array.getLength(object) - 1; for (int i = 0; i <= last; ++i) { Object value = Array.get(object, i); sb.append(serialize(value)).append(','); } last = sb.length() - 1; if (sb.charAt(last) == ',') sb.deleteCharAt(last); sb.append(']'); return sb.toString(); } throw new 
IllegalArgumentException(object.toString()); } /** * Deserializer a json string to data object * * @param json the json string which will be deserialized * @return the data object made from json * @throws ParseException thrown when parsing a illegal json text */ public static Object deserialize(String json) throws ParseException { return new SampleJsonSerializer(json).nextValue(); } private SampleJsonSerializer(String string) { this.buffer = string.toCharArray(); this.position = -1; } private Object nextValue() throws ParseException { try { char c = this.nextToken(); switch (c) { case '{': try { Ason<String, Object> ason = new Ason<>(); if (nextToken() != '}') { --position; while (true) { String key = nextValue().toString(); if (nextToken() != ':') { throw new ParseException(new String(this.buffer), this.position, "Expected a ':' after a key"); } ason.put(key, nextValue()); switch (nextToken()) { case ';': case ',': if (nextToken() == '}') { return ason; } --position; break; case '}': return (ason); default: throw new ParseException(new String(this.buffer), this.position, "Expected a ',' or '}'"); } } } else return (ason); } catch (ArrayIndexOutOfBoundsException ignore) { throw new ParseException(new String(this.buffer), this.position, "Expected a ',' or '}'"); } case '[': try { ArrayList<Object> list = new ArrayList<>(); if (nextToken() != ']') { --position; while (true) { if (nextToken() == ',') { --position; list.add(null); } else { --position; list.add(nextValue()); } switch (nextToken()) { case ',': if (nextToken() == ']') { return (list); } --position; break; case ']': return (list); default: throw new ParseException(new String(this.buffer), this.position, "Expected a ',' or ']'"); } } } else return (list); } catch (ArrayIndexOutOfBoundsException ignore) { throw new ParseException(new String(this.buffer), this.position, "Expected a ',' or ']'"); } case '"': case '\'': StringBuilder sb = new StringBuilder(); while (true) { char ch = this.buffer[++position]; 
switch (ch) { case '\n': case '\r': throw new ParseException(new String(this.buffer), this.position, "Unterminated string"); case '\\': ch = this.buffer[++position]; switch (ch) { case 'b': sb.append('\b'); break; case 't': sb.append('\t'); break; case 'n': sb.append('\n'); break; case 'f': sb.append('\f'); break; case 'r': sb.append('\r'); break; case 'u': int num = 0; for (int i = 3; i >= 0; --i) { int tmp = buffer[++position]; if (tmp <= '9' && tmp >= '0') tmp = tmp - '0'; else if (tmp <= 'F' && tmp >= 'A') tmp = tmp - ('A' - 10); else if (tmp <= 'f' && tmp >= 'a') tmp = tmp - ('a' - 10); else throw new ParseException(new String(this.buffer), this.position, "Illegal hex code"); num += tmp << (i * 4); } sb.append((char) num); break; case '"': case '\'': case '\\': case '/': sb.append(ch); break; default: throw new ParseException(new String(this.buffer), this.position, "Illegal escape."); } break; default: if (ch == c) { return (sb.toString()); } sb.append(ch); } } } int startPosition = this.position; while (c >= ' ' && ",:]}/\\\"[{;=#".indexOf(c) < 0) c = this.buffer[++position]; String substr = new String(buffer, startPosition, position-- - startPosition); if ("true".equalsIgnoreCase(substr)) { return (Boolean.TRUE); } if ("false".equalsIgnoreCase(substr)) { return (Boolean.FALSE); } if ("null".equalsIgnoreCase(substr)) { return null; } char b = "-+".indexOf(substr.charAt(0)) < 0 ? substr.charAt(0) : substr.charAt(1); if (b >= '0' && b <= '9') { try { Long l = new Long(substr); if (l.intValue() == l) return (l.intValue()); return (l); } catch (NumberFormatException exInt) { try { return (Double.parseDouble(substr)); } catch (NumberFormatException ignore) { } } } return (substr); } catch (ArrayIndexOutOfBoundsException ignore) { throw new ParseException(new String(this.buffer), this.position, "Unexpected end"); } } private char nextToken() throws ArrayIndexOutOfBoundsException { while (this.buffer[++position] <= ' ') ; return this.buffer[position]; } }
7,123
335
<reponame>Safal08/Hacktoberfest-1 { "word": "Diffraction", "definitions": [ "The process by which a beam of light or other system of waves is spread out as a result of passing through a narrow aperture or across an edge, typically accompanied by interference between the wave forms produced." ], "parts-of-speech": "Noun" }
109
407
package com.alibaba.tesla.tkgone.server.domain; import java.io.Serializable; import java.util.Date; public class Config implements Serializable { private Long id; private Date gmtCreate; private Date gmtModified; private String category; private String nrType; private String nrId; private String name; private String modifier; private String content; private static final long serialVersionUID = 1L; public Long getId() { return id; } public void setId(Long id) { this.id = id; } public Date getGmtCreate() { return gmtCreate; } public void setGmtCreate(Date gmtCreate) { this.gmtCreate = gmtCreate; } public Date getGmtModified() { return gmtModified; } public void setGmtModified(Date gmtModified) { this.gmtModified = gmtModified; } public String getCategory() { return category; } public void setCategory(String category) { this.category = category == null ? null : category.trim(); } public String getNrType() { return nrType; } public void setNrType(String nrType) { this.nrType = nrType == null ? null : nrType.trim(); } public String getNrId() { return nrId; } public void setNrId(String nrId) { this.nrId = nrId == null ? null : nrId.trim(); } public String getName() { return name; } public void setName(String name) { this.name = name == null ? null : name.trim(); } public String getModifier() { return modifier; } public void setModifier(String modifier) { this.modifier = modifier == null ? null : modifier.trim(); } public String getContent() { return content; } public void setContent(String content) { this.content = content == null ? null : content.trim(); } @Override public boolean equals(Object that) { if (this == that) { return true; } if (that == null) { return false; } if (getClass() != that.getClass()) { return false; } Config other = (Config) that; return (this.getId() == null ? other.getId() == null : this.getId().equals(other.getId())) && (this.getGmtCreate() == null ? 
other.getGmtCreate() == null : this.getGmtCreate().equals(other.getGmtCreate())) && (this.getGmtModified() == null ? other.getGmtModified() == null : this.getGmtModified().equals(other.getGmtModified())) && (this.getCategory() == null ? other.getCategory() == null : this.getCategory().equals(other.getCategory())) && (this.getNrType() == null ? other.getNrType() == null : this.getNrType().equals(other.getNrType())) && (this.getNrId() == null ? other.getNrId() == null : this.getNrId().equals(other.getNrId())) && (this.getName() == null ? other.getName() == null : this.getName().equals(other.getName())) && (this.getModifier() == null ? other.getModifier() == null : this.getModifier().equals(other.getModifier())) && (this.getContent() == null ? other.getContent() == null : this.getContent().equals(other.getContent())); } @Override public int hashCode() { final int prime = 31; int result = 1; result = prime * result + ((getId() == null) ? 0 : getId().hashCode()); result = prime * result + ((getGmtCreate() == null) ? 0 : getGmtCreate().hashCode()); result = prime * result + ((getGmtModified() == null) ? 0 : getGmtModified().hashCode()); result = prime * result + ((getCategory() == null) ? 0 : getCategory().hashCode()); result = prime * result + ((getNrType() == null) ? 0 : getNrType().hashCode()); result = prime * result + ((getNrId() == null) ? 0 : getNrId().hashCode()); result = prime * result + ((getName() == null) ? 0 : getName().hashCode()); result = prime * result + ((getModifier() == null) ? 0 : getModifier().hashCode()); result = prime * result + ((getContent() == null) ? 
0 : getContent().hashCode()); return result; } @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append(getClass().getSimpleName()); sb.append(" ["); sb.append("Hash = ").append(hashCode()); sb.append(", id=").append(id); sb.append(", gmtCreate=").append(gmtCreate); sb.append(", gmtModified=").append(gmtModified); sb.append(", category=").append(category); sb.append(", nrType=").append(nrType); sb.append(", nrId=").append(nrId); sb.append(", name=").append(name); sb.append(", modifier=").append(modifier); sb.append(", content=").append(content); sb.append(", serialVersionUID=").append(serialVersionUID); sb.append("]"); return sb.toString(); } }
2,075
879
<reponame>qianfei11/zstack package org.zstack.sdk; import org.zstack.sdk.PciDeviceMetaData; public class PciDeviceOfferingInstanceOfferingRefInventory { public long id; public void setId(long id) { this.id = id; } public long getId() { return this.id; } public java.lang.String instanceOfferingUuid; public void setInstanceOfferingUuid(java.lang.String instanceOfferingUuid) { this.instanceOfferingUuid = instanceOfferingUuid; } public java.lang.String getInstanceOfferingUuid() { return this.instanceOfferingUuid; } public java.lang.String pciDeviceOfferingUuid; public void setPciDeviceOfferingUuid(java.lang.String pciDeviceOfferingUuid) { this.pciDeviceOfferingUuid = pciDeviceOfferingUuid; } public java.lang.String getPciDeviceOfferingUuid() { return this.pciDeviceOfferingUuid; } public PciDeviceMetaData metadata; public void setMetadata(PciDeviceMetaData metadata) { this.metadata = metadata; } public PciDeviceMetaData getMetadata() { return this.metadata; } public java.lang.Integer pciDeviceCount; public void setPciDeviceCount(java.lang.Integer pciDeviceCount) { this.pciDeviceCount = pciDeviceCount; } public java.lang.Integer getPciDeviceCount() { return this.pciDeviceCount; } }
536
831
<gh_stars>100-1000 // Copyright 2000-2019 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file. package org.jetbrains.jps.android.model.impl; import com.intellij.util.xmlb.XmlSerializer; import java.util.Collections; import java.util.List; import org.jdom.Element; import org.jetbrains.android.facet.AndroidFacetProperties; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import org.jetbrains.jps.android.AndroidJpsUtil; import org.jetbrains.jps.android.model.JpsAndroidModuleExtension; import org.jetbrains.jps.android.model.JpsAndroidSdkProperties; import org.jetbrains.jps.android.model.JpsAndroidSdkType; import org.jetbrains.jps.model.JpsElement; import org.jetbrains.jps.model.JpsElementFactory; import org.jetbrains.jps.model.JpsSimpleElement; import org.jetbrains.jps.model.module.JpsModule; import org.jetbrains.jps.model.serialization.JpsModelSerializerExtension; import org.jetbrains.jps.model.serialization.JpsProjectExtensionSerializer; import org.jetbrains.jps.model.serialization.artifact.JpsArtifactPropertiesSerializer; import org.jetbrains.jps.model.serialization.artifact.JpsPackagingElementSerializer; import org.jetbrains.jps.model.serialization.facet.JpsFacetConfigurationSerializer; import org.jetbrains.jps.model.serialization.library.JpsSdkPropertiesSerializer; public class JpsAndroidModelSerializerExtension extends JpsModelSerializerExtension { private static final List<? 
extends JpsFacetConfigurationSerializer<JpsAndroidModuleExtension>> FACET_PROPERTIES_LOADERS = Collections.singletonList(new JpsFacetConfigurationSerializer<JpsAndroidModuleExtension>( JpsAndroidModuleExtensionImpl.KIND, AndroidJpsUtil.ANDROID_FACET_TYPE_ID, AndroidJpsUtil.ANDROID_FACET_NAME) { @Override public JpsAndroidModuleExtension loadExtension(@NotNull Element facetConfigurationElement, String name, JpsElement parent, JpsModule module) { return new JpsAndroidModuleExtensionImpl(XmlSerializer.deserialize(facetConfigurationElement, AndroidFacetProperties.class)); } }); private static final JpsSdkPropertiesSerializer<JpsSimpleElement<JpsAndroidSdkProperties>> SDK_PROPERTIES_LOADER = new JpsSdkPropertiesSerializer<JpsSimpleElement<JpsAndroidSdkProperties>>("Android SDK", JpsAndroidSdkType.INSTANCE) { @NotNull @Override public JpsSimpleElement<JpsAndroidSdkProperties> loadProperties(@Nullable Element propertiesElement) { String buildTarget; String jdkName; if (propertiesElement != null) { buildTarget = propertiesElement.getAttributeValue("sdk"); jdkName = propertiesElement.getAttributeValue("jdk"); } else { buildTarget = null; jdkName = null; } return JpsElementFactory.getInstance().createSimpleElement(new JpsAndroidSdkProperties(buildTarget, jdkName)); } }; @NotNull @Override public List<? extends JpsFacetConfigurationSerializer<?>> getFacetConfigurationSerializers() { return FACET_PROPERTIES_LOADERS; } @NotNull @Override public List<? extends JpsPackagingElementSerializer<?>> getPackagingElementSerializers() { return Collections.singletonList(new JpsAndroidFinalPackageElementSerializer()); } @NotNull @Override public List<? extends JpsArtifactPropertiesSerializer<?>> getArtifactTypePropertiesSerializers() { return Collections.singletonList(new JpsAndroidApplicationArtifactPropertiesSerializer()); } @NotNull @Override public List<? 
extends JpsProjectExtensionSerializer> getProjectExtensionSerializers() { return Collections.singletonList(new JpsAndroidDexSettingsSerializer()); } @NotNull @Override public List<? extends JpsSdkPropertiesSerializer<?>> getSdkPropertiesSerializers() { return Collections.singletonList(SDK_PROPERTIES_LOADER); } }
1,429
1,249
<filename>plugins/decor/deco-layout.hpp #pragma once #include <vector> #include <wayfire/region.hpp> #include "deco-button.hpp" namespace wf { namespace decor { static constexpr uint32_t DECORATION_AREA_RENDERABLE_BIT = (1 << 16); static constexpr uint32_t DECORATION_AREA_RESIZE_BIT = (1 << 17); static constexpr uint32_t DECORATION_AREA_MOVE_BIT = (1 << 18); /** Different types of areas around the decoration */ enum decoration_area_type_t { DECORATION_AREA_MOVE = DECORATION_AREA_MOVE_BIT, DECORATION_AREA_TITLE = DECORATION_AREA_MOVE_BIT | DECORATION_AREA_RENDERABLE_BIT, DECORATION_AREA_BUTTON = DECORATION_AREA_RENDERABLE_BIT, DECORATION_AREA_RESIZE_LEFT = WLR_EDGE_LEFT | DECORATION_AREA_RESIZE_BIT, DECORATION_AREA_RESIZE_RIGHT = WLR_EDGE_RIGHT | DECORATION_AREA_RESIZE_BIT, DECORATION_AREA_RESIZE_TOP = WLR_EDGE_TOP | DECORATION_AREA_RESIZE_BIT, DECORATION_AREA_RESIZE_BOTTOM = WLR_EDGE_BOTTOM | DECORATION_AREA_RESIZE_BIT, }; /** * Represents an area of the decoration which reacts to input events. */ struct decoration_area_t { public: /** * Initialize a new decoration area with the given type and geometry */ decoration_area_t(decoration_area_type_t type, wf::geometry_t g); /** * Initialize a new decoration area holding a button. * * @param g The geometry of the button. * @param damage_callback Callback to execute when button needs repaint. * @param theme The theme to use for the button. */ decoration_area_t(wf::geometry_t g, std::function<void(wlr_box)> damage_callback, const decoration_theme_t& theme); /** @return The geometry of the decoration area, relative to the layout */ wf::geometry_t get_geometry() const; /** @return The area's button, if the area is a button. 
Otherwise UB */ button_t& as_button(); /** @return The type of the decoration area */ decoration_area_type_t get_type() const; private: decoration_area_type_t type; wf::geometry_t geometry; /* For buttons only */ std::unique_ptr<button_t> button; }; /** * Action which needs to be taken in response to an input event */ enum decoration_layout_action_t { DECORATION_ACTION_NONE = 0, /* Drag actions */ DECORATION_ACTION_MOVE = 1, DECORATION_ACTION_RESIZE = 2, /* Button actions */ DECORATION_ACTION_CLOSE = 3, DECORATION_ACTION_TOGGLE_MAXIMIZE = 4, DECORATION_ACTION_MINIMIZE = 5, }; class decoration_theme_t; /** * Manages the layout of the decorations, i.e positioning of the title, * buttons, etc. * * Also dispatches the input events to the appropriate place. */ class decoration_layout_t { public: /** * Create a new decoration layout for the given theme. * When the theme changes, the decoration layout needs to be created again. * * @param damage_callback The function to be called when a part of the * layout needs a repaint. */ decoration_layout_t(const decoration_theme_t& theme, std::function<void(wlr_box)> damage_callback); /** Regenerate layout using the new size */ void resize(int width, int height); /** * @return The decoration areas which need to be rendered, in top to bottom * order. */ std::vector<nonstd::observer_ptr<decoration_area_t>> get_renderable_areas(); /** @return The combined region of all layout areas */ wf::region_t calculate_region() const; struct action_response_t { decoration_layout_action_t action; /* For resizing action, determine the edges for resize request */ uint32_t edges; }; /** Handle motion event to (x, y) relative to the decoration */ action_response_t handle_motion(int x, int y); /** * Handle press or release event. * @param pressed Whether the event is a press(true) or release(false) * event. * @return The action which needs to be carried out in response to this * event. 
*/ action_response_t handle_press_event(bool pressed = true); /** * Handle focus lost event. */ void handle_focus_lost(); private: const int titlebar_size; const int border_size; const int button_width; const int button_height; const int button_padding; const decoration_theme_t& theme; std::function<void(wlr_box)> damage_callback; std::vector<std::unique_ptr<decoration_area_t>> layout_areas; bool is_grabbed = false; /* Position where the grab has started */ wf::point_t grab_origin; /* Last position of the input */ wf::point_t current_input; /* double-click timer */ wf::wl_timer timer; bool double_click_at_release = false; /** Create buttons in the layout, and return their total geometry */ wf::geometry_t create_buttons(int width, int height); /** Calculate resize edges based on @current_input */ uint32_t calculate_resize_edges() const; /** Update the cursor based on @current_input */ void update_cursor() const; /** * Find the layout area at the given coordinates, if any * @return The layout area or null on failure */ nonstd::observer_ptr<decoration_area_t> find_area_at(wf::point_t point); /** Unset hover state of hovered button at @position, if any */ void unset_hover(wf::point_t position); wf::option_wrapper_t<std::string> button_order{"decoration/button_order"}; }; } }
2,078
1,615
<reponame>jackwiy/MLN<gh_stars>1000+ /** * Created by MomoLuaNative. * Copyright (c) 2019, Momo Group. All rights reserved. * * This source code is licensed under the MIT. * For the full copyright and license information,please view the LICENSE file in the root directory of this source tree. */ package com.immomo.mls.utils; import android.os.Handler; import android.os.Looper; import android.os.Message; /** * Created by XiongFangyu on 2018/6/26. */ public class MainThreadExecutor { private static volatile Handler handler; public static boolean isMainThread() { return Looper.myLooper() == Looper.getMainLooper(); } public static boolean isMainThread(Thread t) { return Looper.getMainLooper().getThread() == t; } /** * 使用全局Main Thread handler来post一直Runnable * * @param runnable */ public static void post(Runnable runnable) { if (runnable == null) { throw new IllegalArgumentException("runnable is null"); } getHandler().post(runnable); } public static void post(Object tag, Runnable runnable) { if (tag instanceof Number || tag instanceof CharSequence) { tag = tag.toString().intern(); } Message message = Message.obtain(getHandler(), runnable); message.obj = tag; getHandler().sendMessage(message); } public static void postAtFrontOfQueue(Runnable runnable) { if (runnable == null) { throw new IllegalArgumentException("runnable is null"); } getHandler().postAtFrontOfQueue(runnable); } public static void postDelayed(Object tag, Runnable runnable, long delayMill) { if (tag == null) { throw new IllegalArgumentException("tag is null"); } if (runnable == null) { throw new IllegalArgumentException("runnable is null"); } if (delayMill <= 0) { throw new IllegalArgumentException("delayMill <= 0"); } if (tag instanceof Number || tag instanceof CharSequence) { tag = tag.toString().intern(); } Message message = Message.obtain(getHandler(), runnable); message.obj = tag; getHandler().sendMessageDelayed(message, delayMill); } public static void cancelSpecificRunnable(Object tag, Runnable 
runnable) { if (tag == null) { throw new IllegalArgumentException("tag is null"); } if (runnable == null) { throw new IllegalArgumentException("runnable is null"); } if (tag instanceof Number || tag instanceof CharSequence) { tag = tag.toString().intern(); } getHandler().removeCallbacks(runnable, tag); } public static void cancelAllRunnable(Object tag) { if (tag == null) { throw new IllegalArgumentException("tag is null"); } if (tag instanceof Number || tag instanceof CharSequence) { tag = tag.toString().intern(); } getHandler().removeCallbacksAndMessages(tag); } private static Handler getHandler() { if (handler == null) { synchronized (MainThreadExecutor.class) { if (handler == null) { handler = new Handler(Looper.getMainLooper()); } } } return handler; } }
1,422
348
{"nom":"Thonon-les-Bains","circ":"5ème circonscription","dpt":"Haute-Savoie","inscrits":22789,"abs":14452,"votants":8337,"blancs":620,"nuls":219,"exp":7498,"res":[{"nuance":"REM","nom":"Mme <NAME>","voix":4341},{"nuance":"DVD","nom":"Mme <NAME>","voix":3157}]}
107
3,274
<gh_stars>1000+ package com.ql.util.express.instruction.detail; import java.util.List; import com.ql.util.express.InstructionSet; import com.ql.util.express.InstructionSetContext; import com.ql.util.express.InstructionSetRunner; import com.ql.util.express.OperateData; import com.ql.util.express.RunEnvironment; import com.ql.util.express.instruction.OperateDataCacheManager; public class InstructionCallMacro extends Instruction{ private static final long serialVersionUID = -5760553701305043649L; String name; public InstructionCallMacro(String aName){ this.name = aName; } public void execute(RunEnvironment environment,List<String> errorList)throws Exception{ if(environment.isTrace()&&log.isDebugEnabled()){ log.debug(this); } InstructionSetContext context = environment.getContext(); Object functionSet = context.getSymbol(this.name); Object result =InstructionSetRunner.execute( context.getExpressRunner(), (InstructionSet)functionSet, context.getExpressLoader(), context, errorList, environment.isTrace(), false,false,this.log, environment.getContext().isSupportDynamicFieldName()); if(result instanceof OperateData){ environment.push((OperateData)result); }else{ environment.push(OperateDataCacheManager.fetchOperateData(result,null)); } environment.programPointAddOne(); } public String toString(){ return "call macro " + this.name ; } }
529
30,023
"""Config flow to configure the GeoNet NZ Volcano integration.""" import voluptuous as vol from homeassistant import config_entries from homeassistant.const import ( CONF_LATITUDE, CONF_LONGITUDE, CONF_RADIUS, CONF_SCAN_INTERVAL, CONF_UNIT_SYSTEM, CONF_UNIT_SYSTEM_IMPERIAL, CONF_UNIT_SYSTEM_METRIC, ) from homeassistant.core import callback from homeassistant.helpers import config_validation as cv from .const import DEFAULT_RADIUS, DEFAULT_SCAN_INTERVAL, DOMAIN @callback def configured_instances(hass): """Return a set of configured GeoNet NZ Volcano instances.""" return { f"{entry.data[CONF_LATITUDE]}, {entry.data[CONF_LONGITUDE]}" for entry in hass.config_entries.async_entries(DOMAIN) } class GeonetnzVolcanoFlowHandler(config_entries.ConfigFlow, domain=DOMAIN): """Handle a GeoNet NZ Volcano config flow.""" async def _show_form(self, errors=None): """Show the form to the user.""" data_schema = vol.Schema( {vol.Optional(CONF_RADIUS, default=DEFAULT_RADIUS): cv.positive_int} ) return self.async_show_form( step_id="user", data_schema=data_schema, errors=errors or {} ) async def async_step_import(self, import_config): """Import a config entry from configuration.yaml.""" return await self.async_step_user(import_config) async def async_step_user(self, user_input=None): """Handle the start of the config flow.""" if not user_input: return await self._show_form() latitude = user_input.get(CONF_LATITUDE, self.hass.config.latitude) user_input[CONF_LATITUDE] = latitude longitude = user_input.get(CONF_LONGITUDE, self.hass.config.longitude) user_input[CONF_LONGITUDE] = longitude identifier = f"{user_input[CONF_LATITUDE]}, {user_input[CONF_LONGITUDE]}" if identifier in configured_instances(self.hass): return await self._show_form({"base": "already_configured"}) if self.hass.config.units.name == CONF_UNIT_SYSTEM_IMPERIAL: user_input[CONF_UNIT_SYSTEM] = CONF_UNIT_SYSTEM_IMPERIAL else: user_input[CONF_UNIT_SYSTEM] = CONF_UNIT_SYSTEM_METRIC scan_interval = 
user_input.get(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL) user_input[CONF_SCAN_INTERVAL] = scan_interval.total_seconds() return self.async_create_entry(title=identifier, data=user_input)
1,062
308
<filename>Examples/Classes/TreeViewScene.h #ifndef __TREEVIEW_SCENE_H__ #define __TREEVIEW_SCENE_H__ #include "cocos2d.h" #include "DemoScene.h" USING_NS_FGUI; class TreeViewScene : public DemoScene { public: TreeViewScene(); ~TreeViewScene(); // implement the "static create()" method manually CREATE_FUNC(TreeViewScene); protected: virtual void continueInit() override; private: void onClickNode(EventContext* context); void renderTreeNode(GTreeNode* node, GComponent* obj); GComponent* _view; GTree* _tree1; GTree* _tree2; }; #endif
258
347
// // Created by machiry on 5/1/17. // #include <iostream> #include <llvm/Support/Debug.h> #include "llvm/IR/DebugInfo.h" #include "FileUtils.h" using namespace llvm; namespace IOCTL_CHECKER { std::string FileUtils::getNewRelativePath(std::string &srcBaseDir, std::string &srcFilePath, std::string &bitCodeDir, std::string &suffix) { std::string relativePath; bool is_handled = false; // if src file path starts with srcBaseDir if(!srcFilePath.compare(0, srcBaseDir.size(), srcBaseDir)) { relativePath = srcFilePath.substr(srcBaseDir.size()); is_handled = true; } else { // if src file does not start, if may start with . or .. if(!srcFilePath.compare(0, 3, "../")) { relativePath = srcFilePath.substr(3); is_handled = true; } else { if(!srcFilePath.compare(0, 2, "./")) { relativePath = srcFilePath.substr(2); is_handled = true; } else { // this is when the path is relative to the source directory if(bitCodeDir.back() != '/') { relativePath = "/"; relativePath.append(srcFilePath); ; } else { relativePath = srcFilePath; } is_handled = true; } } } std::string to_ret = ""; if(is_handled) { to_ret = bitCodeDir; // handle missing path separator if(*(bitCodeDir.end()) != '/' && *(relativePath.begin()) != '/') { to_ret.append("/"); } to_ret.append(relativePath.substr(0, relativePath.length() - 2)); to_ret.append(suffix); } if(!is_handled) { if(srcFilePath.compare(0, 4,"incl")) { dbgs() << "Unable to handle file path:" << srcFilePath << "\n"; } } return to_ret; } }
1,149
2,342
// Copyright (c) 2014 <NAME>. All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the name Chromium Embedded // Framework nor the names of its contributors may be used to endorse // or promote products derived from this software without specific prior // written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // --------------------------------------------------------------------------- // // The contents of this file are only available to applications that link // against the libcef_dll_wrapper target. 
// #ifndef CEF_INCLUDE_WRAPPER_CEF_MESSAGE_ROUTER_H_ #define CEF_INCLUDE_WRAPPER_CEF_MESSAGE_ROUTER_H_ #pragma once #include "include/base/cef_ref_counted.h" #include "include/cef_base.h" #include "include/cef_browser.h" #include "include/cef_process_message.h" #include "include/cef_v8.h" // The below classes implement support for routing aynchronous messages between // JavaScript running in the renderer process and C++ running in the browser // process. An application interacts with the router by passing it data from // standard CEF C++ callbacks (OnBeforeBrowse, OnProcessMessageRecieved, // OnContextCreated, etc). The renderer-side router supports generic JavaScript // callback registration and execution while the browser-side router supports // application-specific logic via one or more application-provided Handler // instances. // // The renderer-side router implementation exposes a query function and a cancel // function via the JavaScript 'window' object: // // // Create and send a new query. // var request_id = window.cefQuery({ // request: 'my_request', // persistent: false, // onSuccess: function(response) {}, // onFailure: function(error_code, error_message) {} // }); // // // Optionally cancel the query. // window.cefQueryCancel(request_id); // // When |window.cefQuery| is executed the request is sent asynchronously to one // or more C++ Handler objects registered in the browser process. Each C++ // Handler can choose to either handle or ignore the query in the // Handler::OnQuery callback. If a Handler chooses to handle the query then it // should execute Callback::Success when a response is available or // Callback::Failure if an error occurs. This will result in asynchronous // execution of the associated JavaScript callback in the renderer process. Any // queries unhandled by C++ code in the browser process will be automatically // canceled and the associated JavaScript onFailure callback will be executed // with an error code of -1. 
//
// Queries can be either persistent or non-persistent. If the query is
// persistent then the callbacks will remain registered until one of the
// following conditions is met:
//
// A. The query is canceled in JavaScript using the |window.cefQueryCancel|
//    function.
// B. The query is canceled in C++ code using the Callback::Failure function.
// C. The context associated with the query is released due to browser
//    destruction, navigation or renderer process termination.
//
// If the query is non-persistent then the registration will be removed after
// the JavaScript callback is executed a single time. If a query is canceled for
// a reason other than Callback::Failure being executed then the associated
// Handler's OnQueryCanceled method will be called.
//
// Some possible usage patterns include:
//
// One-time Request. Use a non-persistent query to send a JavaScript request.
//    The Handler evaluates the request and returns the response. The query is
//    then discarded.
//
// Broadcast. Use a persistent query to register as a JavaScript broadcast
//    receiver. The Handler keeps track of all registered Callbacks and executes
//    them sequentially to deliver the broadcast message.
//
// Subscription. Use a persistent query to register as a JavaScript subscription
//    receiver. The Handler initiates the subscription feed on the first request
//    and delivers responses to all registered subscribers as they become
//    available. The Handler cancels the subscription feed when there are no
//    longer any registered JavaScript receivers.
//
// Message routing occurs on a per-browser and per-context basis. Consequently,
// additional application logic can be applied by restricting which browser or
// context instances are passed into the router. If you choose to use this
// approach do so cautiously. In order for the router to function correctly any
// browser or context instance passed into a single router callback must then
// be passed into all router callbacks.
// // There is generally no need to have multiple renderer-side routers unless you // wish to have multiple bindings with different JavaScript function names. It // can be useful to have multiple browser-side routers with different client- // provided Handler instances when implementing different behaviors on a per- // browser basis. // // This implementation places no formatting restrictions on payload content. // An application may choose to exchange anything from simple formatted // strings to serialized XML or JSON data. // // // EXAMPLE USAGE // // 1. Define the router configuration. You can optionally specify settings // like the JavaScript function names. The configuration must be the same in // both the browser and renderer processes. If using multiple routers in the // same application make sure to specify unique function names for each // router configuration. // // // Example config object showing the default values. // CefMessageRouterConfig config; // config.js_query_function = "cefQuery"; // config.js_cancel_function = "cefQueryCancel"; // // 2. Create an instance of CefMessageRouterBrowserSide in the browser process. // You might choose to make it a member of your CefClient implementation, // for example. // // browser_side_router_ = CefMessageRouterBrowserSide::Create(config); // // 3. Register one or more Handlers. The Handler instances must either outlive // the router or be removed from the router before they're deleted. // // browser_side_router_->AddHandler(my_handler); // // 4. Call all required CefMessageRouterBrowserSide methods from other callbacks // in your CefClient implementation (OnBeforeClose, etc). See the // CefMessageRouterBrowserSide class documentation for the complete list of // methods. // // 5. Create an instance of CefMessageRouterRendererSide in the renderer process. // You might choose to make it a member of your CefApp implementation, for // example. 
// // renderer_side_router_ = CefMessageRouterRendererSide::Create(config); // // 6. Call all required CefMessageRouterRendererSide methods from other // callbacks in your CefRenderProcessHandler implementation // (OnContextCreated, etc). See the CefMessageRouterRendererSide class // documentation for the complete list of methods. // // 7. Execute the query function from JavaScript code. // // window.cefQuery({request: 'my_request', // persistent: false, // onSuccess: function(response) { print(response); }, // onFailure: function(error_code, error_message) {} }); // // 8. Handle the query in your Handler::OnQuery implementation and execute the // appropriate callback either immediately or asynchronously. // // void MyHandler::OnQuery(int64 query_id, // CefRefPtr<CefBrowser> browser, // CefRefPtr<CefFrame> frame, // const CefString& request, // bool persistent, // CefRefPtr<Callback> callback) { // if (request == "my_request") { // callback->Continue("my_response"); // return true; // } // return false; // Not handled. // } // // 9. Notice that the onSuccess callback is executed in JavaScript. /// // Used to configure the query router. The same values must be passed to both // CefMessageRouterBrowserSide and CefMessageRouterRendererSide. If using multiple // router pairs make sure to choose values that do not conflict. /// struct CefMessageRouterConfig { CefMessageRouterConfig(); // Name of the JavaScript function that will be added to the 'window' object // for sending a query. The default value is "cefQuery". CefString js_query_function; // Name of the JavaScript function that will be added to the 'window' object // for canceling a pending query. The default value is "cefQueryCancel". CefString js_cancel_function; }; /// // Implements the browser side of query routing. The methods of this class may // be called on any browser process thread unless otherwise indicated. 
///
class CefMessageRouterBrowserSide :
    public base::RefCountedThreadSafe<CefMessageRouterBrowserSide> {
 public:
  ///
  // Callback associated with a single pending asynchronous query. Execute the
  // Success or Failure method to send an asynchronous response to the
  // associated JavaScript handler. It is a runtime error to destroy a Callback
  // object associated with an uncanceled query without first executing one of
  // the callback methods. The methods of this class may be called on any
  // browser process thread.
  ///
  class Callback : public CefBaseRefCounted {
   public:
    ///
    // Notify the associated JavaScript onSuccess callback that the query has
    // completed successfully with the specified |response|.
    ///
    virtual void Success(const CefString& response) =0;

    ///
    // Notify the associated JavaScript onFailure callback that the query has
    // failed with the specified |error_code| and |error_message|.
    ///
    virtual void Failure(int error_code, const CefString& error_message) =0;
  };

  ///
  // Implement this interface to handle queries. All methods will be executed on
  // the browser process UI thread.
  ///
  class Handler {
   public:
    typedef CefMessageRouterBrowserSide::Callback Callback;

    ///
    // Executed when a new query is received. |query_id| uniquely identifies the
    // query for the life span of the router. Return true to handle the query
    // or false to propagate the query to other registered handlers, if any. If
    // no handlers return true from this method then the query will be
    // automatically canceled with an error code of -1 delivered to the
    // JavaScript onFailure callback. If this method returns true then a
    // Callback method must be executed either in this method or asynchronously
    // to complete the query.
    ///
    virtual bool OnQuery(CefRefPtr<CefBrowser> browser,
                         CefRefPtr<CefFrame> frame,
                         int64 query_id,
                         const CefString& request,
                         bool persistent,
                         CefRefPtr<Callback> callback) {
      return false;
    }

    ///
    // Executed when a query has been canceled either explicitly using the
    // JavaScript cancel function or implicitly due to browser destruction,
    // navigation or renderer process termination. It will only be called for
    // the single handler that returned true from OnQuery for the same
    // |query_id|. No references to the associated Callback object should be
    // kept after this method is called, nor should any Callback methods be
    // executed.
    ///
    virtual void OnQueryCanceled(CefRefPtr<CefBrowser> browser,
                                 CefRefPtr<CefFrame> frame,
                                 int64 query_id) {}

    virtual ~Handler() {}
  };

  ///
  // Create a new router with the specified configuration.
  ///
  static CefRefPtr<CefMessageRouterBrowserSide> Create(
      const CefMessageRouterConfig& config);

  ///
  // Add a new query handler. If |first| is true it will be added as the first
  // handler, otherwise it will be added as the last handler. Returns true if
  // the handler is added successfully or false if the handler has already been
  // added. Must be called on the browser process UI thread. The Handler object
  // must either outlive the router or be removed before deletion.
  ///
  virtual bool AddHandler(Handler* handler, bool first) =0;

  ///
  // Remove an existing query handler. Any pending queries associated with the
  // handler will be canceled. Handler::OnQueryCanceled will be called and the
  // associated JavaScript onFailure callback will be executed with an error
  // code of -1. Returns true if the handler is removed successfully or false
  // if the handler is not found. Must be called on the browser process UI
  // thread.
  ///
  virtual bool RemoveHandler(Handler* handler) =0;

  ///
  // Cancel all pending queries associated with either |browser| or |handler|.
  // If both |browser| and |handler| are NULL all pending queries will be
  // canceled. Handler::OnQueryCanceled will be called and the associated
  // JavaScript onFailure callback will be executed in all cases with an error
  // code of -1.
  ///
  virtual void CancelPending(CefRefPtr<CefBrowser> browser,
                             Handler* handler) =0;

  ///
  // Returns the number of queries currently pending for the specified |browser|
  // and/or |handler|. Either or both values may be empty. Must be called on the
  // browser process UI thread.
  ///
  virtual int GetPendingCount(CefRefPtr<CefBrowser> browser,
                              Handler* handler) =0;

  // The below methods should be called from other CEF handlers. They must be
  // called exactly as documented for the router to function correctly.

  ///
  // Call from CefLifeSpanHandler::OnBeforeClose. Any pending queries associated
  // with |browser| will be canceled and Handler::OnQueryCanceled will be called.
  // No JavaScript callbacks will be executed since this indicates destruction
  // of the browser.
  ///
  virtual void OnBeforeClose(CefRefPtr<CefBrowser> browser) =0;

  ///
  // Call from CefRequestHandler::OnRenderProcessTerminated. Any pending queries
  // associated with |browser| will be canceled and Handler::OnQueryCanceled
  // will be called. No JavaScript callbacks will be executed since this
  // indicates destruction of the context.
  ///
  virtual void OnRenderProcessTerminated(CefRefPtr<CefBrowser> browser) =0;

  ///
  // Call from CefRequestHandler::OnBeforeBrowse only if the navigation is
  // allowed to proceed. If |frame| is the main frame then any pending queries
  // associated with |browser| will be canceled and Handler::OnQueryCanceled
  // will be called. No JavaScript callbacks will be executed since this
  // indicates destruction of the context.
  ///
  virtual void OnBeforeBrowse(CefRefPtr<CefBrowser> browser,
                              CefRefPtr<CefFrame> frame) =0;

  ///
  // Call from CefClient::OnProcessMessageReceived. Returns true if the message
  // is handled by this router or false otherwise.
  ///
  virtual bool OnProcessMessageReceived(
      CefRefPtr<CefBrowser> browser,
      CefProcessId source_process,
      CefRefPtr<CefProcessMessage> message) =0;

 protected:
  // Protect against accidental deletion of this object.
  friend class base::RefCountedThreadSafe<CefMessageRouterBrowserSide>;
  virtual ~CefMessageRouterBrowserSide() {}
};

///
// Implements the renderer side of query routing. The methods of this class must
// be called on the render process main thread.
///
class CefMessageRouterRendererSide :
    public base::RefCountedThreadSafe<CefMessageRouterRendererSide> {
 public:
  ///
  // Create a new router with the specified configuration.
  ///
  static CefRefPtr<CefMessageRouterRendererSide> Create(
      const CefMessageRouterConfig& config);

  ///
  // Returns the number of queries currently pending for the specified |browser|
  // and/or |context|. Either or both values may be empty.
  ///
  virtual int GetPendingCount(CefRefPtr<CefBrowser> browser,
                              CefRefPtr<CefV8Context> context) =0;

  // The below methods should be called from other CEF handlers. They must be
  // called exactly as documented for the router to function correctly.

  ///
  // Call from CefRenderProcessHandler::OnContextCreated. Registers the
  // JavaScript functions with the new context.
  ///
  virtual void OnContextCreated(CefRefPtr<CefBrowser> browser,
                                CefRefPtr<CefFrame> frame,
                                CefRefPtr<CefV8Context> context) =0;

  ///
  // Call from CefRenderProcessHandler::OnContextReleased. Any pending queries
  // associated with the released context will be canceled and
  // Handler::OnQueryCanceled will be called in the browser process.
  ///
  virtual void OnContextReleased(CefRefPtr<CefBrowser> browser,
                                 CefRefPtr<CefFrame> frame,
                                 CefRefPtr<CefV8Context> context) =0;

  ///
  // Call from CefRenderProcessHandler::OnProcessMessageReceived. Returns true
  // if the message is handled by this router or false otherwise.
  ///
  virtual bool OnProcessMessageReceived(
      CefRefPtr<CefBrowser> browser,
      CefProcessId source_process,
      CefRefPtr<CefProcessMessage> message) =0;

 protected:
  // Protect against accidental deletion of this object.
  friend class base::RefCountedThreadSafe<CefMessageRouterRendererSide>;
  virtual ~CefMessageRouterRendererSide() {}
};

#endif  // CEF_INCLUDE_WRAPPER_CEF_MESSAGE_ROUTER_H_
5,548
839
<filename>rt/features/throttling/src/main/java/org/apache/cxf/throttling/ThrottlingInterceptor.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.cxf.throttling;

import java.util.logging.Logger;

import org.apache.cxf.common.logging.LogUtils;
import org.apache.cxf.continuations.Continuation;
import org.apache.cxf.continuations.ContinuationProvider;
import org.apache.cxf.endpoint.Endpoint;
import org.apache.cxf.interceptor.Fault;
import org.apache.cxf.interceptor.OutgoingChainInterceptor;
import org.apache.cxf.message.Message;
import org.apache.cxf.phase.AbstractPhaseInterceptor;

/**
 * Interceptor that applies the throttling decision produced by a
 * {@link ThrottlingManager} to the current message. Depending on the
 * {@link ThrottleResponse} it either short-circuits the chain with an
 * error/redirect response (response code &gt;= 300) or delays further
 * processing, preferably by suspending a {@link Continuation} rather than
 * blocking the current thread.
 */
public class ThrottlingInterceptor extends AbstractPhaseInterceptor<Message> {
    private static final Logger LOG = LogUtils.getL7dLogger(ThrottlingInterceptor.class);

    final ThrottlingManager manager;

    /**
     * @param phase   the interceptor chain phase this interceptor runs in; also
     *                used (with the class name) to build a unique interceptor id
     * @param manager the manager consulted for the throttling decision
     */
    public ThrottlingInterceptor(String phase, ThrottlingManager manager) {
        super(ThrottlingInterceptor.class.getName() + "-" + phase, phase);
        this.manager = manager;
    }

    @Override
    public void handleMessage(Message message) throws Fault {
        ThrottleResponse rsp = manager.getThrottleResponse(getPhase(), message);
        if (rsp == null) {
            // No throttling required for this message in this phase.
            return;
        }
        // Make the decision available to later interceptors/handlers.
        message.getExchange().put(ThrottleResponse.class, rsp);
        if (rsp.getResponseCode() >= 300) {
            // Error/redirect requested: stop normal processing and send the
            // response immediately through the outgoing chain.
            createOutMessage(message);
            message.getInterceptorChain().doInterceptStartingAt(message,
                                                                OutgoingChainInterceptor.class.getName());
            return;
        }
        long delay = rsp.getDelay();
        if (delay > 0) {
            ContinuationProvider cp = message.get(ContinuationProvider.class);
            if (cp == null) {
                // No continuation support available (e.g. transport does not
                // provide one) - fall back to blocking the current thread.
                LOG.warning("No ContinuationProvider available, sleeping on current thread");
                try {
                    Thread.sleep(delay);
                } catch (InterruptedException e) {
                    // Restore the interrupt status so callers further up the
                    // stack can observe the interruption instead of it being
                    // silently swallowed.
                    Thread.currentThread().interrupt();
                }
                return;
            }
            // Suspend the exchange without holding a thread; the container
            // resumes it after the delay elapses.
            Continuation c = cp.getContinuation();
            c.suspend(delay);
        }
    }

    /**
     * Creates the outgoing message for the exchange and attaches the outgoing
     * interceptor chain so the throttling response can be written back.
     *
     * @param inMessage the inbound message whose exchange is being answered
     * @return the newly created out message
     */
    private Message createOutMessage(Message inMessage) {
        Endpoint e = inMessage.getExchange().getEndpoint();
        Message mout = e.getBinding().createMessage();
        mout.setExchange(inMessage.getExchange());
        mout.setInterceptorChain(
             OutgoingChainInterceptor.getOutInterceptorChain(inMessage.getExchange()));
        inMessage.getExchange().setOutMessage(mout);
        // Do not cache the (unconsumed) input stream for this short-circuited
        // response.
        inMessage.getExchange().put("cxf.io.cacheinput", Boolean.FALSE);
        return mout;
    }
}
1,299
848
<gh_stars>100-1000
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#define EIGEN_USE_THREADS

#include <vector>

#include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/image_ops.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/graph/gradients.h"
#include "tensorflow/core/kernels/quantization_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"

namespace tensorflow {
namespace {

// Absolute tolerance used by the hand-computed value checks below.
constexpr const float RESIZE_VAL_TOLERANCE = 1.0e-8;

// Builds a {batch_size, height, width, channels} tensor whose i-th flat
// element is (i / ratio) quantized into [min, max].
template <typename T>
Tensor BuildTensor(const int batch_size, const int height, const int width,
                   const int channels, const float ratio, const float min,
                   const float max) {
  Tensor tensor(DataTypeToEnum<T>::value,
                TensorShape({batch_size, height, width, channels}));
  for (int64 i = 0; i < tensor.NumElements(); ++i) {
    tensor.flat<T>()(i) =
        FloatToQuantized<T>(static_cast<float>(i) / ratio, min, max);
  }
  return tensor;
}

// Float specialization: no quantization, values are stored directly.
template <>
Tensor BuildTensor<float>(const int batch_size, const int height,
                          const int width, const int channels,
                          const float ratio, const float min,
                          const float max) {
  Tensor tensor(DT_FLOAT, TensorShape({batch_size, height, width, channels}));
  for (int64 i = 0; i < tensor.NumElements(); ++i) {
    tensor.flat<float>()(i) = static_cast<float>(i) / ratio;
  }
  return tensor;
}

// Mirrors the scale computation of the resize kernels: align_corners maps the
// corner pixels of input and output onto each other.
float CalculateResizeScale(int64 in_size, int64 out_size, bool align_corners) {
  return (align_corners && out_size > 1)
             ? (in_size - 1) / static_cast<float>(out_size - 1)
             : in_size / static_cast<float>(out_size);
}

// Returns (lower offset, upper offset, lerp fraction) for one output index
// along a dimension; |step| is the flat-index stride of that dimension.
inline std::tuple<int64, int64, float> GetReferenceWeight(
    const bool half_pixel_centers, const int64 out_size, const int64 in_size,
    const int step, const int index, const float scale) {
  const float in = half_pixel_centers
                       ? (static_cast<float>(index) + 0.5f) * scale - 0.5f
                       : index * scale;
  const float in_f = std::floor(in);
  const int64 lower = std::max(static_cast<int64>(in_f), static_cast<int64>(0));
  const int64 upper = std::min(static_cast<int64>(std::ceil(in)), in_size - 1);
  return std::make_tuple(lower * step, upper * step, in - in_f);
}

// Reference bilinear interpolation of four quantized corner values: dequantize,
// lerp in float, re-quantize.
template <typename T>
T ComputeLerpReference(const T in_top_left, const T in_top_right,
                       const T in_bottom_left, const T in_bottom_right,
                       const float x_lerp, const float y_lerp, const float min,
                       const float max) {
  const float top_left = QuantizedToFloat<T>(in_top_left, min, max);
  const float top_right = QuantizedToFloat<T>(in_top_right, min, max);
  const float bottom_left = QuantizedToFloat<T>(in_bottom_left, min, max);
  const float bottom_right = QuantizedToFloat<T>(in_bottom_right, min, max);
  const float top = top_left + (top_right - top_left) * x_lerp;
  const float bottom = bottom_left + (bottom_right - bottom_left) * x_lerp;
  const float out = top + (bottom - top) * y_lerp;
  return FloatToQuantized<T>(out, min, max);
}

// Float specialization: plain bilinear lerp, no quantization round trip.
template <>
float ComputeLerpReference<float>(const float in_top_left,
                                  const float in_top_right,
                                  const float in_bottom_left,
                                  const float in_bottom_right,
                                  const float x_lerp, const float y_lerp,
                                  const float min, const float max) {
  const float top = in_top_left + (in_top_right - in_top_left) * x_lerp;
  const float bottom =
      in_bottom_left + (in_bottom_right - in_bottom_left) * x_lerp;
  return top + (bottom - top) * y_lerp;
}

// Computes the expected resized value for one output coordinate (b, y, x, c)
// straight from the flat input buffer, independently of the kernel under test.
template <typename T>
T CalcReferenceResizedVal(const T* image_data, const bool half_pixel_centers,
                          const int batch_size, const int64 in_height,
                          const int64 in_width, const int64 out_height,
                          const int64 out_width, const int channels,
                          const float height_scale, const float width_scale,
                          const float min, const float max, const int b,
                          const int64 x, const int64 y, const int c) {
  const std::tuple<int64, int64, float> x_weight = GetReferenceWeight(
      half_pixel_centers, out_width, in_width, channels, x, width_scale);
  const std::tuple<int64, int64, float> y_weight = GetReferenceWeight(
      half_pixel_centers, out_height, in_height, 1, y, height_scale);
  const int64 in_row_size = in_width * channels;
  const int64 in_batch_num_values = in_height * in_row_size;
  const int y_lower_index =
      b * in_batch_num_values + std::get<0>(y_weight) * in_row_size;
  const int y_upper_index =
      b * in_batch_num_values + std::get<1>(y_weight) * in_row_size;

  const int64 xs_lower = std::get<0>(x_weight);
  const int64 xs_upper = std::get<1>(x_weight);
  const float xs_lerp = std::get<2>(x_weight);
  const float ys_lerp = std::get<2>(y_weight);
  const float top_left = image_data[y_lower_index + xs_lower + c];
  const float top_right = image_data[y_lower_index + xs_upper + c];
  const float bottom_left = image_data[y_upper_index + xs_lower + c];
  const float bottom_right = image_data[y_upper_index + xs_upper + c];
  const float val =
      ComputeLerpReference<T>(top_left, top_right, bottom_left, bottom_right,
                              xs_lerp, ys_lerp, min, max);
  return val;
}

// Compares every element of |out_data| against the reference implementation.
// |relative| selects relative (float) tolerance vs. absolute quantized-step
// tolerance.
template <typename T>
void CheckTensorValue(const T* in_data, const T* out_data, const int batch_size,
                      const int64 in_height, const int64 in_width,
                      const int64 out_height, const int64 out_width,
                      const int channels, const bool align_corners,
                      const bool half_pixel_centers, const float min,
                      const float max, const float tolerance,
                      const bool relative) {
  const int64 out_row_size = out_width * channels;
  const float height_scale =
      CalculateResizeScale(in_height, out_height, align_corners);
  const float width_scale =
      CalculateResizeScale(in_width, out_width, align_corners);

  for (int b = 0; b < batch_size; ++b) {
    for (int64 y = 0; y < out_height; ++y) {
      for (int64 x = 0; x < out_width; ++x) {
        for (int c = 0; c < channels; ++c) {
          const T ref_qval = CalcReferenceResizedVal<T>(
              in_data, half_pixel_centers, batch_size, in_height, in_width,
              out_height, out_width, channels, height_scale, width_scale, min,
              max, b, x, y, c);
          const T qval =
              out_data[(b * out_height + y) * out_row_size + x * channels + c];
          const float ref_val = QuantizedToFloat<T>(ref_qval, min, max);
          const float val = QuantizedToFloat<T>(qval, min, max);
          if (!relative) {
            // Absolute tolerance expressed in quantized steps.
            const int q_tolerance = std::round(tolerance);
            EXPECT_TRUE(std::abs(static_cast<int32>(ref_qval) -
                                 static_cast<int32>(qval)) <= q_tolerance)
                << "ref = " << ref_val << ", val = " << val << ", " << b << ", "
                << y << ", " << x << ", " << c << ", qval = " << qval
                << ", ref qval = " << ref_qval << ", " << q_tolerance;
          } else {
            const float rel_tolerance = std::max(ref_val, 1.0f) * tolerance;
            // NOTE(review): the label "ref qval" below actually prints |qval|;
            // runtime string left unchanged to preserve behavior.
            EXPECT_NEAR(ref_val, val, rel_tolerance)
                << "ref = " << ref_val << ", val = " << val << ", " << b << ", "
                << y << ", " << x << ", " << c << ", ref qval = " << qval;
          }
        }
      }
    }
  }
}

// Builds a small graph around QuantizedResizeBilinear, runs it |iterations|
// times, returns {resized_images, out_min, out_max} in |outputs| and
// optionally logs timing.
void TestResizeBilinear(const Tensor& image_tensor, const DataType dt,
                        const Input::Initializer& new_size,
                        const bool show_time, const int64 iterations,
                        const float min, const float max,
                        const bool half_pixel_centers,
                        std::vector<Tensor>* outputs) {
  Scope root = Scope::NewRootScope();

  Output placeholder = ops::Placeholder(root.WithOpName("placeholder"), dt);
  Output size = ops::Const<int32>(root.WithOpName("size"), new_size);
  Output in_min = ops::Const<float>(root.WithOpName("min"), min);
  Output in_max = ops::Const<float>(root.WithOpName("max"), max);

  ops::QuantizedResizeBilinear qrb = ops::QuantizedResizeBilinear(
      root.WithOpName("qrb"), placeholder, size, in_min, in_max,
      ops::QuantizedResizeBilinear::HalfPixelCenters(half_pixel_centers));
  TF_EXPECT_OK(root.status());

  ClientSession session(root);

  int64 total_duration = 0;
  outputs->clear();

  for (int i = 0; i < iterations; ++i) {
    const int64 start_time = Env::Default()->NowMicros();
    TF_EXPECT_OK(session.Run({{placeholder, image_tensor}},
                             {qrb.resized_images, qrb.out_min, qrb.out_max},
                             outputs));
    const int64 end_time = Env::Default()->NowMicros();
    total_duration += end_time - start_time;
  }
  const int64 one_run_duration = total_duration / iterations;

  const int64 num_ops = outputs->at(0).NumElements();

  // total_duration is in microseconds, so ops/us == millions of ops/s.
  const double million_ops_per_second =
      (iterations * num_ops) / static_cast<double>(total_duration);

  if (show_time) {
    LOG(INFO) << "Time resize bilinear: "
              << TensorShape(image_tensor.shape()).DebugString()
              << ": iterations=" << iterations
              << ", MOps/s=" << million_ops_per_second
              << ", one_run_duration=" << one_run_duration
              << ", total_duration=" << total_duration;
  }
}

}  // namespace

// 1-D (width only) upscale by exactly 2x: odd outputs must be the average of
// their two neighbors, even outputs must equal the corresponding input.
void TestResizeBilinearOneDim() {
  constexpr float TOLERANCE = 1.0e-5;
  constexpr int IN_WIDTH = 128;
  constexpr int OUT_WIDTH = 256;
  constexpr float MIN = 0.0f;
  constexpr float MAX = 256.0f;
  // SCALE == 0.5, used to average two adjacent input values.
  constexpr float SCALE = static_cast<float>(IN_WIDTH) / OUT_WIDTH;
  Tensor image_quantized_tensor(DT_QINT32, TensorShape({1, 1, IN_WIDTH, 1}));

  for (int64 i = 0; i < image_quantized_tensor.NumElements(); ++i) {
    image_quantized_tensor.flat<qint32>()(i) =
        FloatToQuantized<qint32>(static_cast<float>(i), MIN, MAX);
  }

  std::vector<Tensor> outputs;
  TestResizeBilinear(image_quantized_tensor, DT_QINT32, {1, OUT_WIDTH}, false,
                     1, MIN, MAX, false, &outputs);
  ASSERT_EQ(3, outputs.size());
  ASSERT_EQ(OUT_WIDTH, outputs.at(0).NumElements());
  ASSERT_EQ(4, outputs.at(0).shape().dims());
  ASSERT_EQ(OUT_WIDTH, outputs.at(0).shape().dim_size(2));

  // Manual value testing
  for (int64 i = 0; i < outputs.at(0).NumElements(); ++i) {
    const float resized_image_val =
        QuantizedToFloat<qint32>(outputs.at(0).flat<qint32>()(i), MIN, MAX);
    float expected_val = 0.0f;
    if (i == 0 || i == outputs.at(0).NumElements() - 1 || i % 2 == 0) {
      // Even index (or last element): maps exactly onto an input sample.
      expected_val = QuantizedToFloat<qint32>(
          image_quantized_tensor.flat<qint32>()(i / 2), MIN, MAX);
    } else {
      // Odd index: midpoint of the two surrounding input samples.
      const float image_val0 = QuantizedToFloat<qint32>(
          image_quantized_tensor.flat<qint32>()(i / 2), MIN, MAX);
      const float image_val1 = QuantizedToFloat<qint32>(
          image_quantized_tensor.flat<qint32>()(i / 2 + 1), MIN, MAX);
      expected_val = (image_val0 + image_val1) * SCALE;
    }
    VLOG(1) << "(" << i << ") " << expected_val << ", " << resized_image_val;
    EXPECT_NEAR(expected_val, resized_image_val, RESIZE_VAL_TOLERANCE)
        << expected_val << ", " << resized_image_val;
  }

  // Value testing with reference implementation
  // NOTE(review): height/width arguments appear transposed relative to the
  // {1, 1, IN_WIDTH, 1} tensor layout; equivalent for this 1-D case since the
  // other dimension is 1 — confirm before reusing elsewhere.
  CheckTensorValue<qint32>(image_quantized_tensor.flat<qint32>().data(),
                           outputs.at(0).flat<qint32>().data(),
                           /*batch_size=*/1,
                           /*in_height=*/IN_WIDTH,
                           /*in_width=*/1,
                           /*out_height=*/OUT_WIDTH,
                           /*out_width=*/1,
                           /*channels=*/1,
                           /*align_corners=*/false,
                           /*half_pixel_centers=*/false, MIN, MAX, TOLERANCE,
                           true);
}

// Runs one 2-D resize of a synthetic tensor and verifies every output element
// against the reference implementation.
template <typename T>
void RunTestResizeBilinearTwoDims(int batch_size, int in_height, int in_width,
                                  int out_height, int out_width, int channels,
                                  float tolerance, bool relative,
                                  const bool half_pixel_centers) {
  constexpr float RATIO = 100.0f;
  const float min = 0.0f;
  const float max = batch_size * in_height * in_width * channels / RATIO;

  const Tensor image_quantized_tensor = BuildTensor<T>(
      batch_size, in_height, in_width, channels, RATIO, min, max);

  std::vector<Tensor> outputs;
  TestResizeBilinear(image_quantized_tensor, DataTypeToEnum<T>::value,
                     {out_height, out_width}, false, 1, min, max,
                     half_pixel_centers, &outputs);
  CheckTensorValue<T>(
      image_quantized_tensor.flat<T>().data(), outputs.at(0).flat<T>().data(),
      batch_size, in_height, in_width, out_height, out_width, channels,
      /*align_corners=*/false,
      /*half_pixel_centers=*/half_pixel_centers, min, max, tolerance, relative);
}

// Timing-only variant: runs the resize |iteration| times and logs throughput;
// no value verification.
template <typename T>
void RunBenchmarkResizeBilinearTwoDims(int batch_size, int in_height,
                                       int in_width, int out_height,
                                       int out_width, int channels,
                                       int iteration,
                                       const bool half_pixel_centers) {
  constexpr float RATIO = 100.0f;
  const float min = 0.0f;
  const float max = batch_size * in_height * in_width * channels / RATIO;

  const Tensor image_quantized_tensor = BuildTensor<T>(
      batch_size, in_height, in_width, channels, RATIO, min, max);

  std::vector<Tensor> outputs;
  TestResizeBilinear(image_quantized_tensor, DataTypeToEnum<T>::value,
                     {out_height, out_width}, true, iteration, min, max, false,
                     &outputs);
}

// Exercises a spread of shapes (up/downscale, 1-3 channels) for one type.
template <typename T>
void TestResizeBilinearTwoDimsType(const float tolerance, const bool relative,
                                   const bool half_pixel_centers) {
  RunTestResizeBilinearTwoDims<T>(1, 1, 1, 1, 1, 1, tolerance, relative,
                                  half_pixel_centers);
  RunTestResizeBilinearTwoDims<T>(1, 1, 128, 1, 256, 1, tolerance, relative,
                                  half_pixel_centers);
  RunTestResizeBilinearTwoDims<T>(1, 128, 1, 256, 1, 1, tolerance, relative,
                                  half_pixel_centers);
  RunTestResizeBilinearTwoDims<T>(1, 128, 128, 256, 256, 1, tolerance, relative,
                                  half_pixel_centers);
  RunTestResizeBilinearTwoDims<T>(1, 256, 256, 128, 128, 1, tolerance, relative,
                                  half_pixel_centers);
  RunTestResizeBilinearTwoDims<T>(1, 1, 128, 1, 256, 2, tolerance, relative,
                                  half_pixel_centers);
  RunTestResizeBilinearTwoDims<T>(1, 128, 1, 256, 1, 2, tolerance, relative,
                                  half_pixel_centers);
  RunTestResizeBilinearTwoDims<T>(1, 128, 128, 256, 256, 2, tolerance, relative,
                                  half_pixel_centers);
  RunTestResizeBilinearTwoDims<T>(1, 256, 256, 128, 128, 2, tolerance, relative,
                                  half_pixel_centers);
  RunTestResizeBilinearTwoDims<T>(1, 1, 16, 1, 32, 3, tolerance, relative,
                                  half_pixel_centers);
  RunTestResizeBilinearTwoDims<T>(1, 1, 128, 1, 256, 3, tolerance, relative,
                                  half_pixel_centers);
  RunTestResizeBilinearTwoDims<T>(1, 128, 128, 256, 256, 3, tolerance, relative,
                                  half_pixel_centers);
  RunTestResizeBilinearTwoDims<T>(1, 256, 256, 128, 128, 3, tolerance, relative,
                                  half_pixel_centers);
}

// Top-level 2-D test: all three element types, both pixel-center conventions.
void TestResizeBilinearTwoDims() {
  for (const bool half_pixel_centers : {false, true}) {
    TestResizeBilinearTwoDimsType<quint8>(1.0f, false, half_pixel_centers);
    TestResizeBilinearTwoDimsType<qint32>(1.0e-5, true, half_pixel_centers);
    TestResizeBilinearTwoDimsType<float>(1.0e-5, true, half_pixel_centers);
  }
}

template <typename T>
void RunBenchmarkResizeBilinearTwoDimsType() {
  constexpr int ITER = 100;
  RunBenchmarkResizeBilinearTwoDims<T>(1, 1, 1, 2, 2, 1, ITER, false);
  RunBenchmarkResizeBilinearTwoDims<T>(1, 128, 128, 256, 256, 1, ITER, false);
  RunBenchmarkResizeBilinearTwoDims<T>(1, 128, 128, 256, 256, 3, ITER, false);
  RunBenchmarkResizeBilinearTwoDims<T>(1, 64, 64, 128, 128, 2, ITER, false);
  RunBenchmarkResizeBilinearTwoDims<T>(1, 32, 32, 64, 64, 16, ITER, false);
}

void RunBenchmarkResizeBilinearTwoDims() {
  LOG(INFO) << "Benchmark quint8";
  RunBenchmarkResizeBilinearTwoDimsType<quint8>();
  LOG(INFO) << "Benchmark qint32";
  RunBenchmarkResizeBilinearTwoDimsType<qint32>();
  LOG(INFO) << "Benchmark float";
  RunBenchmarkResizeBilinearTwoDimsType<float>();
}

}  // namespace tensorflow

// NOTE(review): "Bilenar" is a typo for "Bilinear"; it is part of the
// registered test-suite name, so it is left unchanged here.
#define RUN_TEST(t) \
  TEST(QuantizationResizeBilenarTest, t) { tensorflow::t(); }

RUN_TEST(TestResizeBilinearOneDim);
RUN_TEST(TestResizeBilinearTwoDims);

#if defined(__ANDROID__)
RUN_TEST(RunBenchmarkResizeBilinearTwoDims);
#endif  // __ANDROID__

int main(int argc, char** argv) {
  // On Linux, add: FLAGS_logtostderr = true;
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
8,374
541
<reponame>michelcareau/DSpace<gh_stars>100-1000
/**
 * The contents of this file are subject to the license and copyright
 * detailed in the LICENSE and NOTICE files at the root of the source
 * tree and available online at
 *
 * http://www.dspace.org/license/
 */
package org.dspace.app.sherpa.submit;

import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

import java.sql.SQLException;
import java.util.List;

import org.dspace.AbstractUnitTest;
import org.dspace.app.sherpa.v2.SHERPAResponse;
import org.dspace.authorize.AuthorizeException;
import org.dspace.content.Collection;
import org.dspace.content.Community;
import org.dspace.content.Item;
import org.dspace.content.MetadataField;
import org.dspace.content.MetadataSchemaEnum;
import org.dspace.content.MetadataValue;
import org.dspace.content.WorkspaceItem;
import org.dspace.content.factory.ContentServiceFactory;
import org.dspace.content.service.CollectionService;
import org.dspace.content.service.CommunityService;
import org.dspace.content.service.InstallItemService;
import org.dspace.content.service.ItemService;
import org.dspace.content.service.MetadataFieldService;
import org.dspace.content.service.MetadataValueService;
import org.dspace.content.service.WorkspaceItemService;
import org.dspace.services.factory.DSpaceServicesFactory;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;

/**
 * SHERPASubmitServiceTest creates a dummy item with an ISSN in its metadata, and makes sure
 * that the ISSN is detected and passed to SHERPAService for a mock query
 */
public class SHERPASubmitServiceTest extends AbstractUnitTest {

    // Set up services
    protected ItemService itemService =
        ContentServiceFactory.getInstance().getItemService();
    protected WorkspaceItemService workspaceItemService =
        ContentServiceFactory.getInstance().getWorkspaceItemService();
    protected InstallItemService installItemService =
        ContentServiceFactory.getInstance().getInstallItemService();
    protected CommunityService communityService =
        ContentServiceFactory.getInstance().getCommunityService();
    protected CollectionService collectionService =
        ContentServiceFactory.getInstance().getCollectionService();
    protected MetadataFieldService metadataFieldService =
        ContentServiceFactory.getInstance().getMetadataFieldService();
    protected MetadataValueService metadataValueService =
        ContentServiceFactory.getInstance().getMetadataValueService();

    // Service under test, resolved by name from the DSpace service manager so the
    // test picks up the (mock) SHERPAService wired in the test configuration.
    SHERPASubmitService sherpaSubmitService =
        DSpaceServicesFactory.getInstance().getServiceManager()
            .getServiceByName("org.dspace.app.sherpa.submit.SHERPASubmitService", SHERPASubmitService.class);

    // Fixture community/collection created in setUp() and released in tearDown().
    Collection testCollection = null;
    Community testCommunity = null;

    @BeforeClass
    public static void setUpClass() {
    }

    @AfterClass
    public static void tearDownClass() {
    }

    /**
     * Builds the fixture: a test community containing a test collection.
     * Authorization is turned off here and restored in tearDown(), so the
     * fixture setup and the test itself run unauthorized.
     */
    @Before
    public void setUp() throws SQLException, AuthorizeException {
        context.turnOffAuthorisationSystem();
        // Create primary Test community
        testCommunity = communityService.create(null, context);
        communityService
            .addMetadata(context, testCommunity, MetadataSchemaEnum.DC.getName(), "title", null, null,
                "Test Community");
        communityService.update(context, testCommunity);

        // Create our primary Test Collection
        testCollection = collectionService.create(context, testCommunity);
        collectionService.addMetadata(context, testCollection, "dc", "title", null, null, "Test Collection");
        collectionService.update(context, testCollection);
    }

    @After
    public void tearDown() {
        context.restoreAuthSystemState();
        testCommunity = null;
        testCollection = null;
    }

    /**
     * Test the ISSN extraction
     */
    @Test
    public void testGetISSNs() throws AuthorizeException, SQLException {
        String validISSN = "0140-6736";

        // Create and install an item with an ISSN
        WorkspaceItem testWorkspaceItem = workspaceItemService.create(context, testCollection, false);
        Item testItem = installItemService.installItem(context, testWorkspaceItem);

        // Set up ISSN metadatavalue
        MetadataField issnField = metadataFieldService.
            findByString(context, "dc.identifier.issn", '.');
        MetadataValue metadataValue = metadataValueService.create(context, testItem, issnField);
        metadataValue.setValue(validISSN);

        // Get responses from SHERPA submit service, which should inspect item ISSNs and perform search
        // on the mock SHERPA service
        List<SHERPAResponse> responses = sherpaSubmitService.searchRelatedJournals(context, testItem);

        // Make sure response is not null or empty
        assertTrue("Response list should not be null or empty",
            responses != null && !responses.isEmpty());

        // For each response (there should be only one based on test data) perform the standard set
        // of thorough parsing tests
        for (SHERPAResponse response : responses) {
            // Assert response is not error, or fail with message
            assertFalse("Response was flagged as 'isError'", response.isError());

            // Skip remainder of parsing tests - these are already done in SHERPAServiceTEst
        }
    }
}
1,731
4,036
<reponame>vadi2/codeql
package unreachableblocks;

// Test fixture for unreachable-block detection: each inline comment records
// the expected reachability verdict for the statement it annotates. Do not
// restructure the code — the dead-code shapes are the point of the test.
public class Unreachable {

    // Effectively-constant false: assigned once, never mutated, so conditions
    // on it (directly or via methodFalse()) are expected to be seen as false.
    private boolean privateFalse = false;

    public void method() {
        // Literal false condition.
        if (false) {
            // unreachable
        }

        // Condition via a private field that is always false.
        if (privateFalse) {
            // unreachable
        }

        // Condition via a private method that returns the always-false field.
        if (methodFalse()) {
            // unreachable
        }

        // Constant switch: only case 7 is selected; 8 is reached by
        // fall-through from 7, every other arm is dead.
        switch (7) {
            case 5:
                // unreachable
                break;
            case 6:
                // unreachable
                System.out.println("dead"); // unreachable
            case 7:
            case 8: // reachable from 7
                break; // reachable
            case 9: //unreachable
                break;
            case 10: // unreachable
            default:
                break; //unreachable
        }
    }

    // Always returns false (forwards the never-mutated field).
    private boolean methodFalse() {
        return privateFalse;
    }
}
240
1,050
// Copyright 2019-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include <cuda_runtime_api.h>
#include <rapidjson/document.h>
#include <rapidjson/error/en.h>
#include <unistd.h>

#include <chrono>
#include <future>
#include <iostream>
#include <string>
#include <thread>
#include <vector>

#include "common.h"
#include "triton/core/tritonserver.h"

static_assert(
    TRITON_MIN_COMPUTE_CAPABILITY >= 1.0,
    "Invalid TRITON_MIN_COMPUTE_CAPABILITY specified");

namespace ni = triton::server;

namespace {

// Memory type and device id to use for request inputs and for the buffers
// allocated for response outputs.
struct IOSpec {
  TRITONSERVER_MemoryType input_type_;
  int64_t input_type_id_;
  TRITONSERVER_MemoryType output_type_;
  int64_t output_type_id_;
};

// Meta data used for preparing input data and validate output data
IOSpec io_spec;

// Deleter for GPU-resident input buffers: selects the input device recorded
// in io_spec, then frees. Used with unique_ptr so the buffers are released
// on every exit path.
static auto gpu_data_deleter = [](void* data) {
  if (data != nullptr) {
    FAIL_IF_CUDA_ERR(
        cudaSetDevice(io_spec.input_type_id_),
        "setting CUDA device to release GPU memory on " +
            std::to_string(io_spec.input_type_id_));
    FAIL_IF_CUDA_ERR(cudaFree(data), "releasing GPU memory");
  }
};

// Prints an optional error message plus the option summary, then exits.
// NOTE(review): the text advertises "-out" for the output device but the
// getopt string in main() actually accepts "-o" — confirm and align.
void
Usage(char** argv, const std::string& msg = std::string())
{
  if (!msg.empty()) {
    std::cerr << msg << std::endl;
  }

  std::cerr << "Usage: " << argv[0] << " [options]" << std::endl;
  std::cerr << "\t-i [input device ID]" << std::endl;
  std::cerr << "\t-out [output device ID]" << std::endl;
  std::cerr << "\t-v Enable verbose logging" << std::endl;
  std::cerr << "\t-r [model repository absolute path]" << std::endl;
  std::cerr << "\t-m [model name to be tested]" << std::endl;
  std::cerr << "\t-h [host policy name]" << std::endl;
  std::cerr << "\tFor '-h', if specify, the input will be set with different "
            << "host policy names, given that the specified value is the "
            << "host policy that the model under test is associated with."
            << std::endl;
  std::cerr << "\tFor device ID, -1 is used to stand for CPU device, "
            << "non-negative value is for GPU device." << std::endl;

  exit(1);
}

// Response-allocator callback: allocates output buffers in the memory type /
// device recorded in io_spec (ignoring the server's preference arguments).
TRITONSERVER_Error*
ResponseAlloc(
    TRITONSERVER_ResponseAllocator* allocator, const char* tensor_name,
    size_t byte_size, TRITONSERVER_MemoryType preferred_memory_type,
    int64_t preferred_memory_type_id, void* userp, void** buffer,
    void** buffer_userp, TRITONSERVER_MemoryType* actual_memory_type,
    int64_t* actual_memory_type_id)
{
  // If 'byte_size' is zero just return 'buffer'==nullptr, we don't
  // need to do any other book-keeping.
  if (byte_size == 0) {
    *buffer = nullptr;
    *buffer_userp = nullptr;
    std::cout << "allocated " << byte_size << " bytes for result tensor "
              << tensor_name << std::endl;
  } else {
    void* allocated_ptr = nullptr;
    if (io_spec.output_type_ == TRITONSERVER_MEMORY_CPU) {
      allocated_ptr = malloc(byte_size);
    } else {
      auto err = cudaSetDevice(io_spec.output_type_id_);
      if (err == cudaSuccess) {
        err = cudaMalloc(&allocated_ptr, byte_size);
      }
      if (err != cudaSuccess) {
        return TRITONSERVER_ErrorNew(
            TRITONSERVER_ERROR_INTERNAL,
            std::string(
                "failed to allocate CUDA memory: " +
                std::string(cudaGetErrorString(err)))
                .c_str());
      }
    }

    if (allocated_ptr == nullptr) {
      return TRITONSERVER_ErrorNew(
          TRITONSERVER_ERROR_INTERNAL,
          std::string(
              "failed to allocate " + std::to_string(byte_size) + " bytes in " +
              TRITONSERVER_MemoryTypeString(io_spec.output_type_) +
              " for result tensor " + tensor_name)
              .c_str());
    }

    // Pass the tensor name with buffer_userp so we can show it when
    // releasing the buffer.
    *buffer = allocated_ptr;
    *buffer_userp = new std::string(tensor_name);
    std::cout << "allocated " << byte_size << " bytes in "
              << TRITONSERVER_MemoryTypeString(io_spec.output_type_)
              << " for result tensor " << tensor_name << std::endl;
  }

  *actual_memory_type = io_spec.output_type_;
  *actual_memory_type_id = io_spec.output_type_id_;
  return nullptr;  // Success
}

// Response-allocator release callback: frees a buffer handed out by
// ResponseAlloc, using free() or cudaFree() depending on its memory type,
// and logs the tensor name stashed in buffer_userp.
TRITONSERVER_Error*
ResponseRelease(
    TRITONSERVER_ResponseAllocator* allocator, void* buffer, void* buffer_userp,
    size_t byte_size, TRITONSERVER_MemoryType memory_type,
    int64_t memory_type_id)
{
  std::unique_ptr<std::string> name;
  if (buffer_userp != nullptr) {
    name.reset(reinterpret_cast<std::string*>(buffer_userp));
  } else {
    name.reset(new std::string("<unknown>"));
  }

  std::cout << "Releasing buffer " << buffer << " of size " << byte_size
            << " in " << TRITONSERVER_MemoryTypeString(memory_type)
            << " for result '" << *name << "'" << std::endl;
  if (memory_type == TRITONSERVER_MEMORY_CPU) {
    free(buffer);
  } else {
    auto err = cudaSetDevice(memory_type_id);
    if (err == cudaSuccess) {
      err = cudaFree(buffer);
    }
    if (err != cudaSuccess) {
      return TRITONSERVER_ErrorNew(
          TRITONSERVER_ERROR_INTERNAL,
          std::string(
              "failed to release CUDA memory: " +
              std::string(cudaGetErrorString(err)))
              .c_str());
    }
  }

  return nullptr;  // Success
}

// Request release callback: deletes the request once the server reports it
// is fully released.
void
InferRequestComplete(
    TRITONSERVER_InferenceRequest* request, const uint32_t flags, void* userp)
{
  if ((flags & TRITONSERVER_REQUEST_RELEASE_ALL) != 0) {
    TRITONSERVER_InferenceRequestDelete(request);
  }
}

// Response callback: fulfills the promise passed via userp so the main
// thread's future unblocks with the response. The promise is heap-allocated
// by the caller and deleted here.
void
InferResponseComplete(
    TRITONSERVER_InferenceResponse* response, const uint32_t flags, void* userp)
{
  if (response != nullptr) {
    // Send 'response' to the future.
    std::promise<TRITONSERVER_InferenceResponse*>* p =
        reinterpret_cast<std::promise<TRITONSERVER_InferenceResponse*>*>(userp);
    p->set_value(response);
    delete p;
  }
}

// Returns the index of the output tensor named 'name' in 'response';
// FAILs the test if no output with that name exists.
uint32_t
OutputIndex(TRITONSERVER_InferenceResponse* response, const std::string& name)
{
  uint32_t output_count;
  FAIL_IF_ERR(
      TRITONSERVER_InferenceResponseOutputCount(response, &output_count),
      "getting number of response outputs");

  for (uint32_t idx = 0; idx < output_count; ++idx) {
    const char* cname;
    TRITONSERVER_DataType datatype;
    const int64_t* shape;
    uint64_t dim_count;
    const void* base;
    size_t byte_size;
    TRITONSERVER_MemoryType memory_type;
    int64_t memory_type_id;
    void* userp;

    FAIL_IF_ERR(
        TRITONSERVER_InferenceResponseOutput(
            response, idx, &cname, &datatype, &shape, &dim_count, &base,
            &byte_size, &memory_type, &memory_type_id, &userp),
        "getting output info");
    if (name == std::string(cname)) {
      return idx;
    }
  }

  FAIL("can't found output '" + name + "'");
  return 0;
}

// Validates the model metadata JSON: all inputs and outputs must share one
// data type drawn from {INT32, FP32, BYTES}. On success sets *dtype to that
// type and *is_torch_model from the "platform" field.
TRITONSERVER_Error*
ParseModelConfig(
    const rapidjson::Document& model_metadata, TRITONSERVER_DataType* dtype,
    bool* is_torch_model)
{
  *dtype = TRITONSERVER_TYPE_INVALID;
  for (const auto& input : model_metadata["inputs"].GetArray()) {
    if (strcmp(input["datatype"].GetString(), "INT32") &&
        strcmp(input["datatype"].GetString(), "FP32") &&
        strcmp(input["datatype"].GetString(), "BYTES")) {
      return TRITONSERVER_ErrorNew(
          TRITONSERVER_ERROR_UNSUPPORTED,
          "IO test utility only supports model with data type INT32, "
          "FP32 or BYTES");
    }
    if (*dtype == TRITONSERVER_TYPE_INVALID) {
      *dtype = TRITONSERVER_StringToDataType(input["datatype"].GetString());
    } else {
      auto dt = TRITONSERVER_StringToDataType(input["datatype"].GetString());
      if (dt != *dtype) {
        return TRITONSERVER_ErrorNew(
            TRITONSERVER_ERROR_INVALID_ARG,
            "the model inputs must have the same data type");
      }
    }
  }

  for (const auto& output : model_metadata["outputs"].GetArray()) {
    if (strcmp(output["datatype"].GetString(), "INT32") &&
        strcmp(output["datatype"].GetString(), "FP32") &&
        strcmp(output["datatype"].GetString(), "BYTES")) {
      return TRITONSERVER_ErrorNew(
          TRITONSERVER_ERROR_UNSUPPORTED,
          "IO test utility only supports model with data type INT32, "
          "FP32 or BYTES");
    } else {
      auto dt = TRITONSERVER_StringToDataType(output["datatype"].GetString());
      if (dt != *dtype) {
        return TRITONSERVER_ErrorNew(
            TRITONSERVER_ERROR_INVALID_ARG,
            "the model inputs and outputs must have the same data type");
      }
    }
  }

  *is_torch_model = (model_metadata["platform"] == "pytorch_libtorch");
  return nullptr;
}

// Fills input0 with 0..15 and input1 with sixteen 1s, as raw bytes of T.
template <typename T>
void
GenerateInputData(
    std::vector<char>* input0_data, std::vector<char>* input1_data)
{
  input0_data->resize(16 * sizeof(T));
  input1_data->resize(16 * sizeof(T));
  for (size_t i = 0; i < 16; ++i) {
    ((T*)input0_data->data())[i] = i;
    ((T*)input1_data->data())[i] = 1;
  }
}

// Serialized-string (BYTES) variant of the above: input0 holds "1".."16",
// input1 holds sixteen "1"s, each element encoded as a 4-byte length prefix
// followed by the characters.
void
GenerateStringInputData(
    std::vector<char>* input0_data, std::vector<char>* input1_data)
{
  std::string input0_str = "";
  std::string input1_str = "";
  for (size_t i = 0; i < 16; ++i) {
    std::string i0 = std::to_string(i + 1);
    uint32_t i0_len = i0.size();
    input0_str.append(reinterpret_cast<const char*>(&i0_len), sizeof(uint32_t));
    input0_str.append(i0);
    std::string i1 = std::to_string(1);
    uint32_t i1_len = i1.size();
    input1_str.append(reinterpret_cast<const char*>(&i1_len), sizeof(uint32_t));
    input1_str.append(i1);
  }
  std::copy(
      input0_str.begin(), input0_str.end(), std::back_inserter(*input0_data));
  std::copy(
      input1_str.begin(), input1_str.end(), std::back_inserter(*input1_data));
}

// Expected BYTES outputs for the add/sub model given the string inputs
// above: output0 = (i+1)+1 = "2".."17", output1 = (i+1)-1 = "0".."15",
// in the same length-prefixed encoding.
void
GenerateStringOutputData(
    std::vector<char>* output0_data, std::vector<char>* output1_data)
{
  std::string output0_str = "";
  std::string output1_str = "";
  for (size_t i = 0; i < 16; ++i) {
    std::string o0 = std::to_string(i + 2);
    uint32_t o0_len = o0.size();
    output0_str.append(
        reinterpret_cast<const char*>(&o0_len), sizeof(uint32_t));
    output0_str.append(o0);
    std::string o1 = std::to_string(i);
    uint32_t o1_len = o1.size();
    output1_str.append(
        reinterpret_cast<const char*>(&o1_len), sizeof(uint32_t));
    output1_str.append(o1);
  }
  std::copy(
      output0_str.begin(), output0_str.end(),
      std::back_inserter(*output0_data));
  std::copy(
      output1_str.begin(), output1_str.end(),
      std::back_inserter(*output1_data));
}

// Verifies output0 == input0 + input1 and output1 == input0 - input1,
// element-wise over 16 elements of T; FAILs on the first mismatch.
template <typename T>
void
CompareResult(
    const std::string& output0_name, const std::string& output1_name,
    const void* input0, const void* input1, const void* output0,
    const void* output1)
{
  for (size_t i = 0; i < 16; ++i) {
    std::cout << ((T*)input0)[i] << " + " << ((T*)input1)[i] << " = "
              << ((T*)output0)[i] << std::endl;
    std::cout << ((T*)input0)[i] << " - " << ((T*)input1)[i] << " = "
              << ((T*)output1)[i] << std::endl;

    if ((((T*)input0)[i] + ((T*)input1)[i]) != ((T*)output0)[i]) {
      FAIL("incorrect sum in " + output0_name);
    }
    if ((((T*)input0)[i] - ((T*)input1)[i]) != ((T*)output1)[i]) {
      FAIL("incorrect difference in " + output1_name);
    }
  }
}

// BYTES variant of CompareResult: decodes the length-prefixed string
// outputs into integers, then checks the same add/sub relations against the
// known string inputs ((i+1) and 1). The input pointers are unused here
// because the expected values are implied by GenerateStringInputData.
void
CompareStringResult(
    const std::string& output0_name, const std::string& output1_name,
    const void* input0, const void* input1, const void* output0,
    const void* output1)
{
  // preprocess results from serialized buffer to integers
  std::vector<int> output0_numbers;
  std::vector<int> output1_numbers;
  size_t buf_offset0 = 0, buf_offset1 = 0;
  const uint8_t* base0 = reinterpret_cast<const uint8_t*>(output0);
  const uint8_t* base1 = reinterpret_cast<const uint8_t*>(output1);
  for (size_t i = 0; i < 16; ++i) {
    const uint32_t len0 =
        *(reinterpret_cast<const uint32_t*>(base0 + buf_offset0));
    std::string o0_tmp(
        reinterpret_cast<const char*>(base0 + buf_offset0 + sizeof(len0)),
        len0);
    output0_numbers.push_back(std::atoi(o0_tmp.c_str()));
    buf_offset0 += sizeof(len0) + len0;

    const uint32_t len1 =
        *(reinterpret_cast<const uint32_t*>(base1 + buf_offset1));
    std::string o1_tmp(
        reinterpret_cast<const char*>(base1 + buf_offset1 + sizeof(len1)),
        len1);
    output1_numbers.push_back(std::atoi(o1_tmp.c_str()));
    buf_offset1 += sizeof(len1) + len1;
  }

  for (int i = 0; i < 16; ++i) {
    std::cout << (i + 1) << " + " << 1 << " = " << output0_numbers[i]
              << std::endl;
    std::cout << (i + 1) << " - " << 1 << " = " << output1_numbers[i]
              << std::endl;

    if (((i + 1) + 1) != output0_numbers[i]) {
      FAIL("incorrect sum in " + output0_name);
    }
    if (((i + 1) - 1) != output1_numbers[i]) {
      FAIL("incorrect difference in " + output1_name);
    }
  }
}

}  // namespace

// Entry point: parses options into io_spec / model settings, then starts an
// in-process Triton server and drives one inference (continues below this
// chunk).
int
main(int argc, char** argv)
{
  std::string model_repository_path;
  std::string model_name;
  int verbose_level = 0;
  io_spec.input_type_ = TRITONSERVER_MEMORY_CPU;
  io_spec.input_type_id_ = 0;
  io_spec.output_type_ = TRITONSERVER_MEMORY_CPU;
  io_spec.output_type_id_ = 0;
  const char* host_policy_cstr = nullptr;
  std::string host_policy;

  // Parse commandline...
  int opt;
  while ((opt = getopt(argc, argv, "vi:o:r:m:h:")) != -1) {
    switch (opt) {
      case 'i': {
        // Negative id selects CPU; non-negative selects that GPU device.
        int64_t raw_id = std::stoll(optarg);
        if (raw_id < 0) {
          io_spec.input_type_ = TRITONSERVER_MEMORY_CPU;
          io_spec.input_type_id_ = 0;
        } else {
          io_spec.input_type_ = TRITONSERVER_MEMORY_GPU;
          io_spec.input_type_id_ = raw_id;
        }
        break;
      }
      case 'o': {
        int64_t raw_id = std::stoll(optarg);
        if (raw_id < 0) {
          io_spec.output_type_ = TRITONSERVER_MEMORY_CPU;
          io_spec.output_type_id_ = 0;
        } else {
          io_spec.output_type_ = TRITONSERVER_MEMORY_GPU;
          io_spec.output_type_id_ = raw_id;
        }
        break;
      }
      case 'h': {
        host_policy = optarg;
        host_policy_cstr = host_policy.c_str();
        break;
      }
      case 'r':
        model_repository_path = optarg;
        break;
      case 'm':
        model_name = optarg;
        break;
      case 'v':
        verbose_level = 1;
        break;
      case '?':
        Usage(argv);
        break;
    }
  }

  if (model_repository_path.empty()) {
    Usage(argv, "-r must be used to specify model repository path");
  }
  if (model_name.empty()) {
    Usage(argv, "-m must be used to specify model being test");
  }

  // Create the server...
TRITONSERVER_ServerOptions* server_options = nullptr; FAIL_IF_ERR( TRITONSERVER_ServerOptionsNew(&server_options), "creating server options"); FAIL_IF_ERR( TRITONSERVER_ServerOptionsSetModelRepositoryPath( server_options, model_repository_path.c_str()), "setting model repository path"); FAIL_IF_ERR( TRITONSERVER_ServerOptionsSetModelControlMode( server_options, TRITONSERVER_MODEL_CONTROL_EXPLICIT), "setting model control mode"); FAIL_IF_ERR( TRITONSERVER_ServerOptionsSetStartupModel( server_options, model_name.c_str()), "setting model to load"); FAIL_IF_ERR( TRITONSERVER_ServerOptionsSetLogVerbose(server_options, verbose_level), "setting verbose logging level"); FAIL_IF_ERR( TRITONSERVER_ServerOptionsSetBackendDirectory( server_options, "/opt/tritonserver/backends"), "setting backend directory"); FAIL_IF_ERR( TRITONSERVER_ServerOptionsSetRepoAgentDirectory( server_options, "/opt/tritonserver/repoagents"), "setting repository agent directory"); FAIL_IF_ERR( TRITONSERVER_ServerOptionsSetStrictModelConfig(server_options, true), "setting strict model configuration"); FAIL_IF_ERR( TRITONSERVER_ServerOptionsSetMinSupportedComputeCapability( server_options, TRITON_MIN_COMPUTE_CAPABILITY), "setting minimum supported CUDA compute capability"); TRITONSERVER_Server* server_ptr = nullptr; FAIL_IF_ERR( TRITONSERVER_ServerNew(&server_ptr, server_options), "creating server"); FAIL_IF_ERR( TRITONSERVER_ServerOptionsDelete(server_options), "deleting server options"); std::shared_ptr<TRITONSERVER_Server> server( server_ptr, TRITONSERVER_ServerDelete); // Wait until the server is both live and ready. 
size_t health_iters = 0; while (true) { bool live, ready; FAIL_IF_ERR( TRITONSERVER_ServerIsLive(server.get(), &live), "unable to get server liveness"); FAIL_IF_ERR( TRITONSERVER_ServerIsReady(server.get(), &ready), "unable to get server readiness"); std::cout << "Server Health: live " << live << ", ready " << ready << std::endl; if (live && ready) { break; } if (++health_iters >= 10) { FAIL("failed to find healthy inference server"); } std::this_thread::sleep_for(std::chrono::milliseconds(500)); } // Print status of the server. { TRITONSERVER_Message* server_metadata_message; FAIL_IF_ERR( TRITONSERVER_ServerMetadata(server.get(), &server_metadata_message), "unable to get server metadata message"); const char* buffer; size_t byte_size; FAIL_IF_ERR( TRITONSERVER_MessageSerializeToJson( server_metadata_message, &buffer, &byte_size), "unable to serialize server metadata message"); std::cout << "Server Status:" << std::endl; std::cout << std::string(buffer, byte_size) << std::endl; FAIL_IF_ERR( TRITONSERVER_MessageDelete(server_metadata_message), "deleting status metadata"); } // Wait for the model to become available. 
bool is_torch_model = false; TRITONSERVER_DataType dtype = TRITONSERVER_TYPE_INT32; bool is_ready = false; health_iters = 0; while (!is_ready) { FAIL_IF_ERR( TRITONSERVER_ServerModelIsReady( server.get(), model_name.c_str(), 1, &is_ready), "unable to get model readiness"); if (!is_ready) { if (++health_iters >= 10) { FAIL("model failed to be ready in 10 iterations"); } std::this_thread::sleep_for(std::chrono::milliseconds(500)); continue; } TRITONSERVER_Message* model_metadata_message; FAIL_IF_ERR( TRITONSERVER_ServerModelMetadata( server.get(), model_name.c_str(), 1, &model_metadata_message), "unable to get model metadata message"); const char* buffer; size_t byte_size; FAIL_IF_ERR( TRITONSERVER_MessageSerializeToJson( model_metadata_message, &buffer, &byte_size), "unable to serialize model status protobuf"); rapidjson::Document model_metadata; model_metadata.Parse(buffer, byte_size); if (model_metadata.HasParseError()) { FAIL( "error: failed to parse model metadata from JSON: " + std::string(GetParseError_En(model_metadata.GetParseError())) + " at " + std::to_string(model_metadata.GetErrorOffset())); } FAIL_IF_ERR( TRITONSERVER_MessageDelete(model_metadata_message), "deleting status protobuf"); if (strcmp(model_metadata["name"].GetString(), model_name.c_str())) { FAIL("unable to find metadata for model"); } bool found_version = false; if (model_metadata.HasMember("versions")) { for (const auto& version : model_metadata["versions"].GetArray()) { if (strcmp(version.GetString(), "1") == 0) { found_version = true; break; } } } if (!found_version) { FAIL("unable to find version 1 status for model"); } FAIL_IF_ERR( ParseModelConfig(model_metadata, &dtype, &is_torch_model), "parsing model metadata"); } // Create the allocator that will be used to allocate buffers for // the result tensors. 
TRITONSERVER_ResponseAllocator* allocator = nullptr; FAIL_IF_ERR( TRITONSERVER_ResponseAllocatorNew( &allocator, ResponseAlloc, ResponseRelease, nullptr /* start_fn */), "creating response allocator"); TRITONSERVER_InferenceRequest* irequest = nullptr; FAIL_IF_ERR( TRITONSERVER_InferenceRequestNew( &irequest, server.get(), model_name.c_str(), -1 /* model_version */), "creating inference request"); FAIL_IF_ERR( TRITONSERVER_InferenceRequestSetId(irequest, "123"), "setting ID for the request"); FAIL_IF_ERR( TRITONSERVER_InferenceRequestSetReleaseCallback( irequest, InferRequestComplete, nullptr /* request_release_userp */), "setting request release callback"); // Create 0 data that shouldn't be selected and used to test host policy // functionality std::vector<uint32_t> zero_data(16); // Create the data for the two input tensors. Initialize the first // to unique integers and the second to all ones. std::vector<char> input0_data; std::vector<char> input1_data; if (dtype == TRITONSERVER_TYPE_INT32) { GenerateInputData<int32_t>(&input0_data, &input1_data); } else if (dtype == TRITONSERVER_TYPE_FP32) { GenerateInputData<float>(&input0_data, &input1_data); } else { GenerateStringInputData(&input0_data, &input1_data); } auto input0 = "INPUT0"; auto input1 = "INPUT1"; // Get the size of the input tensors size_t input0_size = input0_data.size(); size_t input1_size = input1_data.size(); std::vector<int64_t> input0_shape({1, 16}); std::vector<int64_t> input1_shape({1, 16}); FAIL_IF_ERR( TRITONSERVER_InferenceRequestAddInput( irequest, input0, dtype, &input0_shape[0], input0_shape.size()), "setting input 0 meta-data for the request"); FAIL_IF_ERR( TRITONSERVER_InferenceRequestAddInput( irequest, input1, dtype, &input1_shape[0], input1_shape.size()), "setting input 1 meta-data for the request"); auto output0 = is_torch_model ? "OUTPUT__0" : "OUTPUT0"; auto output1 = is_torch_model ? 
"OUTPUT__1" : "OUTPUT1"; FAIL_IF_ERR( TRITONSERVER_InferenceRequestAddRequestedOutput(irequest, output0), "requesting output 0 for the request"); FAIL_IF_ERR( TRITONSERVER_InferenceRequestAddRequestedOutput(irequest, output1), "requesting output 1 for the request"); const void* input0_base = &input0_data[0]; const void* input1_base = &input1_data[0]; bool gpu_input = (io_spec.input_type_ == TRITONSERVER_MEMORY_GPU); std::unique_ptr<void, decltype(gpu_data_deleter)> input0_gpu( nullptr, gpu_data_deleter); std::unique_ptr<void, decltype(gpu_data_deleter)> input1_gpu( nullptr, gpu_data_deleter); if (gpu_input) { FAIL_IF_CUDA_ERR( cudaSetDevice(io_spec.input_type_id_), "setting CUDA device to device " + std::to_string(io_spec.input_type_id_)); void* dst; FAIL_IF_CUDA_ERR( cudaMalloc(&dst, input0_size), "allocating GPU memory for INPUT0 data"); input0_gpu.reset(dst); FAIL_IF_CUDA_ERR( cudaMemcpy(dst, &input0_data[0], input0_size, cudaMemcpyHostToDevice), "setting INPUT0 data in GPU memory"); FAIL_IF_CUDA_ERR( cudaMalloc(&dst, input1_size), "allocating GPU memory for INPUT1 data"); input1_gpu.reset(dst); FAIL_IF_CUDA_ERR( cudaMemcpy(dst, &input1_data[0], input1_size, cudaMemcpyHostToDevice), "setting INPUT1 data in GPU memory"); } input0_base = gpu_input ? input0_gpu.get() : &input0_data[0]; input1_base = gpu_input ? 
input1_gpu.get() : &input1_data[0]; if (host_policy_cstr == nullptr) { FAIL_IF_ERR( TRITONSERVER_InferenceRequestAppendInputData( irequest, input0, input0_base, input0_size, io_spec.input_type_, io_spec.input_type_id_), "assigning INPUT0 data"); FAIL_IF_ERR( TRITONSERVER_InferenceRequestAppendInputData( irequest, input1, input1_base, input1_size, io_spec.input_type_, io_spec.input_type_id_), "assigning INPUT1 data"); FAIL_IF_ERR( TRITONSERVER_InferenceRequestAppendInputDataWithHostPolicy( irequest, input0, zero_data.data(), zero_data.size() * sizeof(uint32_t), TRITONSERVER_MEMORY_CPU, 0, "fake_host_policy_name"), "assigning zero INPUT0 data with host policy 'fake_host_policy_name'"); FAIL_IF_ERR( TRITONSERVER_InferenceRequestAppendInputDataWithHostPolicy( irequest, input1, zero_data.data(), zero_data.size() * sizeof(uint32_t), TRITONSERVER_MEMORY_CPU, 0, "fake_host_policy_name"), "assigning zero INPUT1 data with host policy 'fake_host_policy_name'"); } else { FAIL_IF_ERR( TRITONSERVER_InferenceRequestAppendInputData( irequest, input0, zero_data.data(), zero_data.size() * sizeof(uint32_t), TRITONSERVER_MEMORY_CPU, 0), "assigning zero INPUT0 data"); FAIL_IF_ERR( TRITONSERVER_InferenceRequestAppendInputData( irequest, input1, zero_data.data(), zero_data.size() * sizeof(uint32_t), TRITONSERVER_MEMORY_CPU, 0), "assigning zero INPUT1 data"); FAIL_IF_ERR( TRITONSERVER_InferenceRequestAppendInputDataWithHostPolicy( irequest, input0, input0_base, input0_size, io_spec.input_type_, io_spec.input_type_id_, host_policy_cstr), "assigning INPUT0 data to provided host policy"); FAIL_IF_ERR( TRITONSERVER_InferenceRequestAppendInputDataWithHostPolicy( irequest, input1, input1_base, input1_size, io_spec.input_type_, io_spec.input_type_id_, host_policy_cstr), "assigning INPUT1 data to provided host policy"); } // Perform inference... 
auto p = new std::promise<TRITONSERVER_InferenceResponse*>(); std::future<TRITONSERVER_InferenceResponse*> completed = p->get_future(); FAIL_IF_ERR( TRITONSERVER_InferenceRequestSetResponseCallback( irequest, allocator, nullptr /* response_allocator_userp */, InferResponseComplete, reinterpret_cast<void*>(p)), "setting response callback"); FAIL_IF_ERR( TRITONSERVER_ServerInferAsync( server.get(), irequest, nullptr /* trace */), "running inference"); // Wait for the inference response and check the status. TRITONSERVER_InferenceResponse* response = completed.get(); FAIL_IF_ERR(TRITONSERVER_InferenceResponseError(response), "response status"); // Create the expected data for the two output tensors. std::vector<char> expected0_data; std::vector<char> expected1_data; if (dtype == TRITONSERVER_TYPE_BYTES) { GenerateStringOutputData(&expected0_data, &expected1_data); } // Check the output tensor values... // Note that depending on whether the backend supports outputs in GPU memory, // the output tensor may be in CPU memory even if -g flag is set. 
const void* output0_content; size_t output0_byte_size; TRITONSERVER_MemoryType output0_memory_type; int64_t output0_memory_type_id; { const char* cname; TRITONSERVER_DataType datatype; const int64_t* shape; uint64_t dim_count; void* userp; FAIL_IF_ERR( TRITONSERVER_InferenceResponseOutput( response, OutputIndex(response, output0), &cname, &datatype, &shape, &dim_count, &output0_content, &output0_byte_size, &output0_memory_type, &output0_memory_type_id, &userp), "getting output0 info"); if (dtype == TRITONSERVER_TYPE_BYTES) { size_t expected0_size = expected0_data.size(); if (expected0_size != output0_byte_size) { FAIL( "unexpected output0 byte-size, expected " + std::to_string(expected0_size) + ", got " + std::to_string(output0_byte_size)); } } else if (output0_byte_size != input0_size) { FAIL( "unexpected output0 byte-size, expected " + std::to_string(input0_size) + ", got " + std::to_string(output0_byte_size)); } else if ( (io_spec.output_type_ != output0_memory_type) || (io_spec.output_type_id_ != output0_memory_type_id)) { FAIL( std::string("unexpected output0 memory type (id), expected to be " "allocated in ") + TRITONSERVER_MemoryTypeString(io_spec.output_type_) + " with id " + std::to_string(io_spec.output_type_id_) + ", got " + TRITONSERVER_MemoryTypeString(output0_memory_type) + " with id " + std::to_string(output0_memory_type_id)); } } const void* output1_content; size_t output1_byte_size; TRITONSERVER_MemoryType output1_memory_type; int64_t output1_memory_type_id; { const char* cname; TRITONSERVER_DataType datatype; const int64_t* shape; uint64_t dim_count; void* userp; FAIL_IF_ERR( TRITONSERVER_InferenceResponseOutput( response, OutputIndex(response, output1), &cname, &datatype, &shape, &dim_count, &output1_content, &output1_byte_size, &output1_memory_type, &output1_memory_type_id, &userp), "getting output1 info"); if (dtype == TRITONSERVER_TYPE_BYTES) { size_t expected1_size = expected1_data.size(); if (expected1_size != output1_byte_size) { FAIL( 
"unexpected output1 byte-size, expected " + std::to_string(expected1_size) + ", got " + std::to_string(output1_byte_size)); } } else if (output1_byte_size != input1_size) { FAIL( "unexpected output1 byte-size, expected " + std::to_string(input1_size) + ", got " + std::to_string(output1_byte_size)); } else if ( (io_spec.output_type_ != output1_memory_type) || (io_spec.output_type_id_ != output1_memory_type_id)) { FAIL( std::string("unexpected output1 memory type (id), expected to be " "allocated in ") + TRITONSERVER_MemoryTypeString(io_spec.output_type_) + " with id " + std::to_string(io_spec.output_type_id_) + ", got " + TRITONSERVER_MemoryTypeString(output1_memory_type) + " with id " + std::to_string(output1_memory_type_id)); } } const void* output0_result = output0_content; const void* output1_result = output1_content; // Different from CPU memory, outputs in GPU memory must be copied to CPU // memory to be read directly. std::vector<char> output0_data(output0_byte_size); std::vector<char> output1_data(output1_byte_size); if (output0_memory_type == TRITONSERVER_MEMORY_CPU) { std::cout << "OUTPUT0 are stored in CPU memory" << std::endl; } else { std::cout << "OUTPUT0 are stored in GPU memory" << std::endl; FAIL_IF_CUDA_ERR( cudaMemcpy( &output0_data[0], output0_content, output0_byte_size, cudaMemcpyDeviceToHost), "setting INPUT0 data in GPU memory"); output0_result = &output0_data[0]; } if (output1_memory_type == TRITONSERVER_MEMORY_CPU) { std::cout << "OUTPUT1 are stored in CPU memory" << std::endl; } else { std::cout << "OUTPUT1 are stored in GPU memory" << std::endl; FAIL_IF_CUDA_ERR( cudaMemcpy( &output1_data[0], output1_content, output1_byte_size, cudaMemcpyDeviceToHost), "setting INPUT0 data in GPU memory"); output1_result = &output1_data[0]; } if (dtype == TRITONSERVER_TYPE_INT32) { CompareResult<int32_t>( output0, output1, &input0_data[0], &input1_data[0], output0_result, output1_result); } else if (dtype == TRITONSERVER_TYPE_FP32) { CompareResult<float>( 
output0, output1, &input0_data[0], &input1_data[0], output0_result, output1_result); } else { CompareStringResult( output0, output1, &input0_data[0], &input1_data[0], output0_result, output1_result); } FAIL_IF_ERR( TRITONSERVER_InferenceResponseDelete(response), "deleting inference response"); FAIL_IF_ERR( TRITONSERVER_ResponseAllocatorDelete(allocator), "deleting response allocator"); return 0; }
14,445
348
{"nom":"Sant'Antonino","circ":"2ème circonscription","dpt":"Haute-Corse","inscrits":138,"abs":60,"votants":78,"blancs":11,"nuls":1,"exp":66,"res":[{"nuance":"REG","nom":"<NAME>","voix":52},{"nuance":"REM","nom":"<NAME>","voix":14}]}
94
698
/**
 * Copyright (C) Zhang,Yuexiang (xfeep)
 *
 */
package nginx.clojure.net;

import java.io.IOException;

/**
 * Event callbacks for an asynchronous nginx-clojure socket.
 * <p>
 * Implementations receive lifecycle notifications from the underlying
 * {@link NginxClojureAsynSocket}. In every callback, {@code s} is the socket
 * that fired the event and {@code sc} is a status code supplied by the socket
 * layer (presumably non-negative on success and negative on error — TODO
 * confirm against the NginxClojureAsynSocket status constants).
 */
public interface NginxClojureSocketHandler {

    // Fired when the connect attempt has completed.
    public void onConnect(NginxClojureAsynSocket s, long sc) throws IOException;

    // Fired on a read event for the socket.
    public void onRead(NginxClojureAsynSocket s, long sc) throws IOException;

    // Fired on a write event for the socket.
    public void onWrite(NginxClojureAsynSocket s, long sc) throws IOException;

    // Fired when the socket is released and its resources are freed.
    public void onRelease(NginxClojureAsynSocket s, long sc) throws IOException;
}
149
335
{ "word": "Multiplicity", "definitions": [ "A large number or variety." ], "parts-of-speech": "Noun" }
59
1,645
/*
 * Seldon -- open source prediction engine
 * =======================================
 * Copyright 2011-2015 Seldon Technologies Ltd and Rummble Ltd (http://www.seldon.io/)
 *
 **********************************************************************************************
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 **********************************************************************************************
 */
package io.seldon.spark.actions;

import java.text.ParseException;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.PriorityQueue;
import java.util.Queue;

import org.apache.commons.lang3.builder.ReflectionToStringBuilder;
import org.apache.commons.lang3.builder.ToStringStyle;
import org.apache.hadoop.conf.Configuration;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.broadcast.Broadcast;
import org.apache.spark.storage.StorageLevel;

import scala.Tuple2;

import com.beust.jcommander.JCommander;
import com.beust.jcommander.Parameter;
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;

/**
 * Spark batch job that groups one day of action-log lines by client, fills in
 * missing numeric user ids (userid == 0) from other actions of the same
 * client_userid, and writes the resulting JSON lines to one output directory
 * per client.
 */
public class GroupActionsJob {

    /** Immutable (client, count) pair used only for the end-of-run report. */
    public static class ClientDetail {
        public final String client;
        public final long itemCount;

        public ClientDetail(String client, long itemCount) {
            this.client = client;
            this.itemCount = itemCount;
        }
    }

    /** Command-line options, parsed by JCommander. */
    public static class CmdLineArgs {
        // Glob/pattern for the input action logs; combined with the date below.
        @Parameter(names = "--input-path-pattern", required = true)
        private String input_path_pattern;
        // Day to process (parsed by JobUtils.dateToUnixDays).
        @Parameter(names = "--input-date-string", required = true)
        private String input_date_string;
        // Root directory for per-client output (see getOutputPath).
        @Parameter(names = "--output-path-dir", required = true)
        private String output_path_dir;
        // When true, run with a local[*] Spark master (for debugging).
        @Parameter(names = "--debug-use-local-master")
        private Boolean debug_use_local_master = false;
        @Parameter(names = "--aws-access-key-id", required = false)
        private String aws_access_key_id;
        @Parameter(names = "--aws-secret-access-key", required = false)
        private String aws_secret_access_key;
        // Gzip the saved text files when set.
        @Parameter(names = "--gzip-output", required = false)
        private boolean gzip_output = false;
        // Optional: restrict the whole job to a single client.
        @Parameter(names = "--single-client", required = false)
        private String single_client;

        @Override
        public String toString() {
            return ReflectionToStringBuilder.toString(this, ToStringStyle.SHORT_PREFIX_STYLE);
        }
    }

    public static void main(String[] args) {
        CmdLineArgs cmdLineArgs = new CmdLineArgs();
        new JCommander(cmdLineArgs, args);
        run(cmdLineArgs);
    }

    /**
     * Executes the job end to end: reads the day's logs, groups actions by
     * client, repairs zero user ids via a broadcast lookup, writes JSON per
     * client and prints per-client counts.
     */
    public static void run(CmdLineArgs cmdLineArgs) {
        long unixDays = 0;
        try {
            unixDays = JobUtils.dateToUnixDays(cmdLineArgs.input_date_string);
        } catch (ParseException e) {
            // NOTE(review): an unparsable date silently falls back to day 0,
            // which silently redirects the output path — confirm intended.
            unixDays = 0;
        }
        System.out.println(String.format("--- started GroupActionsJob date[%s] unixDays[%s] ---", cmdLineArgs.input_date_string, unixDays));
        System.out.println("Env: " + System.getenv());
        System.out.println("Properties: " + System.getProperties());

        SparkConf sparkConf = new SparkConf().setAppName("GroupActionsJob");
        if (cmdLineArgs.debug_use_local_master) {
            System.out.println("Using 'local' master");
            sparkConf.setMaster("local");
        }

        // Dump the effective Spark configuration for debugging.
        Tuple2<String, String>[] sparkConfPairs = sparkConf.getAll();
        System.out.println("--- sparkConf ---");
        for (int i = 0; i < sparkConfPairs.length; i++) {
            Tuple2<String, String> kvPair = sparkConfPairs[i];
            System.out.println(String.format("%s:%s", kvPair._1, kvPair._2));
        }
        System.out.println("-----------------");

        JavaSparkContext jsc = new JavaSparkContext(sparkConf);
        { // setup aws access
            Configuration hadoopConf = jsc.hadoopConfiguration();
            hadoopConf.set("fs.s3.impl", "org.apache.hadoop.fs.s3native.NativeS3FileSystem");
            if (cmdLineArgs.aws_access_key_id != null && !"".equals(cmdLineArgs.aws_access_key_id)) {
                hadoopConf.set("fs.s3n.awsAccessKeyId", cmdLineArgs.aws_access_key_id);
                hadoopConf.set("fs.s3n.awsSecretAccessKey", cmdLineArgs.aws_secret_access_key);
            }
        }

        // String output_path_dir = "./out/" + input_date_string + "-" + UUID.randomUUID();
        JavaRDD<String> dataSet = jsc.textFile(JobUtils.getSourceDirFromDate(cmdLineArgs.input_path_pattern, cmdLineArgs.input_date_string)).repartition(4);

        final ObjectMapper objectMapper = new ObjectMapper();
        // Logs may carry fields the ActionData POJO doesn't know about.
        objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);

        // Optional pre-filter: keep only lines belonging to --single-client.
        final String single_client = cmdLineArgs.single_client;
        if (single_client != null) {
            Function<String, Boolean> clientFilter = new Function<String, Boolean>() {
                @Override
                public Boolean call(String t) throws Exception {
                    ActionData actionData = JobUtils.getActionDataFromActionLogLine(objectMapper, t);
                    return ((actionData.client != null) && (actionData.client.equals(single_client)));
                }
            };
            dataSet = dataSet.filter(clientFilter);
        }

        // Key every action by its client name; persisted because it is
        // re-filtered once per client below.
        JavaPairRDD<String, ActionData> pairs = dataSet.mapToPair(new PairFunction<String, String, ActionData>() {
            @Override
            public Tuple2<String, ActionData> call(String t) throws Exception {
                ActionData actionData = JobUtils.getActionDataFromActionLogLine(objectMapper, t);
                // String key = (actionData.userid == 0) ? "__no_userid__" : actionData.client;
                String key = actionData.client;
                return new Tuple2<String, ActionData>(key, actionData);
            }
        }).persist(StorageLevel.MEMORY_AND_DISK());

        List<String> clientList = pairs.keys().distinct().collect();

        // Max-heaps (largest itemCount first) used for the summary printout.
        Queue<ClientDetail> clientDetailQueue = new PriorityQueue<ClientDetail>(30, new Comparator<ClientDetail>() {
            @Override
            public int compare(ClientDetail o1, ClientDetail o2) {
                if (o1.itemCount > o2.itemCount) {
                    return -1;
                } else if (o1.itemCount < o2.itemCount) {
                    return 1;
                }
                return 0;
            }
        });
        Queue<ClientDetail> clientDetailZeroQueue = new PriorityQueue<ClientDetail>(30, new Comparator<ClientDetail>() {
            @Override
            public int compare(ClientDetail o1, ClientDetail o2) {
                if (o1.itemCount > o2.itemCount) {
                    return -1;
                } else if (o1.itemCount < o2.itemCount) {
                    return 1;
                }
                return 0;
            }
        });

        System.out.println("Client list "+clientList.toString());
        for (String client : clientList) {
            if (client != null) {
                System.out.println("looking at client "+client);
                final String currentClient = client;

                JavaPairRDD<String, ActionData> filtered_by_client = pairs.filter(new Function<Tuple2<String, ActionData>, Boolean>() {
                    @Override
                    public Boolean call(Tuple2<String, ActionData> v1) throws Exception {
                        if (currentClient.equalsIgnoreCase(v1._1)) {
                            return Boolean.TRUE;
                        } else {
                            return Boolean.FALSE;
                        }
                    }
                });

                // Actions that already carry a resolved numeric userid.
                JavaPairRDD<String, ActionData> nonZeroUserIds = filtered_by_client.filter(new Function<Tuple2<String, ActionData>, Boolean>() {
                    @Override
                    public Boolean call(Tuple2<String, ActionData> v1) throws Exception {
                        if (v1._2.userid == 0) {
                            return Boolean.FALSE;
                        } else {
                            return Boolean.TRUE;
                        }
                    }
                });

                // Build "<client>_<client_userid>" -> userid lookup entries.
                JavaPairRDD<String, Integer> userIdLookupRDD = nonZeroUserIds.mapToPair(new PairFunction<Tuple2<String, ActionData>, String, Integer>() {
                    @Override
                    public Tuple2<String, Integer> call(Tuple2<String, ActionData> t) throws Exception {
                        String key = currentClient + "_" + t._2.client_userid;
                        return new Tuple2<String, Integer>(key, t._2.userid);
                    }
                });

                // Collected to the driver and re-wrapped in a serializable
                // HashMap so it can be broadcast to the executors.
                Map<String, Integer> userIdLookupMap = userIdLookupRDD.collectAsMap();
                Map<String, Integer> userIdLookupMap_wrapped = new HashMap<String, Integer>(userIdLookupMap);
                final Broadcast<Map<String, Integer>> broadcastVar = jsc.broadcast(userIdLookupMap_wrapped);

                // Re-serialize to JSON, patching userid == 0 from the lookup;
                // unresolvable actions become empty strings (dropped below).
                JavaRDD<String> json_only_with_zeros = filtered_by_client.map(new Function<Tuple2<String, ActionData>, String>() {
                    @Override
                    public String call(Tuple2<String, ActionData> v1) throws Exception {
                        Map<String, Integer> m = broadcastVar.getValue();
                        ActionData actionData = v1._2;
                        if (actionData.userid == 0) {
                            String key = currentClient + "_" + actionData.client_userid;
                            if (m.containsKey(key)) {
                                actionData.userid = m.get(key);
                            } else {
                                return "";
                            }
                        }
                        String json = JobUtils.getJsonFromActionData(actionData);
                        return json;
                    }
                });

                JavaRDD<String> json_only = json_only_with_zeros.filter(new Function<String, Boolean>() {
                    @Override
                    public Boolean call(String v1) throws Exception {
                        return (v1.length() == 0) ? Boolean.FALSE : Boolean.TRUE;
                    }
                });

                String outputPath = getOutputPath(cmdLineArgs.output_path_dir, unixDays, client);
                if (cmdLineArgs.gzip_output) {
                    json_only.saveAsTextFile(outputPath, org.apache.hadoop.io.compress.GzipCodec.class);
                } else {
                    json_only.saveAsTextFile(outputPath);
                }
                long json_only_count = json_only.count();
                // Difference = actions whose zero userid could not be resolved.
                clientDetailZeroQueue.add(new ClientDetail(currentClient, json_only_with_zeros.count() - json_only_count));
                clientDetailQueue.add(new ClientDetail(currentClient, json_only_count));
            } else
                System.out.println("Found null client!");
        }

        System.out.println("- Client Action (Zero Userid) Count -");
        while (clientDetailZeroQueue.size() != 0) {
            GroupActionsJob.ClientDetail clientDetail = clientDetailZeroQueue.remove();
            System.out.println(String.format("%s: %d", clientDetail.client, clientDetail.itemCount));
        }
        System.out.println("- Client Action Count -");
        while (clientDetailQueue.size() != 0) {
            GroupActionsJob.ClientDetail clientDetail = clientDetailQueue.remove();
            System.out.println(String.format("%s: %d", clientDetail.client, clientDetail.itemCount));
        }

        jsc.stop();
        System.out.println(String.format("--- finished GroupActionsJob date[%s] unixDays[%s] ---", cmdLineArgs.input_date_string, unixDays));
    }

    /** Builds "<root>/<client>/actions/<unixDays>" for saveAsTextFile. */
    public static String getOutputPath(String output_path_dir, long unixDays, String client) {
        return output_path_dir + "/" + client + "/actions/" + unixDays;
    }
}
5,445
2,151
// Copyright 2018 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CHROME_BROWSER_UI_ASH_LAUNCHER_INTERNAL_APP_SHELF_CONTEXT_MENU_H_ #define CHROME_BROWSER_UI_ASH_LAUNCHER_INTERNAL_APP_SHELF_CONTEXT_MENU_H_ #include "base/macros.h" #include "chrome/browser/ui/ash/launcher/launcher_context_menu.h" // Class for context menu which is shown for internal app in the shelf. class InternalAppShelfContextMenu : public LauncherContextMenu { public: InternalAppShelfContextMenu(ChromeLauncherController* controller, const ash::ShelfItem* item, int64_t display_id); ~InternalAppShelfContextMenu() override = default; // LauncherContextMenu: void GetMenuModel(GetMenuModelCallback callback) override; private: void BuildMenu(ui::SimpleMenuModel* menu_model); DISALLOW_COPY_AND_ASSIGN(InternalAppShelfContextMenu); }; #endif // CHROME_BROWSER_UI_ASH_LAUNCHER_INTERNAL_APP_SHELF_CONTEXT_MENU_H_
395
500
/*
 * This file is developed by <NAME> (Walker LAU).
 *
 * This version accelerates all 7 CONV-layers of VIPLFaceNet.
 *
 * If you want to get the latest version of this project or met any problems,
 * please go to <https://github.com/WalkerLau/Accelerating-CNN-with-FPGA> ,
 * I will try to help as much as I can.
 *
 * You can redistribute this source codes and/or modify it under the terms of the BSD 2-Clause License.
 *
 * Note: the above information must be kept whenever or wherever the codes are used.
 *
 */
#ifndef SDSOC_H
#define SDSOC_H

// Marker macro: when this header is included, SDSOC is defined so that other
// translation units can enable their SDSoC/FPGA-specific code paths
// (presumably checked via #ifdef SDSOC — confirm at the usage sites).
#define SDSOC

#endif
211
3,094
# Imports
import os
import torch
import torch.nn.functional as F
import numpy as np
import config

from torch import nn, optim
from torch.utils.data import DataLoader
from tqdm import tqdm
from dataset import CatDog
from efficientnet_pytorch import EfficientNet
from utils import check_accuracy, load_checkpoint, save_checkpoint


def save_feature_vectors(model, loader, output_size=(1, 1), file="trainb7"):
    """Extract pooled feature vectors for every batch in ``loader`` and save
    them (plus labels) as .npy files under data_features/.

    Args:
        model: network exposing ``extract_features`` (EfficientNet API).
        loader: DataLoader yielding (image batch, label batch).
        output_size: spatial size for adaptive average pooling before flatten.
        file: suffix used in the output filenames X_<file>.npy / y_<file>.npy.

    Side effects: switches the model to eval() for extraction and back to
    train() afterwards; writes two files to data_features/.
    """
    model.eval()
    images, labels = [], []

    for idx, (x, y) in enumerate(tqdm(loader)):
        x = x.to(config.DEVICE)
        with torch.no_grad():
            # Pool the conv feature map to output_size, then flatten per sample.
            features = model.extract_features(x)
            features = F.adaptive_avg_pool2d(features, output_size=output_size)
        images.append(features.reshape(x.shape[0], -1).detach().cpu().numpy())
        labels.append(y.numpy())

    np.save(f"data_features/X_{file}.npy", np.concatenate(images, axis=0))
    np.save(f"data_features/y_{file}.npy", np.concatenate(labels, axis=0))
    model.train()


def train_one_epoch(loader, model, loss_fn, optimizer, scaler):
    """Run one training epoch with mixed precision.

    Targets are unsqueezed to shape (batch, 1) and cast to float to match the
    single-logit output head used with BCEWithLogitsLoss.
    """
    loop = tqdm(loader)
    for batch_idx, (data, targets) in enumerate(loop):
        data = data.to(config.DEVICE)
        targets = targets.to(config.DEVICE).unsqueeze(1).float()

        # forward pass under autocast for automatic mixed precision
        with torch.cuda.amp.autocast():
            scores = model(data)
            loss = loss_fn(scores, targets)

        optimizer.zero_grad()
        scaler.scale(loss).backward()
        scaler.step(optimizer)
        scaler.update()
        loop.set_postfix(loss=loss.item())


def main():
    """Fine-tune EfficientNet-B7 as a binary cat/dog classifier, then dump
    train/test feature vectors for a downstream model."""
    model = EfficientNet.from_pretrained("efficientnet-b7")
    # Replace the classification head with a single logit (2560 = B7 feature dim).
    model._fc = nn.Linear(2560, 1)
    train_dataset = CatDog(root="data/train/", transform=config.basic_transform)
    test_dataset = CatDog(root="data/test/", transform=config.basic_transform)
    train_loader = DataLoader(
        train_dataset,
        shuffle=True,
        batch_size=config.BATCH_SIZE,
        num_workers=config.NUM_WORKERS,
        pin_memory=True,
    )
    test_loader = DataLoader(
        test_dataset,
        shuffle=False,
        batch_size=config.BATCH_SIZE,
        num_workers=config.NUM_WORKERS,
    )
    model = model.to(config.DEVICE)
    scaler = torch.cuda.amp.GradScaler()
    loss_fn = nn.BCEWithLogitsLoss()
    optimizer = optim.Adam(
        model.parameters(), lr=config.LEARNING_RATE, weight_decay=config.WEIGHT_DECAY
    )

    # Resume from checkpoint if one exists in the working directory.
    if config.LOAD_MODEL and config.CHECKPOINT_FILE in os.listdir():
        load_checkpoint(torch.load(config.CHECKPOINT_FILE), model)

    for epoch in range(config.NUM_EPOCHS):
        train_one_epoch(train_loader, model, loss_fn, optimizer, scaler)
        check_accuracy(train_loader, model, loss_fn)

        if config.SAVE_MODEL:
            checkpoint = {"state_dict": model.state_dict(), "optimizer": optimizer.state_dict()}
            save_checkpoint(checkpoint, filename=config.CHECKPOINT_FILE)

    # NOTE(review): file suffixes here ("train_b7"/"test_b7") differ from the
    # default "trainb7" above — confirm downstream consumers use these names.
    save_feature_vectors(model, train_loader, output_size=(1, 1), file="train_b7")
    save_feature_vectors(model, test_loader, output_size=(1, 1), file="test_b7")


if __name__ == "__main__":
    main()
1,291
1,217
#ifndef __DESCRIPTOR_H__
#define __DESCRIPTOR_H__

#include <iostream>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <math.h>
#include <stdint.h>

// Builds a dense per-pixel descriptor image from an 8-bit input image.
// Parameters (presumably — confirm against the implementation):
//   I               8-bit grayscale input image
//   width/height    image dimensions in pixels
//   bpl             bytes per line (row stride) of I
//   half_resolution when true, descriptors are computed at half resolution
class Descriptor {

public:

  // constructor creates filters
  Descriptor(uint8_t* I,int32_t width,int32_t height,int32_t bpl,bool half_resolution);

  // deconstructor releases memory
  ~Descriptor();

  // descriptors accessible from outside
  uint8_t* I_desc;

private:

  // build descriptor I_desc from I_du and I_dv
  void createDescriptor(uint8_t* I_du,uint8_t* I_dv,int32_t width,int32_t height,int32_t bpl,bool half_resolution);

};

#endif
267
2,860
/*
 * Copyright (C) Lightbend Inc. <https://www.lightbend.com>
 */
package impl;

import com.google.inject.AbstractModule;
import com.lightbend.lagom.javadsl.server.ServiceGuiceSupport;
import api.BarService;
import api.FooService;
import play.*;

import javax.inject.Inject;
import java.util.Date;
import java.io.*;

import com.typesafe.config.Config;

/** Guice module wiring the Lagom services used by this test application. */
public class Module extends AbstractModule implements ServiceGuiceSupport {
    @Override
    protected void configure() {
        // Serve BarService locally and consume FooService as a client.
        bindService(BarService.class, BarServiceImpl.class);
        bindClient(FooService.class);
        // Eager singleton so OnStart runs on every application (re)start.
        bind(OnStart.class).asEagerSingleton();
    }
}

/**
 * Appends a timestamp to target/reload.log on each start so tests can count
 * application reloads; throws if config key "fail" is set to true.
 */
class OnStart {

    @Inject
    public OnStart(Environment environment, Config configuration) {
        doOnStart(environment, configuration);
    }

    private void doOnStart(Environment environment, Config configuration) {
        try {
            // open for append
            FileWriter writer = new FileWriter(environment.getFile("target/reload.log"), true);
            writer.write(new Date() + " - reloaded\n");
            writer.close();
            // Deliberate failure hook used by the reload tests.
            if (configuration.hasPathOrNull("fail") && configuration.getBoolean("fail")) {
                throw new RuntimeException();
            }
        } catch(IOException e) {
            throw new RuntimeException(e);
        }
    }

}
480
1,285
package net.minestom.server.entity;

import net.minestom.server.entity.metadata.item.ItemEntityMeta;
import net.minestom.server.event.EventDispatcher;
import net.minestom.server.event.entity.EntityItemMergeEvent;
import net.minestom.server.instance.Chunk;
import net.minestom.server.item.ItemStack;
import net.minestom.server.item.StackingRule;
import net.minestom.server.utils.time.Cooldown;
import net.minestom.server.utils.time.TimeUnit;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;

import java.time.Duration;
import java.time.temporal.TemporalUnit;
import java.util.Set;

/**
 * Represents an item on the ground.
 */
public class ItemEntity extends Entity {

    /**
     * Used to slow down the merge check delay
     */
    private static Duration mergeDelay = Duration.of(10, TimeUnit.SERVER_TICK);

    /**
     * The last time that this item has checked his neighbors for merge
     */
    private long lastMergeCheck;

    // The stack this ground item represents; mirrored into the entity metadata.
    private ItemStack itemStack;

    private boolean pickable = true;
    private boolean mergeable = true;
    // Maximum distance (in blocks, presumably — confirm) to another ItemEntity
    // for the two to be merged.
    private float mergeRange = 1;

    private long spawnTime;
    // Milliseconds after spawn during which the item cannot be picked up.
    private long pickupDelay;

    public ItemEntity(@NotNull ItemStack itemStack) {
        super(EntityType.ITEM);
        setItemStack(itemStack);
        setBoundingBox(0.25f, 0.25f, 0.25f);
    }

    /**
     * Gets the merge delay shared by all {@link ItemEntity}.
     *
     * @return the merge delay, null if none
     */
    @Nullable
    public static Duration getMergeDelay() {
        return mergeDelay;
    }

    /**
     * Changes the merge delay.
     * Can be set to null to entirely remove the delay.
     *
     * @param delay the new merge delay
     */
    public static void setMergeDelay(@Nullable Duration delay) {
        ItemEntity.mergeDelay = delay;
    }

    @Override
    public void update(long time) {
        // Merge pass: throttled by mergeDelay, skipped for non-pickable or
        // non-mergeable items.
        if (isMergeable() && isPickable() &&
                (mergeDelay == null || !Cooldown.hasCooldown(time, lastMergeCheck, mergeDelay))) {
            this.lastMergeCheck = time;

            // Only entities in this entity's chunk are considered for merging.
            final Chunk chunk = instance.getChunkAt(getPosition());
            final Set<Entity> entities = instance.getChunkEntities(chunk);
            for (Entity entity : entities) {
                if (entity instanceof ItemEntity) {

                    // Do not merge with itself
                    if (entity == this)
                        continue;

                    final ItemEntity itemEntity = (ItemEntity) entity;
                    if (!itemEntity.isPickable() || !itemEntity.isMergeable())
                        continue;

                    // Too far, do not merge
                    if (getDistance(itemEntity) > mergeRange)
                        continue;

                    final ItemStack itemStackEntity = itemEntity.getItemStack();

                    final StackingRule stackingRule = itemStack.getStackingRule();
                    final boolean canStack = stackingRule.canBeStacked(itemStack, itemStackEntity);

                    if (!canStack)
                        continue;

                    // Verify the combined amount is allowed before merging.
                    final int totalAmount = stackingRule.getAmount(itemStack) + stackingRule.getAmount(itemStackEntity);
                    final boolean canApply = stackingRule.canApply(itemStack, totalAmount);

                    if (!canApply)
                        continue;

                    final ItemStack result = stackingRule.apply(itemStack, totalAmount);

                    // Cancellable event: on success this entity absorbs the
                    // other stack and the other entity is removed.
                    EntityItemMergeEvent entityItemMergeEvent = new EntityItemMergeEvent(this, itemEntity, result);
                    EventDispatcher.callCancellable(entityItemMergeEvent, () -> {
                        setItemStack(entityItemMergeEvent.getResult());
                        itemEntity.remove();
                    });
                }
            }
        }
    }

    @Override
    public void spawn() {
        this.spawnTime = System.currentTimeMillis();
    }

    @Override
    public @NotNull ItemEntityMeta getEntityMeta() {
        return (ItemEntityMeta) super.getEntityMeta();
    }

    /**
     * Gets the item stack on ground.
     *
     * @return the item stack
     */
    @NotNull
    public ItemStack getItemStack() {
        return itemStack;
    }

    /**
     * Changes the item stack on ground.
     *
     * @param itemStack the item stack
     */
    public void setItemStack(@NotNull ItemStack itemStack) {
        this.itemStack = itemStack;
        // Keep the entity metadata (what clients see) in sync with the stack.
        getEntityMeta().setItem(itemStack);
    }

    /**
     * Gets if the item is currently pickable.
     * <p>
     * {@link #setPickable(boolean)} needs to be true and the delay {@link #getPickupDelay()}
     * to be long gone.
     *
     * @return true if the item is pickable, false otherwise
     */
    public boolean isPickable() {
        return pickable && (System.currentTimeMillis() - getSpawnTime() >= pickupDelay);
    }

    /**
     * Makes the item pickable.
     *
     * @param pickable true to make the item pickable, false otherwise
     */
    public void setPickable(boolean pickable) {
        this.pickable = pickable;
    }

    /**
     * Gets if the item is mergeable.
     *
     * @return true if the entity is mergeable, false otherwise
     */
    public boolean isMergeable() {
        return mergeable;
    }

    /**
     * When set to true, close {@link ItemEntity} will try to merge together as a single entity
     * when their {@link #getItemStack()} is similar and allowed to stack together.
     *
     * @param mergeable should the entity merge with other {@link ItemEntity}
     */
    public void setMergeable(boolean mergeable) {
        this.mergeable = mergeable;
    }

    /**
     * Gets the merge range.
     *
     * @return the merge range
     */
    public float getMergeRange() {
        return mergeRange;
    }

    /**
     * Changes the merge range.
     *
     * @param mergeRange the merge range
     */
    public void setMergeRange(float mergeRange) {
        this.mergeRange = mergeRange;
    }

    /**
     * Gets the pickup delay in milliseconds, defined by {@link #setPickupDelay(Duration)}.
     *
     * @return the pickup delay
     */
    public long getPickupDelay() {
        return pickupDelay;
    }

    /**
     * Sets the pickup delay of the ItemEntity.
     *
     * @param delay        the pickup delay
     * @param temporalUnit the unit of the delay
     */
    public void setPickupDelay(long delay, @NotNull TemporalUnit temporalUnit) {
        setPickupDelay(Duration.of(delay, temporalUnit));
    }

    /**
     * Sets the pickup delay of the ItemEntity.
     *
     * @param delay the pickup delay
     */
    public void setPickupDelay(Duration delay) {
        this.pickupDelay = delay.toMillis();
    }

    /**
     * Used to know if the ItemEntity can be pickup.
     *
     * @return the time in milliseconds since this entity has spawn
     */
    public long getSpawnTime() {
        return spawnTime;
    }
}
2,885
321
#pragma once extern "C" { #include "lua.h" } #define LUA_ASYNC_SOCKET_METATABLE_NAME "lua_asyncSocket" extern int luaopen_async_socket(lua_State* L);
68
506
#ifndef IMAGELOADER_COMMON_HPP
#define IMAGELOADER_COMMON_HPP

#include <cstdint>
#include <cstdlib>

// Byte-order and bitmap-geometry helpers shared by the image loaders.
//
// NOTE(review): identifiers containing a leading double underscore
// (__SwapUInt16/__SwapUInt32) are reserved for the implementation by the C++
// standard; they are kept here unchanged for source compatibility with
// existing callers, but renaming them (e.g. SwapUInt16Value) is recommended.
namespace SimpleImageLoader
{
	// Returns `arg` with its two bytes exchanged (16-bit endianness swap).
	inline uint16_t __SwapUInt16(uint16_t arg)
	{
#if defined(_MSC_VER) && _MSC_VER >= 1310
		return _byteswap_ushort(arg);
#elif defined(__i386__) && defined(__GNUC__)
		__asm__("xchgb %b0, %h0" : "+q" (arg));
		return arg;
#else
		// Portable fallback: swap the high and low bytes.
		uint16_t result;
		result = ((arg << 8) & 0xFF00) | ((arg >> 8) & 0x00FF);
		return result;
#endif
	}
	//---------------------------------------------------------------------------
	// Returns `arg` with its four bytes reversed (32-bit endianness swap).
	inline uint32_t __SwapUInt32(uint32_t arg)
	{
#if defined(_MSC_VER) && _MSC_VER >= 1310
		return _byteswap_ulong(arg);
#elif defined(__i386__) && defined(__GNUC__)
		__asm__("bswap %0" : "+r" (arg));
		return arg;
#else
		// Portable fallback: move each byte to its mirrored position.
		uint32_t result;
		result = ((arg & 0x000000FF) << 24) | ((arg & 0x0000FF00) << 8) | ((arg >> 8) & 0x0000FF00) | ((arg >> 24) & 0x000000FF);
		return result;
#endif
	}
	//---------------------------------------------------------------------------
	// In-place 16-bit byte swap.
	inline void SwapUInt16(uint16_t *p)
	{
		*p = __SwapUInt16(*p);
	}
	//---------------------------------------------------------------------------
	// In-place 32-bit byte swap.
	inline void SwapUInt32(uint32_t *p)
	{
		*p = __SwapUInt32(*p);
	}
	//---------------------------------------------------------------------------
	// Returns the upper four bits of `byte` (low nibble cleared).
	inline uint8_t HighNibble(uint8_t byte)
	{
		return byte & 0xF0;
	}
	//---------------------------------------------------------------------------
	// Returns the lower four bits of `byte`.
	inline uint8_t LowNibble(uint8_t byte)
	{
		return byte & 0x0F;
	}
	//---------------------------------------------------------------------------
	// Bytes needed for one row of `width` pixels at `bitdepth` bits per pixel,
	// rounded up to a whole byte. The intermediate product is computed in
	// 64 bits to avoid overflow for large width * bitdepth.
	inline uint32_t CalculateLine(uint32_t width, uint32_t bitdepth)
	{
		return static_cast<uint32_t>((static_cast<uint64_t>(width) * bitdepth + 7) / 8);
	}
	//---------------------------------------------------------------------------
	// Rounds a line length up to the next multiple of 4 (BMP row pitch).
	// Fix: parenthesized `(line + 3) & ~3` — the previous `line + 3 & ~3`
	// relied on `+` binding tighter than `&`, which is correct but triggers
	// -Wparentheses and is easy to misread; behavior is unchanged.
	inline uint32_t CalculatePitch(uint32_t line)
	{
		return (line + 3) & ~3;
	}
	//---------------------------------------------------------------------------
	// Number of palette entries implied by a palettized bit depth (1..8 bits);
	// returns 0 for non-palettized depths.
	inline uint32_t CalculateUsedPaletteEntries(uint32_t bit_count)
	{
		if (bit_count >= 1 && bit_count <= 8)
		{
			return 1 << bit_count;
		}

		return 0;
	}
	//---------------------------------------------------------------------------
	// Pointer to the start of row `scanline` in an image whose rows are
	// `pitch` bytes apart, starting at `bits`.
	inline uint8_t* CalculateScanLine(uint8_t *bits, uint32_t pitch, int32_t scanline)
	{
		return bits + (pitch * scanline);
	}
	//---------------------------------------------------------------------------
}

#endif
834
1,041
package org.tests.basic;

import io.ebean.*;
import org.junit.jupiter.api.Test;
import org.tests.model.basic.OrderAggregate;
import org.tests.model.basic.ResetBasicData;

import java.util.List;

import static org.junit.jupiter.api.Assertions.assertTrue;

/**
 * Tests RawSql-backed queries that aggregate order detail rows into
 * {@link OrderAggregate} report beans, including having-clause filtering and
 * the handling of both underscore and camelCase column aliases.
 */
public class TestOrderTotalAmountReportBean extends BaseTestCase {

  @Test
  public void test() {
    // Seed the test database with the standard basic-model data set.
    ResetBasicData.reset();

    String sql = "select order_id, count(*) as totalItems, sum(order_qty*unit_price) as totalAmount \n" +
      "from o_order_detail \n" +
      "group by order_id";

    // Map the order_id column onto the associated order bean's id property.
    RawSql rawSql = RawSqlBuilder.parse(sql).columnMapping("order_id", "order.id").create();

    List<OrderAggregate> l0 = DB.find(OrderAggregate.class)
      .setRawSql(rawSql)
      .findList();

    // Touch each bean to make sure the mapped properties are populated.
    for (OrderAggregate r0 : l0) {
      r0.toString();
    }

    // Same raw SQL combined with where/having expressions added via the query API.
    List<OrderAggregate> l2 = DB.createQuery(OrderAggregate.class)
      .setRawSql(rawSql)
      .where().gt("order.id", 0)
      .having().lt("totalItems", 3).gt("totalAmount", 50).findList();

    // The having clause must have been applied to the aggregate alias.
    for (OrderAggregate r2 : l2) {
      assertTrue(r2.getTotalItems() < 3);
    }
  }

  /**
   * Verifies that underscore column aliases survive SQL generation unchanged.
   */
  @Test
  public void test_when_aliasInUnderscore() {
    ResetBasicData.reset();

    String sql = "select order_id, count(*) as total_items, sum(order_qty*unit_price) as total_amount \n" +
      "from o_order_detail \n" +
      "group by order_id";

    RawSql rawSql = RawSqlBuilder.parse(sql)
      .columnMapping("order_id", "order.id")
      .create();

    Query<OrderAggregate> query = DB.find(OrderAggregate.class)
      .setRawSql(rawSql);

    query.findList();
    assertSql(query).contains("count(*) as total_items, sum(order_qty*unit_price) as total_amount");
  }

  /**
   * Verifies that camelCase column aliases survive SQL generation unchanged.
   */
  @Test
  public void test_when_aliasInCamelCase() {
    ResetBasicData.reset();

    String sql = "select order_id, count(*) as totalItems, sum(order_qty*unit_price) as totalAmount \n" +
      "from o_order_detail \n" +
      "group by order_id";

    RawSql rawSql = RawSqlBuilder.parse(sql)
      .columnMapping("order_id", "order.id")
      .create();

    Query<OrderAggregate> query = DB.find(OrderAggregate.class)
      .setRawSql(rawSql);

    query.findList();
    assertSql(query).contains("count(*) as totalItems, sum(order_qty*unit_price) as totalAmount");
  }
}
947
5,964
// Copyright 2016 the V8 project authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "src/deoptimize-reason.h" namespace v8 { namespace internal { std::ostream& operator<<(std::ostream& os, DeoptimizeReason reason) { switch (reason) { #define DEOPTIMIZE_REASON(Name, message) \ case DeoptimizeReason::k##Name: \ return os << #Name; DEOPTIMIZE_REASON_LIST(DEOPTIMIZE_REASON) #undef DEOPTIMIZE_REASON } UNREACHABLE(); } size_t hash_value(DeoptimizeReason reason) { return static_cast<uint8_t>(reason); } char const* DeoptimizeReasonToString(DeoptimizeReason reason) { static char const* kDeoptimizeReasonStrings[] = { #define DEOPTIMIZE_REASON(Name, message) message, DEOPTIMIZE_REASON_LIST(DEOPTIMIZE_REASON) #undef DEOPTIMIZE_REASON }; size_t const index = static_cast<size_t>(reason); DCHECK_LT(index, arraysize(kDeoptimizeReasonStrings)); return kDeoptimizeReasonStrings[index]; } } // namespace internal } // namespace v8
388
732
package io.eventuate.tram.spring.optimisticlocking;

import io.eventuate.tram.jdbc.optimistic.locking.common.test.AbstractTestEntityService;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Import;
import org.springframework.test.context.junit4.SpringRunner;

/**
 * Runs the shared optimistic-locking retry test against a service whose
 * transactions are demarcated with annotations (see
 * {@link TestEntityServiceTransactionAnnotation}).
 */
@RunWith(SpringRunner.class)
@SpringBootTest(classes = EventuateSpringOptimisticLockingWithAnnotationTransactionTest.Config.class,
    webEnvironment = SpringBootTest.WebEnvironment.NONE)
public class EventuateSpringOptimisticLockingWithAnnotationTransactionTest extends AbstractEventuateSpringOptimisticLockingTest {

  /** Test context: locking decorator + repository config + the annotated service. */
  @Configuration
  @Import({OptimisticLockingDecoratorConfiguration.class, TestEntityRepositoryConfiguration.class})
  public static class Config {
    @Bean
    public TestEntityServiceTransactionAnnotation testEntityServiceTransactionAnnotation() {
      return new TestEntityServiceTransactionAnnotation();
    }
  }

  @Autowired
  private TestEntityServiceTransactionAnnotation testEntityService;

  // Supplies the annotated service to the inherited test logic.
  @Override
  protected AbstractTestEntityService testEntityService() {
    return testEntityService;
  }

  // Re-declared with @Test so the inherited scenario runs in this context.
  @Override
  @Test
  public void shouldRetryOnLockException() throws InterruptedException {
    super.shouldRetryOnLockException();
  }
}
414
331
//
//  KJCollectionViewCell.h
//  KJBannerViewDemo
//
//  Created by Yang Kejun (杨科军) on 2019/1/13.
//  Copyright © 2019 杨科军. All rights reserved.
//  https://github.com/yangKJ/KJBannerViewDemo

#import "KJBannerViewCell.h"
#import "KJBannerModel.h"

NS_ASSUME_NONNULL_BEGIN

/// Banner cell used by the demo; exposes a title value for display.
/// NOTE(review): presumably rendered by a label in the implementation file —
/// confirm against KJCollectionViewCell.m.
@interface KJCollectionViewCell : KJBannerViewCell

/// Title text for this cell.
@property (nonatomic, strong) NSString *title;

@end

NS_ASSUME_NONNULL_END
193
480
<reponame>weicao/galaxysql<gh_stars>100-1000 /* * Copyright [2013-2021], Alibaba Group Holding Limited * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.alibaba.polardbx.executor.ddl.newengine.cross; import com.alibaba.polardbx.common.ddl.newengine.DdlState; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import org.apache.calcite.rel.RelNode; public class DropPhyObjectRecorder extends GenericPhyObjectRecorder { public DropPhyObjectRecorder(RelNode physicalPlan, ExecutionContext executionContext) { super(physicalPlan, executionContext); } @Override protected boolean checkIfPhyObjectDone() { boolean phyObjectDone = super.checkIfPhyObjectDone(); return ddlContext.getState() == DdlState.ROLLBACK_RUNNING ? !phyObjectDone : phyObjectDone; } }
417
530
/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * The contents of this file are subject to the terms of either the Universal Permissive License
 * v 1.0 as shown at http://oss.oracle.com/licenses/upl
 *
 * or the following license:
 *
 * Redistribution and use in source and binary forms, with or without modification, are permitted
 * provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this list of conditions
 * and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice, this list of
 * conditions and the following disclaimer in the documentation and/or other materials provided with
 * the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors may be used to
 * endorse or promote products derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
package org.openjdk.jmc.flightrecorder.controlpanel.ui.configuration.model.gui;

import java.util.ArrayList;
import java.util.List;
import java.util.function.BiConsumer;

import org.eclipse.jface.viewers.ArrayContentProvider;
import org.eclipse.jface.viewers.ComboViewer;
import org.eclipse.jface.viewers.ISelectionChangedListener;
import org.eclipse.jface.viewers.IStructuredSelection;
import org.eclipse.jface.viewers.LabelProvider;
import org.eclipse.jface.viewers.SelectionChangedEvent;
import org.eclipse.jface.viewers.StructuredSelection;
import org.eclipse.swt.SWT;
import org.eclipse.swt.layout.GridData;
import org.eclipse.swt.widgets.Composite;
import org.eclipse.swt.widgets.Label;
import org.eclipse.ui.forms.widgets.FormToolkit;

import org.openjdk.jmc.flightrecorder.controlpanel.ui.configuration.model.xml.JFCGrammar;
import org.openjdk.jmc.flightrecorder.controlpanel.ui.configuration.model.xml.XMLModel;
import org.openjdk.jmc.flightrecorder.controlpanel.ui.configuration.model.xml.XMLTagInstance;

/**
 * Widget node that renders a JFC "selection" element as a labeled combo box.
 * The combo lists the selection's option elements; choosing one writes the
 * option's name back into the selection element's "default" attribute and
 * marks the model dirty.
 */
final class SelectionNode extends WidgetNode {
	// Option elements populating the combo, in the order they were added.
	private final List<XMLTagInstance> m_optionElements = new ArrayList<>();
	// Viewer created lazily by one of the create(...) overloads.
	private ComboViewer m_viewer;

	/** Renders each option by its mandatory "label" attribute. */
	private static class ComboLabelProvider extends LabelProvider {
		@Override
		public String getText(Object element) {
			XMLTagInstance optionElement = (XMLTagInstance) element;
			return optionElement.getValue(JFCGrammar.ATTRIBUTE_LABEL_MANDATORY);
		}
	}

	public SelectionNode(XMLModel model, XMLTagInstance selectionElement) {
		super(model, selectionElement);
	}

	// Identifier (option name) currently stored in the "default" attribute.
	private String getDefaultIdentifier() {
		return getInputElement().getValue(JFCGrammar.ATTRIBUTE_DEFAULT);
	}

	/** Registers an option element to be offered in the combo. */
	public void addItem(XMLTagInstance optionElement) {
		m_optionElements.add(optionElement);
	}

	/** Creates the label + combo using a forms toolkit (forms-based UI). */
	@Override
	public void create(
		FormToolkit toolkit, Composite parent, int horisontalSpan, BiConsumer<Object, String> errorTracker) {
		Label label = toolkit.createLabel(parent, getLabel() + ':');
		adaptLabel(label);
		m_viewer = createViewer(parent, horisontalSpan);
		setViewerSelection();
	}

	/** Creates the label + combo using plain SWT widgets. */
	@Override
	public void create(Composite parent, int horisontalSpan, BiConsumer<Object, String> errorTracker) {
		Label label = new Label(parent, SWT.NONE);
		label.setText(getLabel() + ':');
		adaptLabel(label);
		m_viewer = createViewer(parent, horisontalSpan);
		setViewerSelection();
	}

	// Pre-selects the option matching the default identifier (if any).
	private void setViewerSelection() {
		XMLTagInstance selected = getSelected();
		if (selected != null) {
			m_viewer.setSelection(new StructuredSelection(selected));
		}
	}

	// Common layout/tooltip setup shared by both create(...) overloads.
	private void adaptLabel(Label label) {
		GridData gd1 = new GridData(SWT.FILL, SWT.CENTER, false, false);
		label.setLayoutData(gd1);
		label.setToolTipText(getDescription());
	}

	// Builds the combo viewer and wires selection changes back to the model.
	private ComboViewer createViewer(Composite parent, int horisontalSpan) {
		ComboViewer viewer = new ComboViewer(parent);
		viewer.setContentProvider(new ArrayContentProvider());
		viewer.getControl().setToolTipText(getDescription());
		viewer.setLabelProvider(new ComboLabelProvider());
		viewer.addSelectionChangedListener(new ISelectionChangedListener() {
			@Override
			public void selectionChanged(SelectionChangedEvent event) {
				IStructuredSelection ss = ((IStructuredSelection) event.getSelection());
				select((XMLTagInstance) ss.getFirstElement());
			}
		});
		viewer.setInput(m_optionElements);
		GridData gd2 = new GridData(SWT.FILL, SWT.FILL, true, false);
		gd2.horizontalSpan = horisontalSpan - 1;
		viewer.getControl().setLayoutData(gd2);
		return viewer;
	}

	// Option matching the default identifier, or the first option as fallback,
	// or null when there are no options at all.
	private XMLTagInstance getSelected() {
		for (XMLTagInstance optionElement : m_optionElements) {
			if (getDefaultIdentifier().equalsIgnoreCase(optionElement.getValue(JFCGrammar.ATTRIBUTE_NAME))) {
				return optionElement;
			}
		}
		return m_optionElements.size() > 0 ? m_optionElements.get(0) : null;
	}

	/**
	 * Value of the option named by the "default" attribute; falls back to the
	 * first option's content, or the empty string if there are no options.
	 */
	@Override
	Value getValue() {
		String valueId = getInputElement().getValue(JFCGrammar.ATTRIBUTE_DEFAULT);
		String value = null;
		// FIXME: Why not use m_optionElements? Otherwise, why keep it?
		for (XMLTagInstance optionElement : getInputElement().getTagsInstances()) {
			if (value == null) {
				value = optionElement.getContent();
			}
			if (valueId.equalsIgnoreCase(optionElement.getValue(JFCGrammar.ATTRIBUTE_NAME))) {
				value = optionElement.getContent();
			}
		}
		if (value == null) {
			value = ""; //$NON-NLS-1$
		}
		return Value.valueOf(value);
	}

	// Persists a new combo selection into the "default" attribute and fires a
	// change only when the identifier actually changed (case-insensitive).
	private void select(XMLTagInstance optionElement) {
		if (optionElement != null) {
			m_viewer.getControl().setToolTipText(optionElement.getValue(JFCGrammar.ATTRIBUTE_DESCRIPTION));
			String currentIdentifier = getDefaultIdentifier();
			String newIdentifier = optionElement.getValue(JFCGrammar.ATTRIBUTE_NAME);
			if (!currentIdentifier.equalsIgnoreCase(newIdentifier)) {
				getInputElement().setValue(JFCGrammar.ATTRIBUTE_DEFAULT, newIdentifier);
				fireChange();
				markDirty();
			}
		}
	}
}
2,219
601
<filename>austin-web/src/main/java/com/java3y/austin/web/controller/RefreshTokenController.java package com.java3y.austin.web.controller; import com.java3y.austin.common.enums.ChannelType; import com.java3y.austin.common.vo.BasicResultVO; import com.java3y.austin.cron.handler.RefreshDingDingAccessTokenHandler; import com.java3y.austin.cron.handler.RefreshGeTuiAccessTokenHandler; import io.swagger.annotations.Api; import io.swagger.annotations.ApiOperation; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.web.bind.annotation.GetMapping; import org.springframework.web.bind.annotation.RestController; /** * @Author 3y */ @Api(tags = {"手动刷新token的接口"}) @RestController public class RefreshTokenController { @Autowired private RefreshDingDingAccessTokenHandler refreshDingDingAccessTokenHandler; @Autowired private RefreshGeTuiAccessTokenHandler refreshGeTuiAccessTokenHandler; /** * 按照不同的渠道刷新对应的Token,channelType取值来源com.java3y.austin.common.enums.ChannelType * @param channelType * @return */ @ApiOperation(value = "手动刷新token", notes = "钉钉/个推 token刷新") @GetMapping("/refresh") public BasicResultVO refresh(Integer channelType) { if (ChannelType.PUSH.getCode().equals(channelType)) { refreshGeTuiAccessTokenHandler.execute(); } if (ChannelType.DING_DING_WORK_NOTICE.getCode().equals(channelType)) { refreshDingDingAccessTokenHandler.execute(); } return BasicResultVO.success("刷新成功"); } }
651
343
# -----------------------------------------------------------------------------
# Copyright 2020 White Magic Software, Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# -----------------------------------------------------------------------------

# -----------------------------------------------------------------------------
# This script introduces the editor and its purpose.
#
# It is a SikuliX screen-automation script: helper functions such as typer(),
# heading(), paragraph(), wait() and recur() come from ../editor.sikuli and
# drive the editor on screen while the session is being video recorded, so the
# exact statement order and the literal typed strings are the demo itself.
# -----------------------------------------------------------------------------
from sikuli import *

import sys

# Make the shared editor helper module importable relative to this script.
if not "../editor.sikuli" in sys.path:
    sys.path.append( "../editor.sikuli" )

from editor import *

# ---------------------------------------------------------------
# Fresh start
# ---------------------------------------------------------------
# Remove any state from a previous run so the demo starts clean.
rm( app_home + "/variables.yaml" )
rm( app_home + "/untitled.md" )
rm( dir_home + "/.scrivenvar" )

# ---------------------------------------------------------------
# Wait for application to launch
# ---------------------------------------------------------------
openApp( "java -jar " + app_bin )

# Wait (up to 30 s) for the main window screenshot to appear on screen.
wait("1594187265140.png", 30)

# Breathing room for video recording.
wait( 4 )

# ---------------------------------------------------------------
# Introduction
# ---------------------------------------------------------------
set_typing_speed( 240 )

heading( "What is this application?" )

typer( "Well, this application is a text editor that supports interpolated definitions, ")
typer( "a few different text formats, real-time preview, spell check ")
# Deliberate typo: it is corrected on camera below to showcase spell check.
typer( "as you tipe" )
wait( 0.5 )
recur( 3, backspace )
typer( "ype, and R statements." )
paragraph()
wait( 1 )

# ---------------------------------------------------------------
# Definition demo
# ---------------------------------------------------------------
heading( "What are definitions?" )
typer( "Watch. " )
wait( .5 )

# Focus the definition editor.
click_create()
recur( 4, tab )
wait( .5 )

# Build the nested definition: application -> title -> Scrivenvar.
rename_definition( "application" )
insert()
rename_definition( "title" )
insert()
rename_definition( "Scrivenvar" )

# Set focus to the text editor.
tab()

typer( "The left-hand pane contains a nested, folder-like structure of names " )
typer( "and values that are called *definitions*. " )
wait( .5 )
typer( "Such definitions can simplify updating documents. " )
wait( 1 )

# Replace the literal text with definition references in both occurrences.
edit_find( "this application" )
typer( "$application.title$" )
edit_find_next()
typer( "$application.title$" )
type( Key.END, Key.CTRL )

typer( "The right-hand pane shows the result after having substituted definition " )
typer( "values into the document." )
paragraph()

typer( "Now nobody wants to type definition names all the time. Instead, type any " )
typer( "partial definition value followed by `Ctrl+Space`, such as: scr" )
wait( 0.5 )
# Trigger definition auto-completion on the partial value just typed.
autoinsert()
wait( 1 )
typer( ". *Much* better!" )
paragraph()

heading( "What is interpolation?" )
typer( "Definition values can reference definition names. " )
wait( .5 )
typer( "The definition names act as placeholders. Substituting placeholders with " )
typer( "their definition value is called *interpolation*. Let's see how it works." )
wait( 2 )
1,054
1,444
package mage.cards.z;

import java.util.UUID;
import mage.MageInt;
import mage.abilities.keyword.FlyingAbility;
import mage.abilities.keyword.ShroudAbility;
import mage.cards.CardImpl;
import mage.cards.CardSetInfo;
import mage.constants.CardType;
import mage.constants.SubType;

/**
 * Zephid — {4}{U}{U} Illusion creature, 3/4, with flying and shroud.
 *
 * @author Backfir3
 */
public final class Zephid extends CardImpl {

    public Zephid(UUID ownerId, CardSetInfo setInfo) {
        super(ownerId, setInfo, new CardType[]{CardType.CREATURE}, "{4}{U}{U}");
        this.subtype.add(SubType.ILLUSION);
        this.power = new MageInt(3);
        this.toughness = new MageInt(4);

        this.addAbility(FlyingAbility.getInstance());
        this.addAbility(ShroudAbility.getInstance());
    }

    // Copy constructor used by the engine when duplicating game state.
    private Zephid(final Zephid card) {
        super(card);
    }

    @Override
    public Zephid copy() {
        return new Zephid(this);
    }
}
351
589
package rocks.inspectit.server.instrumentation.config.job; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasItems; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; import static org.mockito.Matchers.eq; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.verifyNoMoreInteractions; import static org.mockito.Mockito.verifyZeroInteractions; import static org.mockito.Mockito.when; import java.rmi.RemoteException; import java.util.Collection; import java.util.Collections; import org.hamcrest.Matcher; import org.mockito.ArgumentCaptor; import org.mockito.InjectMocks; import org.mockito.Matchers; import org.mockito.Mock; import org.slf4j.Logger; import org.springframework.context.ApplicationEventPublisher; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; import com.google.common.collect.ImmutableList; import rocks.inspectit.server.ci.event.ClassInstrumentationChangedEvent; import rocks.inspectit.server.ci.event.ProfileUpdateEvent; import rocks.inspectit.server.instrumentation.classcache.ClassCache; import rocks.inspectit.server.instrumentation.classcache.ClassCacheInstrumentation; import rocks.inspectit.server.instrumentation.config.AgentCacheEntry; import rocks.inspectit.server.instrumentation.config.ClassCacheSearchNarrower; import rocks.inspectit.server.instrumentation.config.ConfigurationHolder; import rocks.inspectit.server.instrumentation.config.ConfigurationResolver; import rocks.inspectit.server.instrumentation.config.applier.IInstrumentationApplier; import rocks.inspectit.shared.all.instrumentation.classcache.ClassType; import rocks.inspectit.shared.all.instrumentation.classcache.ImmutableClassType; import rocks.inspectit.shared.all.instrumentation.config.impl.AgentConfig; import 
rocks.inspectit.shared.all.instrumentation.config.impl.InstrumentationDefinition; import rocks.inspectit.shared.all.testbase.TestBase; import rocks.inspectit.shared.cs.ci.Environment; import rocks.inspectit.shared.cs.ci.assignment.AbstractClassSensorAssignment; @SuppressWarnings({ "all", "unchecked" }) public class ProfileUpdateJobTest extends TestBase { @InjectMocks protected ProfileUpdateJob job; @Mock protected Logger log; @Mock protected ClassCacheSearchNarrower classCacheSearchNarrower; @Mock protected AgentCacheEntry agentCacheEntry; @Mock protected ClassCache classCache; @Mock protected ConfigurationHolder configurationHolder; @Mock protected AgentConfig agentConfiguration; @Mock protected Environment environment; @Mock protected ConfigurationResolver configurationResolver; @Mock protected ClassCacheInstrumentation instrumentationService; @Mock protected AbstractClassSensorAssignment<?> sensorAssignment; @Mock protected IInstrumentationApplier holdedInstrumentationApplier; @Mock protected IInstrumentationApplier instrumentationApplier; @Mock protected ClassType classTypeOne; @Mock protected ClassType classTypeTwo; @Mock protected ImmutableClassType immutableClassTypeOne; @Mock protected ImmutableClassType immutableClassTypeTwo; @Mock protected ProfileUpdateEvent event; @Mock protected ApplicationEventPublisher eventPublisher; @BeforeMethod public void setup() throws Exception { when(configurationHolder.getAgentConfiguration()).thenReturn(agentConfiguration); when(configurationHolder.getEnvironment()).thenReturn(environment); when(configurationHolder.getInstrumentationAppliers()).thenReturn(Collections.singletonList(holdedInstrumentationApplier)); when(agentCacheEntry.getConfigurationHolder()).thenReturn(configurationHolder); when(agentCacheEntry.getClassCache()).thenReturn(classCache); when(agentCacheEntry.getId()).thenReturn(10L); when(classCache.getInstrumentationService()).thenReturn(instrumentationService); when(classTypeOne.isClass()).thenReturn(true); 
when(classTypeTwo.isClass()).thenReturn(true); when(classTypeOne.castToClass()).thenReturn(immutableClassTypeOne); when(classTypeTwo.castToClass()).thenReturn(immutableClassTypeTwo); when(immutableClassTypeOne.hasInstrumentationPoints()).thenReturn(true); when(immutableClassTypeOne.hasInstrumentationPoints()).thenReturn(false); when(classTypeOne.getFQN()).thenReturn("fqnOne"); when(classTypeTwo.getFQN()).thenReturn("fqnTwo"); } public class Run extends ProfileUpdateJobTest { @Test public void noChanges() { job.setProfileUpdateEvent(event); job.run(); verifyZeroInteractions(classCache, environment, classCacheSearchNarrower, agentConfiguration, instrumentationService, eventPublisher); } @Test public void addedAssignment() throws RemoteException { Collection<ClassType> types = ImmutableList.of(classTypeOne, classTypeTwo); doReturn(instrumentationApplier).when(configurationResolver).getInstrumentationApplier(sensorAssignment, environment); doReturn(types).when(classCacheSearchNarrower).narrowByClassSensorAssignment(classCache, sensorAssignment); doReturn(types).when(instrumentationService).addInstrumentationPoints(eq(types), eq(agentConfiguration), Matchers.<Collection<IInstrumentationApplier>> any()); doReturn(Collections.singleton(sensorAssignment)).when(event).getAddedSensorAssignments(); job.setProfileUpdateEvent(event); job.run(); ArgumentCaptor<Collection> captor = ArgumentCaptor.forClass(Collection.class); verify(instrumentationService, times(1)).addInstrumentationPoints(eq(types), eq(agentConfiguration), captor.capture()); assertThat((Collection<IInstrumentationApplier>) captor.getValue(), hasSize(1)); assertThat(((Collection<IInstrumentationApplier>) captor.getValue()).iterator().next(), is(instrumentationApplier)); ArgumentCaptor<Collection> typeCaptor = ArgumentCaptor.forClass(Collection.class); verify(instrumentationService).getInstrumentationResults(typeCaptor.capture()); assertThat((Collection<ClassType>) typeCaptor.getValue(), hasItems(classTypeOne, 
classTypeTwo)); ArgumentCaptor<ClassInstrumentationChangedEvent> eventCaptor = ArgumentCaptor.forClass(ClassInstrumentationChangedEvent.class); verify(eventPublisher).publishEvent(eventCaptor.capture()); assertThat(eventCaptor.getValue().getAgentId(), is(equalTo(10L))); Matcher<InstrumentationDefinition> matcherOne = org.hamcrest.Matchers.<InstrumentationDefinition> hasProperty("className", equalTo("fqnOne")); Matcher<InstrumentationDefinition> matcherTwo = org.hamcrest.Matchers.<InstrumentationDefinition> hasProperty("className", equalTo("fqnTwo")); assertThat(eventCaptor.getValue().getInstrumentationDefinitions(), hasItems(matcherOne, matcherTwo)); verifyNoMoreInteractions(instrumentationService, eventPublisher); verifyZeroInteractions(environment); } @Test public void removedAssignment() throws RemoteException { Collection<ClassType> types = ImmutableList.of(classTypeOne, classTypeTwo); doReturn(instrumentationApplier).when(configurationResolver).getInstrumentationApplier(sensorAssignment, environment); doReturn(types).when(classCacheSearchNarrower).narrowByClassSensorAssignment(classCache, sensorAssignment); doReturn(types).when(instrumentationService).removeInstrumentationPoints(eq(types), Matchers.<Collection<IInstrumentationApplier>> any()); doReturn(Collections.singleton(sensorAssignment)).when(event).getRemovedSensorAssignments(); job.setProfileUpdateEvent(event); job.run(); ArgumentCaptor<Collection> captor = ArgumentCaptor.forClass(Collection.class); verify(instrumentationService, times(1)).removeInstrumentationPoints(eq(types), captor.capture()); assertThat((Collection<IInstrumentationApplier>) captor.getValue(), hasSize(1)); assertThat(((Collection<IInstrumentationApplier>) captor.getValue()).iterator().next(), is(instrumentationApplier)); ArgumentCaptor<Collection> typeCaptor = ArgumentCaptor.forClass(Collection.class); verify(instrumentationService).getInstrumentationResults(typeCaptor.capture()); assertThat((Collection<ClassType>) 
typeCaptor.getValue(), hasItems(classTypeOne, classTypeTwo)); Collection<IInstrumentationApplier> appliers = configurationHolder.getInstrumentationAppliers(); verify(instrumentationService, times(1)).addInstrumentationPoints(captor.capture(), eq(agentConfiguration), eq(appliers)); assertThat((Collection<ClassType>) captor.getValue(), hasSize(2)); assertThat(((Collection<ClassType>) captor.getValue()).iterator().next(), is(classTypeOne)); ArgumentCaptor<ClassInstrumentationChangedEvent> eventCaptor = ArgumentCaptor.forClass(ClassInstrumentationChangedEvent.class); verify(eventPublisher).publishEvent(eventCaptor.capture()); assertThat(eventCaptor.getValue().getAgentId(), is(equalTo(10L))); Matcher<InstrumentationDefinition> matcherOne = org.hamcrest.Matchers.<InstrumentationDefinition> hasProperty("className", equalTo("fqnOne")); Matcher<InstrumentationDefinition> matcherTwo = org.hamcrest.Matchers.<InstrumentationDefinition> hasProperty("className", equalTo("fqnTwo")); assertThat(eventCaptor.getValue().getInstrumentationDefinitions(), hasItems(matcherOne, matcherTwo)); verifyNoMoreInteractions(instrumentationService, eventPublisher); verifyZeroInteractions(environment); } @Test public void removedAssignmentNoChange() throws RemoteException { Collection<ClassType> types = ImmutableList.of(classTypeOne, classTypeTwo); doReturn(instrumentationApplier).when(configurationResolver).getInstrumentationApplier(sensorAssignment, environment); doReturn(types).when(classCacheSearchNarrower).narrowByClassSensorAssignment(classCache, sensorAssignment); doReturn(Collections.emptyList()).when(instrumentationService).removeInstrumentationPoints(eq(types), Matchers.<Collection<IInstrumentationApplier>> any()); doReturn(Collections.singleton(sensorAssignment)).when(event).getRemovedSensorAssignments(); job.setProfileUpdateEvent(event); job.run(); ArgumentCaptor<Collection> captor = ArgumentCaptor.forClass(Collection.class); verify(instrumentationService, 
times(1)).removeInstrumentationPoints(eq(types), captor.capture()); assertThat((Collection<IInstrumentationApplier>) captor.getValue(), hasSize(1)); assertThat(((Collection<IInstrumentationApplier>) captor.getValue()).iterator().next(), is(instrumentationApplier)); verifyNoMoreInteractions(instrumentationService); verifyZeroInteractions(environment, eventPublisher); } } }
3,245
938
package slimeknights.tconstruct.library.materials.stats;

/**
 * Material stats that support repairing; requires durability as part of the
 * stats.
 */
public interface IRepairableMaterialStats extends IMaterialStats {
  /**
   * Gets the amount of durability for this stat type.
   *
   * @return durability
   */
  int getDurability();
}
103
1,056
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.netbeans.modules.maven.model.pom;

import java.util.List;
import javax.xml.namespace.QName;
import org.netbeans.modules.xml.xam.dom.DocumentComponent2;

/**
 * Interface for all the components in the model.
 *
 * @author mkleint
 */
public interface POMComponent extends DocumentComponent2<POMComponent> {

    /** Property name fired when child extensibility elements change. */
    public static final String EXTENSIBILITY_ELEMENT_PROPERTY = "extensibilityElement";    // NOI18N

    /**
     * Get the owner model of this component.
     *
     * @return the owner model
     */
    @Override
    POMModel getModel();

    /**
     * Accepts the given visitor (visitor pattern over the POM model).
     *
     * @param visitor visitor to dispatch to
     */
    void accept(POMComponentVisitor visitor);

    /**
     * Adds a child extensibility element.
     *
     * @param ee a new child extensibility element
     */
    void addExtensibilityElement(POMExtensibilityElement ee);

    /**
     * Removes an existing child extensibility element.
     *
     * @param ee an existing child extensibility element
     */
    void removeExtensibilityElement(POMExtensibilityElement ee);

    /**
     * Gets a list of all child extensibility elements.
     *
     * @return a list of all child extensibility elements
     */
    List<POMExtensibilityElement> getExtensibilityElements();

    /**
     * Gets a list of child extensibility elements of the given type.
     *
     * @param type type of child extensibility elements
     * @return a list of child extensibility elements of the given type
     */
    <T extends POMExtensibilityElement> List<T> getExtensibilityElements(Class<T> type);

    /**
     * Gets the text content of the child element with the given qualified name.
     *
     * @param qname qualified name of the child element
     * @return the child element's text content
     */
    String getChildElementText(QName qname);

    /**
     * Sets the text content of the child element with the given qualified name.
     *
     * @param propertyName property name to fire change events for
     * @param text new text content
     * @param qname qualified name of the child element
     */
    void setChildElementText(String propertyName, String text, QName qname);

    /**
     * find the location in document for the given simple child element
     *
     * @param qname
     * @return position in document or -1 if not present.
     */
    int findChildElementPosition(QName qname);
}
893
608
/*
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 */
package com.facebook.battery.metrics.network;

import static android.net.TrafficStats.UNSUPPORTED;
import static com.facebook.battery.metrics.network.NetworkBytesCollector.MOBILE;
import static com.facebook.battery.metrics.network.NetworkBytesCollector.RX;
import static com.facebook.battery.metrics.network.NetworkBytesCollector.TX;
import static com.facebook.battery.metrics.network.NetworkBytesCollector.WIFI;
import static org.assertj.core.api.Java6Assertions.assertThat;

import android.content.Context;
import android.net.ConnectivityManager;
import android.net.NetworkInfo;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.robolectric.RobolectricTestRunner;
import org.robolectric.RuntimeEnvironment;
import org.robolectric.Shadows;
import org.robolectric.annotation.Config;
import org.robolectric.shadows.ShadowConnectivityManager;
import org.robolectric.shadows.ShadowNetworkInfo;

/**
 * Robolectric tests for {@link TrafficStatsNetworkBytesCollector} using
 * {@link ShadowTrafficStats} to control the byte counters. The result array
 * layout is indexed by the RX/TX and MOBILE/WIFI flag constants.
 */
@RunWith(RobolectricTestRunner.class)
@Config(shadows = {ShadowTrafficStats.class})
public class TrafficStatsNetworkBytesCollectorTest {
  // Reused output buffer; 8 slots (see NetworkBytesCollector flags).
  private final long[] mBytes = new long[8];

  /** With no traffic recorded, the collector reports all zeros. */
  @Test
  public void testEmpty() throws Exception {
    TrafficStatsNetworkBytesCollector collector =
        new TrafficStatsNetworkBytesCollector(RuntimeEnvironment.application);
    collector.getTotalBytes(mBytes);
    assertThat(mBytes).isEqualTo(new long[8]);
  }

  /**
   * Initial counters are attributed to mobile (the default active network in
   * this test setup — no network info has been installed).
   */
  @Test
  public void testInitialValues() throws Exception {
    ShadowTrafficStats.setUidRxBytes(10000);
    ShadowTrafficStats.setUidTxBytes(20000);
    TrafficStatsNetworkBytesCollector collector =
        new TrafficStatsNetworkBytesCollector(RuntimeEnvironment.application);

    assertThat(collector.getTotalBytes(mBytes)).isTrue();
    assertThat(mBytes).isEqualTo(new long[] {0, 0, 10000, 20000, 0, 0, 0, 0});
  }

  /** When TrafficStats reports UNSUPPORTED, the collector signals failure. */
  @Test
  public void testUnsupportedValues() throws Exception {
    ShadowTrafficStats.setUidRxBytes(UNSUPPORTED);
    ShadowTrafficStats.setUidTxBytes(UNSUPPORTED);
    TrafficStatsNetworkBytesCollector collector =
        new TrafficStatsNetworkBytesCollector(RuntimeEnvironment.application);

    assertThat(collector.getTotalBytes(mBytes)).isFalse();
  }

  /**
   * After a connectivity-change broadcast switches the active network to
   * wifi, only the delta accumulated since the switch is attributed to wifi;
   * everything before stays attributed to mobile.
   */
  @Test
  public void testBroadcastNetworkChanges() throws Exception {
    ShadowTrafficStats.setUidRxBytes(10000);
    ShadowTrafficStats.setUidTxBytes(20000);
    TrafficStatsNetworkBytesCollector collector =
        new TrafficStatsNetworkBytesCollector(RuntimeEnvironment.application);
    assertThat(collector.getTotalBytes(mBytes)).isTrue();

    // Counters grow while still on mobile.
    ShadowTrafficStats.setUidRxBytes(11000);
    ShadowTrafficStats.setUidTxBytes(22000);

    // Simulate a switch of the active network to wifi.
    ConnectivityManager connectivityManager =
        (ConnectivityManager)
            RuntimeEnvironment.application.getSystemService(Context.CONNECTIVITY_SERVICE);
    ShadowConnectivityManager shadowConnectivityManager =
        Shadows.shadowOf(connectivityManager);
    NetworkInfo networkInfo =
        ShadowNetworkInfo.newInstance(null, ConnectivityManager.TYPE_WIFI, 0, true, true);
    shadowConnectivityManager.setActiveNetworkInfo(networkInfo);
    collector.mReceiver.onReceive(null, null);

    // Further growth happens on wifi.
    ShadowTrafficStats.setUidRxBytes(11100);
    ShadowTrafficStats.setUidTxBytes(22200);
    assertThat(collector.getTotalBytes(mBytes)).isTrue();

    assertThat(mBytes[RX | MOBILE]).isEqualTo(11000);
    assertThat(mBytes[TX | MOBILE]).isEqualTo(22000);
    assertThat(mBytes[RX | WIFI]).isEqualTo(100);
    assertThat(mBytes[TX | WIFI]).isEqualTo(200);
  }
}
522
// Copyright 2017 The TensorFlow Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // ============================================================================= #include "tensorflow/contrib/tensor_forest/kernels/v4/candidate_graph_runner.h" #include "tensorflow/core/lib/io/path.h" #include "tensorflow/core/framework/graph.pb.h" #include "tensorflow/core/platform/env.h" namespace tensorflow { namespace tensorforest { // Names of ops in the graph to run. constexpr char kInitializeOp[] = "init"; constexpr char kAddExampleOp[] = "add_example"; constexpr char kSplitScoreName[] = "split_score"; constexpr char kGetSplitName[] = "get_split"; constexpr char kGetLeftStatsName[] = "get_left_stats"; constexpr char kGetRightStatsName[] = "get_right_stats"; // Names of files written by python graph builder. constexpr char kGraphFilename[] = "graph"; constexpr char kSaverDefFilename[] = "saver"; constexpr char kMetaDefFilename[] = "meta"; // Names of Tensor inputs. constexpr char kFeaturesName[] = "features"; constexpr char kInputDataName[] = "input_data"; constexpr char kTargetsName[] = "targets"; constexpr char kExamplesName[] = "examples"; constexpr char kNoOp[] = "none"; CandidateGraphRunner::CandidateGraphRunner( const string& graph_dir, const decision_trees::BinaryNode& split) : split_(split) { // read graph from file. 
GraphDef graph_def; TF_CHECK_OK(ReadBinaryProto( Env::Default(), io::JoinPath(graph_dir, kGraphFilename), &graph_def)) << "Could not read graph def."; // create session. session_.reset(::tensorflow::NewSession(SessionOptions())); TF_CHECK_OK(session_->Create(graph_def)) << "Failed to create session"; // Features don't change, store them in a tensor. const auto& oblique = split.inequality_left_child_test().oblique(); const int32 feat_size = oblique.features_size(); features_.reset( new Tensor(tensorflow::DT_INT32, TensorShape({feat_size}))); auto feat = features_->flat<int32>(); int i = 0; for (const auto& id : oblique.features()) { safe_strto32(id.id().value(), &feat(i++)); } } void CandidateGraphRunner::RunOp( const string& name, const TensorNameValueList& inputs, const std::vector<string>& output_tensor_names, std::vector<Tensor>* outputs) { std::vector<string> op_name; if (name != kNoOp) { op_name.push_back(name); } TF_CHECK_OK(session_->Run(inputs, output_tensor_names, op_name, outputs)) << "Failed to run: " << name; } void CandidateGraphRunner::Init() { RunOp(kInitializeOp, TensorNameValueList(), std::vector<string>(), nullptr); } void CandidateGraphRunner::AddExample(const Tensor& input_data, const Tensor& target, const Tensor& examples) { TensorNameValueList inputs; inputs.emplace_back(kFeaturesName, *features_); inputs.emplace_back(kExamplesName, examples); inputs.emplace_back(kInputDataName, input_data); inputs.emplace_back(kTargetsName, target); RunOp(kAddExampleOp, inputs, std::vector<string>(), nullptr); } float CandidateGraphRunner::SplitScore() { std::vector<Tensor> outputs; RunOp(kNoOp, TensorNameValueList(), {kSplitScoreName}, &outputs); return outputs[0].unaligned_flat<float>()(0); } void CandidateGraphRunner::GetSplit(decision_trees::BinaryNode* node) { std::vector<Tensor> outputs; RunOp(kNoOp, TensorNameValueList(), {kGetSplitName}, &outputs); ParseProtoUnlimited(node, outputs[0].unaligned_flat<string>()(0)); const auto& oblique = 
split_.inequality_left_child_test().oblique(); auto* new_split = node->mutable_inequality_left_child_test()->mutable_oblique(); for (const auto& id : oblique.features()) { *new_split->add_features() = id; } } void CandidateGraphRunner::GetLeftStats(LeafStat* stats) { std::vector<Tensor> outputs; RunOp(kNoOp, TensorNameValueList(), {kGetLeftStatsName}, &outputs); const auto& counts = outputs[0].unaligned_flat<float>(); auto* dense = stats->mutable_classification()->mutable_dense_counts(); for (int i = 0; i < counts.size(); ++i) { dense->add_value()->set_float_value(counts(i)); } } void CandidateGraphRunner::GetRightStats(LeafStat* stats) { std::vector<Tensor> outputs; RunOp(kNoOp, TensorNameValueList(), {kGetRightStatsName}, &outputs); const auto& counts = outputs[0].unaligned_flat<float>(); auto* dense = stats->mutable_classification()->mutable_dense_counts(); for (int i = 0; i < counts.size(); ++i) { dense->add_value()->set_float_value(counts(i)); } } } // namespace tensorforest } // namespace tensorflow
1,759
1,521
<gh_stars>1000+
/**
 * Copyright 2020 Alibaba Group Holding Limited.
 *
 * <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
 * except in compliance with the License. You may obtain a copy of the License at
 *
 * <p>http://www.apache.org/licenses/LICENSE-2.0
 *
 * <p>Unless required by applicable law or agreed to in writing, software distributed under the
 * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.alibaba.maxgraph.servers;

import com.alibaba.maxgraph.common.config.CommonConfig;
import com.alibaba.maxgraph.common.config.Configs;
import com.alibaba.maxgraph.common.config.KafkaConfig;
import com.alibaba.maxgraph.common.config.ZkConfig;
import com.alibaba.maxgraph.common.RoleType;
import com.alibaba.maxgraph.compiler.api.exception.MaxGraphException;
import com.google.common.annotations.VisibleForTesting;
import com.salesforce.kafka.test.KafkaTestCluster;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;

/**
 * An all-in-one node that runs an embedded Kafka/ZooKeeper test cluster plus a coordinator,
 * frontends, ingestors and stores inside a single process.
 */
public class MaxNode extends NodeBase {
    private static final Logger logger = LoggerFactory.getLogger(MaxNode.class);

    // Embedded single-broker Kafka (also provides ZooKeeper) backing the other roles.
    private KafkaTestCluster kafkaTestCluster;
    private NodeBase coordinator;
    private List<NodeBase> frontends = new ArrayList<>();
    private List<NodeBase> ingestors = new ArrayList<>();
    private List<NodeBase> stores = new ArrayList<>();

    /**
     * Starts the embedded Kafka cluster and builds (but does not start) every role node,
     * deriving all connection strings and node counts from {@code configs}.
     *
     * @param configs base configuration; only STORE_NODE_COUNT is read directly,
     *                everything else is overridden below
     * @throws Exception if the embedded Kafka cluster fails to start
     */
    public MaxNode(Configs configs) throws Exception {
        Properties kafkaConfigs = new Properties();
        kafkaConfigs.put("max.request.size", 10000000);
        this.kafkaTestCluster = new KafkaTestCluster(1, kafkaConfigs);
        this.kafkaTestCluster.start();
        int frontendCount = 1;
        int ingestorCount = 2;
        int storeCount = CommonConfig.STORE_NODE_COUNT.get(configs);
        // Shared configuration: point every role at the embedded ZK/Kafka and
        // declare how many nodes of each role exist.
        Configs baseConfigs =
                Configs.newBuilder(configs)
                        .put(
                                ZkConfig.ZK_CONNECT_STRING.getKey(),
                                this.kafkaTestCluster.getZookeeperConnectString())
                        .put(
                                KafkaConfig.KAFKA_SERVERS.getKey(),
                                this.kafkaTestCluster.getKafkaConnectString())
                        .put(
                                CommonConfig.INGESTOR_NODE_COUNT.getKey(),
                                String.valueOf(ingestorCount))
                        .put(
                                CommonConfig.INGESTOR_QUEUE_COUNT.getKey(),
                                String.valueOf(ingestorCount))
                        .put(
                                String.format(
                                        CommonConfig.NODE_COUNT_FORMAT,
                                        RoleType.EXECUTOR_ENGINE.getName()),
                                String.valueOf(storeCount))
                        .put(
                                String.format(
                                        CommonConfig.NODE_COUNT_FORMAT,
                                        RoleType.EXECUTOR_GRAPH.getName()),
                                String.valueOf(storeCount))
                        .put(
                                String.format(
                                        CommonConfig.NODE_COUNT_FORMAT,
                                        RoleType.EXECUTOR_MANAGE.getName()),
                                String.valueOf(storeCount))
                        .put(
                                String.format(
                                        CommonConfig.NODE_COUNT_FORMAT,
                                        RoleType.EXECUTOR_QUERY.getName()),
                                String.valueOf(storeCount))
                        .put(
                                String.format(
                                        CommonConfig.NODE_COUNT_FORMAT,
                                        RoleType.GAIA_RPC.getName()),
                                String.valueOf(storeCount))
                        .put(
                                String.format(
                                        CommonConfig.NODE_COUNT_FORMAT,
                                        RoleType.GAIA_ENGINE.getName()),
                                String.valueOf(storeCount))
                        .put(
                                CommonConfig.FRONTEND_NODE_COUNT.getKey(),
                                String.valueOf(frontendCount))
                        .build();

        Configs coordinatorConfigs =
                Configs.newBuilder(baseConfigs)
                        .put(CommonConfig.ROLE_NAME.getKey(), RoleType.COORDINATOR.getName())
                        .put(CommonConfig.NODE_IDX.getKey(), "0")
                        .build();
        this.coordinator = new Coordinator(coordinatorConfigs);
        for (int i = 0; i < frontendCount; i++) {
            // NOTE(review): the frontend RPC port is hard-coded to 55556 here,
            // so only a single frontend per host can bind it.
            Configs frontendConfigs =
                    Configs.newBuilder(baseConfigs)
                            .put(CommonConfig.ROLE_NAME.getKey(), RoleType.FRONTEND.getName())
                            .put(CommonConfig.NODE_IDX.getKey(), String.valueOf(i))
                            .put(CommonConfig.RPC_PORT.getKey(), "55556")
                            .build();
            this.frontends.add(new Frontend(frontendConfigs));
        }
        for (int i = 0; i < ingestorCount; i++) {
            Configs ingestConfigs =
                    Configs.newBuilder(baseConfigs)
                            .put(CommonConfig.ROLE_NAME.getKey(), RoleType.INGESTOR.getName())
                            .put(CommonConfig.NODE_IDX.getKey(), String.valueOf(i))
                            .build();
            this.ingestors.add(new Ingestor(ingestConfigs));
        }
        for (int i = 0; i < storeCount; i++) {
            Configs storeConfigs =
                    Configs.newBuilder(baseConfigs)
                            .put(CommonConfig.ROLE_NAME.getKey(), RoleType.STORE.getName())
                            .put(CommonConfig.NODE_IDX.getKey(), String.valueOf(i))
                            .build();
            this.stores.add(new Store(storeConfigs));
        }
    }

    /**
     * Starts all role nodes concurrently (one thread each) and blocks until every
     * node has finished starting.
     */
    public void start() {
        List<Thread> startThreads = new ArrayList<>();
        for (NodeBase store : this.stores) {
            startThreads.add(
                    new Thread(
                            () -> {
                                store.start();
                                logger.info("[" + store.getName() + "] started");
                            }));
        }
        for (NodeBase frontend : this.frontends) {
            startThreads.add(
                    new Thread(
                            () -> {
                                frontend.start();
                                logger.info("[" + frontend.getName() + "] started");
                            }));
        }
        for (NodeBase ingestor : this.ingestors) {
            startThreads.add(
                    new Thread(
                            () -> {
                                ingestor.start();
                                logger.info("[" + ingestor.getName() + "] started");
                            }));
        }
        startThreads.add(
                new Thread(
                        () -> {
                            this.coordinator.start();
                            logger.info("[" + this.coordinator.getName() + "] started");
                        }));

        for (Thread startThread : startThreads) {
            startThread.start();
        }
        for (Thread startThread : startThreads) {
            try {
                startThread.join();
            } catch (InterruptedException e) {
                throw new MaxGraphException(e);
            }
        }
        logger.info("maxnode started");
    }

    /**
     * Closes every node (ingestors, frontends, stores, then coordinator) and finally
     * shuts down the embedded Kafka cluster; Kafka shutdown failures are only logged.
     */
    @Override
    public void close() throws IOException {
        for (NodeBase ingestor : this.ingestors) {
            ingestor.close();
        }
        for (NodeBase frontend : this.frontends) {
            frontend.close();
        }
        for (NodeBase store : this.stores) {
            store.close();
        }
        this.coordinator.close();
        try {
            this.kafkaTestCluster.close();
        } catch (Exception e) {
            logger.warn("close kafka failed", e);
        }
    }

    // Entry point: builds a MaxNode from -Dconfig.file and launches it.
    public static void main(String[] args) throws Exception {
        String configFile = System.getProperty("config.file");
        Configs conf = new Configs(configFile);
        MaxNode maxNode = new MaxNode(conf);
        NodeLauncher nodeLauncher = new NodeLauncher(maxNode);
        nodeLauncher.start();
    }

    @VisibleForTesting
    public List<NodeBase> getStores() {
        return stores;
    }

    @VisibleForTesting
    public List<NodeBase> getFrontends() {
        return frontends;
    }
}
1,652
package com.ctrip.xpipe.redis.meta.server.cluster.impl; import com.ctrip.xpipe.api.lifecycle.TopElement; import com.ctrip.xpipe.cluster.AbstractLeaderElector; import com.ctrip.xpipe.redis.core.meta.MetaZkConfig; import com.ctrip.xpipe.redis.meta.server.config.MetaServerConfig; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Component; /** * @author wenchao.meng * * Jul 21, 2016 */ @Component public class MetaserverLeaderElector extends AbstractLeaderElector implements TopElement{ @Autowired private MetaServerConfig config; @Override protected String getServerId() { return String.valueOf(config.getMetaServerId()); } @Override protected String getLeaderElectPath() { return MetaZkConfig.getMetaServerLeaderElectPath(); } }
261
881
<filename>dev/test/transforms/zlib_body_handler/main.cpp
/*
	restinio
*/

/*!
	Echo server used to test restinio::transforms::zlib::handle_body():
	each handler decompresses the request body and echoes it back.
*/

#include <catch2/catch.hpp>

#include <restinio/all.hpp>
#include <restinio/transforms/zlib.hpp>

#include <test/common/utest_logger.hpp>
#include <test/common/pub.hpp>

#include "../random_data_generators.ipp"

// Posts deflate- and gzip-compressed bodies (all compression levels 0..9)
// and checks the echoed response equals the original uncompressed text.
TEST_CASE( "body_handler" , "[zlib][body_handler]" )
{
	std::srand( static_cast<unsigned int>(std::time( nullptr )) );

	const auto response_body = create_random_text( 128 * 1024, 16 );

	using router_t = restinio::router::express_router_t<>;
	auto router = std::make_unique< router_t >();

	namespace rtz = restinio::transforms::zlib;

	router->http_post(
		"/",
		[ & ]( auto req, auto ){
			// handle_body() decompresses according to Content-Encoding and
			// hands the plain body to the callback.
			return restinio::transforms::zlib::handle_body(
				*req,
				[&]( auto body ){
					return req->create_response()
						.append_header( restinio::http_field::server, "RESTinio" )
						.append_header_date_field()
						.set_body( std::move( body ) )
						.done();
				} );
		} );

	using http_server_t =
		restinio::http_server_t<
			restinio::traits_t<
				restinio::asio_timer_manager_t,
				utest_logger_t,
				router_t > >;

	http_server_t http_server{
		restinio::own_io_context(),
		[&]( auto & settings ){
			settings
				.port( utest_default_port() )
				.address( "127.0.0.1" )
				.request_handler( std::move( router ) );
		} };

	other_work_thread_for_server_t<http_server_t> other_thread{ http_server };
	other_thread.run();

	// Exercise every zlib compression level for both encodings.
	for( int i = 0; i <= 9; ++i )
	{
		{
			auto compressed_data = rtz::deflate_compress( response_body, i );

			const std::string request =
				fmt::format(
					"POST / HTTP/1.0\r\n"
					"From: unit-test\r\n"
					"User-Agent: unit-test\r\n"
					"Content-Type: text/plain\r\n"
					"Content-Encoding: DEFLATE\r\n"
					"Content-Length: {}\r\n"
					"Connection: close\r\n"
					"\r\n"
					"{}",
					compressed_data.size(),
					compressed_data );

			std::string response;
			REQUIRE_NOTHROW( response = do_request( request ) );

			REQUIRE_THAT(
				response,
				Catch::Matchers::EndsWith(
					"\r\n\r\n" + response_body ) );
		}

		{
			auto compressed_data = rtz::gzip_compress( response_body, i );

			const std::string request =
				fmt::format(
					"POST / HTTP/1.0\r\n"
					"From: unit-test\r\n"
					"User-Agent: unit-test\r\n"
					"Content-Type: text/plain\r\n"
					"Content-Encoding: GZIP\r\n"
					"Content-Length: {}\r\n"
					"Connection: close\r\n"
					"\r\n"
					"{}",
					compressed_data.size(),
					compressed_data );

			std::string response;
			REQUIRE_NOTHROW( response = do_request( request ) );

			REQUIRE_THAT(
				response,
				Catch::Matchers::EndsWith(
					"\r\n\r\n" + response_body ) );
		}
	}

	other_thread.stop_and_join();
}

// Checks the handle_body() overload whose callback returns void
// (the response is completed outside the callback).
TEST_CASE( "body_handler void return" , "[zlib][body_handler][void-return]" )
{
	std::srand( static_cast<unsigned int>(std::time( nullptr )) );

	const auto response_body = create_random_text( 1024, 16 );

	using router_t = restinio::router::express_router_t<>;
	auto router = std::make_unique< router_t >();

	namespace rtz = restinio::transforms::zlib;

	router->http_post(
		"/",
		[ & ]( const restinio::request_handle_t& req, auto ){
			auto resp = req->create_response();
			resp.append_header( restinio::http_field::server, "RESTinio" )
				.append_header_date_field();

			restinio::transforms::zlib::handle_body(
				*req,
				[&]( auto body ){
					resp.set_body( std::move( body ) );
				} );

			return resp.done();
		} );

	using http_server_t =
		restinio::http_server_t<
			restinio::traits_t<
				restinio::asio_timer_manager_t,
				utest_logger_t,
				router_t > >;

	http_server_t http_server{
		restinio::own_io_context(),
		[&]( auto & settings ){
			settings
				.port( utest_default_port() )
				.address( "127.0.0.1" )
				.request_handler( std::move( router ) );
		} };

	other_work_thread_for_server_t<http_server_t> other_thread{ http_server };
	other_thread.run();

	{
		// IDENTITY means the body is passed through uncompressed.
		const std::string request =
			fmt::format(
				"POST / HTTP/1.0\r\n"
				"From: unit-test\r\n"
				"User-Agent: unit-test\r\n"
				"Content-Type: text/plain\r\n"
				"Content-Encoding: IDENTITY\r\n"
				"Content-Length: {}\r\n"
				"Connection: close\r\n"
				"\r\n"
				"{}",
				response_body.size(),
				response_body );

		std::string response;
		REQUIRE_NOTHROW( response = do_request( request ) );

		REQUIRE_THAT(
			response,
			Catch::Matchers::EndsWith(
				"\r\n\r\n" + response_body ) );
	}

	other_thread.stop_and_join();
}
2,106
1,550
# Leo colorizer control file for doxygen mode.
# This file is in the public domain.

# Properties for doxygen mode.
properties = {
    "lineComment": "#",
}

# Attributes dict for doxygen_main ruleset.
doxygen_main_attributes_dict = {
    "default": "null",
    "digit_re": "",
    "escape": "\\",
    "highlight_digits": "true",
    "ignore_case": "false",
    "no_word_sep": "",
}

# Attributes dict for doxygen_doxygen ruleset.
doxygen_doxygen_attributes_dict = {
    "default": "COMMENT3",
    "digit_re": "",
    "escape": "\\",
    "highlight_digits": "true",
    "ignore_case": "true",
    "no_word_sep": "",
}

# Dictionary of attributes dictionaries for doxygen mode.
attributesDictDict = {
    "doxygen_doxygen": doxygen_doxygen_attributes_dict,
    "doxygen_main": doxygen_main_attributes_dict,
}

# Keywords dict for doxygen_main ruleset.
doxygen_main_keywords_dict = {
    "NO": "keyword3",
    "YES": "keyword2",
}

# Doxygen commands that may be written with either the "@" or the "\" escape.
# The two spellings are equivalent, so the keyword table below is generated
# from this single list instead of spelling every entry out twice.
_DOXYGEN_COMMANDS = (
    "#", "$", "%", "@", "\\",
    "a", "addindex", "addtogroup", "anchor", "arg", "attention", "author",
    "b", "brief", "bug",
    "c", "callgraph", "category", "class", "code", "copydoc",
    "date", "def", "defgroup", "deprecated", "dontinclude", "dot", "dotfile",
    "e", "else", "elseif", "em", "endcode", "enddot", "endhtmlonly", "endif",
    "endlatexonly", "endlink", "endmanonly", "endverbatim", "endxmlonly",
    "enum", "example", "exception",
    "f$", "f[", "f]", "file", "fn",
    "hideinitializer", "htmlinclude", "htmlonly",
    "if", "ifnot", "image", "include", "includelineno", "ingroup",
    "interface", "internal", "invariant",
    "latexonly", "li", "line", "link",
    "mainpage", "manonly",
    "n", "name", "namespace", "nosubgrouping", "note",
    "overload",
    "p", "package", "page", "par", "paragraph", "param", "param[in,out]",
    "param[in]", "param[out]", "post", "pre", "private", "privatesection",
    "property", "protected", "protectedsection", "protocol", "public",
    "publicsection",
    "ref", "relates", "relatesalso", "remarks", "return", "retval",
    "sa", "section", "showinitializer", "since", "skip", "skipline",
    "struct", "subsection", "subsubsection",
    "test", "throw", "todo", "typedef",
    "union", "until",
    "var", "verbatim", "verbinclude", "version",
    "warning", "weakgroup",
    "xmlonly", "xrefitem",
    "~",
)

# Keywords dict for doxygen_doxygen ruleset.
# A few characters are highlighted on their own...
doxygen_doxygen_keywords_dict = {
    "&": "label",
    "<": "label",
    ">": "label",
    "@": "label",
    "\\": "label",
}
# ...plus every doxygen command in both its "@" and "\" spelling.
for _cmd in _DOXYGEN_COMMANDS:
    doxygen_doxygen_keywords_dict["@" + _cmd] = "label"
    doxygen_doxygen_keywords_dict["\\" + _cmd] = "label"

# Dictionary of keywords dictionaries for doxygen mode.
keywordsDictDict = {
    "doxygen_doxygen": doxygen_doxygen_keywords_dict,
    "doxygen_main": doxygen_main_keywords_dict,
}

# Rules for doxygen_main ruleset.

def doxygen_rule0(colorer, s, i):
    # '#' starts a line comment in a Doxyfile.
    return colorer.match_eol_span(s, i, kind="comment1", seq="#",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="", exclude_match=False)

def doxygen_rule1(colorer, s, i):
    # Highlight the option name to the left of '='.
    return colorer.match_mark_previous(s, i, kind="keyword1", pattern="=",
        at_line_start=True, at_whitespace_end=False, at_word_start=False,
        exclude_match=True)

def doxygen_rule2(colorer, s, i):
    # Highlight the option name to the left of '+='.
    return colorer.match_mark_previous(s, i, kind="keyword1", pattern="+=",
        at_line_start=True, at_whitespace_end=False, at_word_start=False,
        exclude_match=True)

def doxygen_rule3(colorer, s, i):
    # Double-quoted string literal.
    return colorer.match_span(s, i, kind="literal1", begin="\"", end="\"",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="", exclude_match=False,
        no_escape=False, no_line_break=True, no_word_break=False)

def doxygen_rule4(colorer, s, i):
    # Single-quoted string literal.
    return colorer.match_span(s, i, kind="literal1", begin="'", end="'",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="", exclude_match=False,
        no_escape=False, no_line_break=True, no_word_break=False)

def doxygen_rule5(colorer, s, i):
    # Back-quoted string literal.
    return colorer.match_span(s, i, kind="literal1", begin="`", end="`",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="", exclude_match=False,
        no_escape=False, no_line_break=True, no_word_break=False)

def doxygen_rule6(colorer, s, i):
    # Fall through to the YES/NO keyword matcher.
    return colorer.match_keywords(s, i)

# Rules dict for doxygen_main ruleset.
rulesDict1 = { "\"": [doxygen_rule3,], "#": [doxygen_rule0,doxygen_rule6,], "$": [doxygen_rule6,], "%": [doxygen_rule6,], "&": [doxygen_rule6,], "'": [doxygen_rule4,], "+": [doxygen_rule2,], ",": [doxygen_rule6,], "0": [doxygen_rule6,], "1": [doxygen_rule6,], "2": [doxygen_rule6,], "3": [doxygen_rule6,], "4": [doxygen_rule6,], "5": [doxygen_rule6,], "6": [doxygen_rule6,], "7": [doxygen_rule6,], "8": [doxygen_rule6,], "9": [doxygen_rule6,], "<": [doxygen_rule6,], "=": [doxygen_rule1,], ">": [doxygen_rule6,], "@": [doxygen_rule6,], "A": [doxygen_rule6,], "B": [doxygen_rule6,], "C": [doxygen_rule6,], "D": [doxygen_rule6,], "E": [doxygen_rule6,], "F": [doxygen_rule6,], "G": [doxygen_rule6,], "H": [doxygen_rule6,], "I": [doxygen_rule6,], "J": [doxygen_rule6,], "K": [doxygen_rule6,], "L": [doxygen_rule6,], "M": [doxygen_rule6,], "N": [doxygen_rule6,], "O": [doxygen_rule6,], "P": [doxygen_rule6,], "Q": [doxygen_rule6,], "R": [doxygen_rule6,], "S": [doxygen_rule6,], "T": [doxygen_rule6,], "U": [doxygen_rule6,], "V": [doxygen_rule6,], "W": [doxygen_rule6,], "X": [doxygen_rule6,], "Y": [doxygen_rule6,], "Z": [doxygen_rule6,], "[": [doxygen_rule6,], "\\": [doxygen_rule6,], "]": [doxygen_rule6,], "`": [doxygen_rule5,], "a": [doxygen_rule6,], "b": [doxygen_rule6,], "c": [doxygen_rule6,], "d": [doxygen_rule6,], "e": [doxygen_rule6,], "f": [doxygen_rule6,], "g": [doxygen_rule6,], "h": [doxygen_rule6,], "i": [doxygen_rule6,], "j": [doxygen_rule6,], "k": [doxygen_rule6,], "l": [doxygen_rule6,], "m": [doxygen_rule6,], "n": [doxygen_rule6,], "o": [doxygen_rule6,], "p": [doxygen_rule6,], "q": [doxygen_rule6,], "r": [doxygen_rule6,], "s": [doxygen_rule6,], "t": [doxygen_rule6,], "u": [doxygen_rule6,], "v": [doxygen_rule6,], "w": [doxygen_rule6,], "x": [doxygen_rule6,], "y": [doxygen_rule6,], "z": [doxygen_rule6,], "~": [doxygen_rule6,], } # Rules for doxygen_doxygen ruleset. 
def doxygen_rule7(colorer, s, i): return colorer.match_seq(s, i, kind="comment3", seq="*", at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="") def doxygen_rule8(colorer, s, i): return colorer.match_span(s, i, kind="comment1", begin="<!--", end="-->", at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="",exclude_match=False, no_escape=False, no_line_break=False, no_word_break=False) def doxygen_rule9(colorer, s, i): return colorer.match_seq(s, i, kind="comment3", seq="<<", at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="") def doxygen_rule10(colorer, s, i): return colorer.match_seq(s, i, kind="comment3", seq="<=", at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="") def doxygen_rule11(colorer, s, i): return colorer.match_seq(s, i, kind="comment3", seq="< ", at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="") def doxygen_rule12(colorer, s, i): return colorer.match_span(s, i, kind="markup", begin="<", end=">", at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="xml::tags",exclude_match=False, no_escape=False, no_line_break=True, no_word_break=False) def doxygen_rule13(colorer, s, i): return colorer.match_keywords(s, i) # Rules dict for doxygen_doxygen ruleset. 
rulesDict2 = { "#": [doxygen_rule13,], "$": [doxygen_rule13,], "%": [doxygen_rule13,], "&": [doxygen_rule13,], "*": [doxygen_rule7,], ",": [doxygen_rule13,], "0": [doxygen_rule13,], "1": [doxygen_rule13,], "2": [doxygen_rule13,], "3": [doxygen_rule13,], "4": [doxygen_rule13,], "5": [doxygen_rule13,], "6": [doxygen_rule13,], "7": [doxygen_rule13,], "8": [doxygen_rule13,], "9": [doxygen_rule13,], "<": [doxygen_rule8,doxygen_rule9,doxygen_rule10,doxygen_rule11,doxygen_rule12,doxygen_rule13,], ">": [doxygen_rule13,], "@": [doxygen_rule13,], "A": [doxygen_rule13,], "B": [doxygen_rule13,], "C": [doxygen_rule13,], "D": [doxygen_rule13,], "E": [doxygen_rule13,], "F": [doxygen_rule13,], "G": [doxygen_rule13,], "H": [doxygen_rule13,], "I": [doxygen_rule13,], "J": [doxygen_rule13,], "K": [doxygen_rule13,], "L": [doxygen_rule13,], "M": [doxygen_rule13,], "N": [doxygen_rule13,], "O": [doxygen_rule13,], "P": [doxygen_rule13,], "Q": [doxygen_rule13,], "R": [doxygen_rule13,], "S": [doxygen_rule13,], "T": [doxygen_rule13,], "U": [doxygen_rule13,], "V": [doxygen_rule13,], "W": [doxygen_rule13,], "X": [doxygen_rule13,], "Y": [doxygen_rule13,], "Z": [doxygen_rule13,], "[": [doxygen_rule13,], "\\": [doxygen_rule13,], "]": [doxygen_rule13,], "a": [doxygen_rule13,], "b": [doxygen_rule13,], "c": [doxygen_rule13,], "d": [doxygen_rule13,], "e": [doxygen_rule13,], "f": [doxygen_rule13,], "g": [doxygen_rule13,], "h": [doxygen_rule13,], "i": [doxygen_rule13,], "j": [doxygen_rule13,], "k": [doxygen_rule13,], "l": [doxygen_rule13,], "m": [doxygen_rule13,], "n": [doxygen_rule13,], "o": [doxygen_rule13,], "p": [doxygen_rule13,], "q": [doxygen_rule13,], "r": [doxygen_rule13,], "s": [doxygen_rule13,], "t": [doxygen_rule13,], "u": [doxygen_rule13,], "v": [doxygen_rule13,], "w": [doxygen_rule13,], "x": [doxygen_rule13,], "y": [doxygen_rule13,], "z": [doxygen_rule13,], "~": [doxygen_rule13,], } # x.rulesDictDict for doxygen mode. 
rulesDictDict = { "doxygen_doxygen": rulesDict2, "doxygen_main": rulesDict1, } # Import dict for doxygen mode. importDict = {}
7,772
320
<gh_stars>100-1000
// implementation based on:
//   http://map.grauw.nl/resources/midi/ym2148.php
//
// Emulation of a MIDI UART: a serial line running at 31250 baud where every
// character occupies 10 bit-times (see BIT_DURATION / CHAR_DURATION below).
// The device sits between the MSX CPU (status/data/command registers) and a
// pluggable MIDI-in device plus a MIDI-out connector.

#include "YM2148.hh"
#include "MidiInDevice.hh"
#include "MSXMotherBoard.hh"
#include "serialize.hh"

namespace openmsx {

// status register flags
constexpr unsigned STAT_TXRDY = 0x01; // Transmitter ready: no MIDI-out send is in progress
constexpr unsigned STAT_RXRDY = 0x02; // Receiver ready: a MIDI-in byte is available for the MSX
constexpr unsigned STAT_OE    = 0x10; // Overrun error (incoming data)
constexpr unsigned STAT_FE    = 0x20; // Framing error (incoming data)

// command register bits
constexpr unsigned CMD_TXEN = 0x01; // Transmit enable
constexpr unsigned CMD_TXIE = 0x02; // TxRDY interrupt enable
constexpr unsigned CMD_RXEN = 0x04; // Receive enable
constexpr unsigned CMD_RXIE = 0x08; // RxRDY interrupt enable
constexpr unsigned CMD_ER   = 0x10; // Error Reset
constexpr unsigned CMD_IR   = 0x80; // Internal Reset
// The meaning of bits 5 and 6 are unknown (they are used by the CX5M
// software). Some documentation *guesses* they are related to IM2
// IRQ handling.

// MIDI wire speed; one character = 1 start-bit + 8 data-bits + 1 stop-bit.
constexpr auto BIT_DURATION = EmuDuration::hz(31250);
constexpr auto CHAR_DURATION = BIT_DURATION * 10; // 1 start-bit, 8 data-bits, 1 stop-bit

// Construct the UART: registers itself as a MIDI-in connector, creates the
// two schedulables (receive / transmit timing) and the two IRQ lines.
YM2148::YM2148(const std::string& name_, MSXMotherBoard& motherBoard)
	: MidiInConnector(motherBoard.getPluggingController(), name_ + "-MIDI-in")
	, syncRecv (motherBoard.getScheduler())
	, syncTrans(motherBoard.getScheduler())
	, rxIRQ(motherBoard, name_ + "-rx-IRQ")
	, txIRQ(motherBoard, name_ + "-tx-IRQ")
	, txBuffer1(0), txBuffer2(0) // avoid UMR
	, outConnector(motherBoard.getPluggingController(), name_ + "-MIDI-out")
{
	reset();
}

// Full internal reset: cancels any in-flight receive/transmit, drops both
// IRQ lines and clears all registers. Also invoked by the CMD_IR command bit.
void YM2148::reset()
{
	syncRecv .removeSyncPoint();
	syncTrans.removeSyncPoint();
	rxIRQ.reset();
	txIRQ.reset();
	rxReady = false;
	rxBuffer = 0;
	status = 0;
	commandReg = 0;
}

// MidiInConnector sends a new character.
void YM2148::recvByte(byte value, EmuTime::param time)
{
	assert(acceptsData() && ready());

	if (status & STAT_RXRDY) {
		// So, there is a byte that has to be read by the MSX still!
		// This happens when the MSX program doesn't
		// respond fast enough to an earlier received byte.
		status |= STAT_OE;
		// TODO investigate: overwrite rxBuffer in case of overrun?
	} else {
		rxBuffer = value;
		status |= STAT_RXRDY;
		// Raise the receive IRQ only when the program enabled it.
		if (commandReg & CMD_RXIE) rxIRQ.set();
	}

	// Not ready now, but we will be in a while
	rxReady = false;
	syncRecv.setSyncPoint(time + CHAR_DURATION);
}

// Triggered when we're ready to receive the next character.
void YM2148::execRecv(EmuTime::param time)
{
	assert(commandReg & CMD_RXEN);
	assert(!rxReady);
	rxReady = true;
	getPluggedMidiInDev().signal(time); // trigger (possible) send of next char
}

// MidiInDevice queries whether it can send a new character 'now'.
bool YM2148::ready()
{
	return rxReady;
}

// MidiInDevice queries whether it can send characters at all.
bool YM2148::acceptsData()
{
	return (commandReg & CMD_RXEN) != 0;
}

// MidiInDevice informs us about the format of the data it will send
// (MIDI is always 1 start-bit, 8 data-bits, 1 stop-bit, no parity-bits).
void YM2148::setDataBits(DataBits /*bits*/)
{
	// ignore
}
void YM2148::setStopBits(StopBits /*bits*/)
{
	// ignore
}
void YM2148::setParityBit(bool /*enable*/, ParityBit /*parity*/)
{
	// ignore
}

// MSX program reads the status register.
byte YM2148::readStatus(EmuTime::param /*time*/) const
{
	return status;
}
byte YM2148::peekStatus(EmuTime::param /*time*/) const
{
	return status;
}

// MSX programs reads the data register.
// Reading consumes the buffered byte: clears RXRDY and drops the rx IRQ.
byte YM2148::readData(EmuTime::param /*time*/)
{
	status &= ~STAT_RXRDY;
	rxIRQ.reset(); // no need to check CMD_RXIE
	return rxBuffer;
}
// Debugger-style read: no side effects.
byte YM2148::peekData(EmuTime::param /*time*/) const
{
	return rxBuffer;
}

// MSX program writes the command register.
// Note: both CMD_IR and CMD_ER return early, so a reset/error-reset cannot be
// combined with other command-bit changes in a single write.
void YM2148::writeCommand(byte value)
{
	if (value & CMD_IR) {
		reset();
		return; // do not process any other commands
	}
	if (value & CMD_ER) {
		status &= ~(STAT_OE | STAT_FE);
		return;
	}

	// Only act on bits that actually changed.
	byte diff = commandReg ^ value;
	commandReg = value;

	if (diff & CMD_RXEN) {
		if (commandReg & CMD_RXEN) {
			// disabled -> enabled
			rxReady = true;
		} else {
			// enabled -> disabled
			rxReady = false;
			syncRecv.removeSyncPoint();
			status &= ~STAT_RXRDY; // IRQ is handled below
		}
	}
	if (diff & CMD_TXEN) {
		if (commandReg & CMD_TXEN) {
			// disabled -> enabled
			status |= STAT_TXRDY; // IRQ is handled below
			// TODO transmitter is ready at this point, does this immediately trigger an IRQ (when IRQs are enabled)?
		} else {
			// enabled -> disabled
			status &= ~STAT_TXRDY; // IRQ handled below
			syncTrans.removeSyncPoint();
		}
	}

	// update IRQ status
	rxIRQ.set((value & CMD_RXIE) && (status & STAT_RXRDY));
	txIRQ.set((value & CMD_TXIE) && (status & STAT_TXRDY));
}

// MSX program writes the data register.
void YM2148::writeData(byte value, EmuTime::param time)
{
	// Writes while the transmitter is disabled are silently dropped.
	if (!(commandReg & CMD_TXEN)) return;

	if (syncTrans.pendingSyncPoint()) {
		// We're still sending the previous character, only buffer
		// this one. Don't accept any further characters.
		txBuffer2 = value;
		status &= ~STAT_TXRDY;
		txIRQ.reset();
	} else {
		// Immediately start sending this character. We're still
		// ready to accept a next character.
		send(value, time);
	}
}

// Start sending a character. It takes a while before it's finished sending.
void YM2148::send(byte value, EmuTime::param time)
{
	txBuffer1 = value;
	syncTrans.setSyncPoint(time + CHAR_DURATION);
}

// Triggered when a character has finished sending.
void YM2148::execTrans(EmuTime::param time)
{
	assert(commandReg & CMD_TXEN);

	// Character is now fully on the wire; deliver it to the out connector.
	outConnector.recvByte(txBuffer1, time);

	if (status & STAT_TXRDY) {
		// No next character to send.
	} else {
		// There already is a next character, start sending that now
		// and accept a next one.
		status |= STAT_TXRDY;
		if (commandReg & CMD_TXIE) txIRQ.set();

		send(txBuffer2, time);
	}
}

// Any pending IRQs?
bool YM2148::pendingIRQ() const
{
	return rxIRQ.getState() || txIRQ.getState();
}

// Savestate support; state was only serialized from format version 2 onwards.
template<typename Archive>
void YM2148::serialize(Archive& ar, unsigned version)
{
	if (ar.versionAtLeast(version, 2)) {
		ar.template serializeBase<MidiInConnector>(*this);
		ar.serialize("outConnector", outConnector,
		             "syncRecv",     syncRecv,
		             "syncTrans",    syncTrans,
		             "rxIRQ",        rxIRQ,
		             "txIRQ",        txIRQ,
		             "rxReady",      rxReady,
		             "rxBuffer",     rxBuffer,
		             "txBuffer1",    txBuffer1,
		             "txBuffer2",    txBuffer2,
		             "status",       status,
		             "commandReg",   commandReg);
	}
}
INSTANTIATE_SERIALIZE_METHODS(YM2148);

} // namespace openmsx
2,622
391
package su.levenetc.android.interactivecanvas;

/**
 * Created by <NAME>.
 */
public interface ITouchEventReceiver {

	/**
	 * Handles one touch event delivered to the interactive canvas.
	 *
	 * @param action action code of the event (presumably an Android
	 *               {@code MotionEvent} action constant — TODO confirm against the sender)
	 * @param x      horizontal coordinate of the touch
	 * @param y      vertical coordinate of the touch
	 */
	void handleTouchEvent(int action, int x, int y);
}
54
315
#include "Module/Probe/Occurrence/Probe_occurrence.hpp"

using namespace aff3ct;
using namespace aff3ct::module;

// Probe that reports a monotonically increasing occurrence counter each time
// it fires (the probed data itself is ignored, only the count is reported).
template <typename T>
Probe_occurrence<T>
::Probe_occurrence(const int size, const std::string &col_name, tools::Reporter_probe& reporter, const int n_frames)
: Probe<T>(size, col_name, reporter, n_frames),
  occurrences(0)
{
	const std::string name = "Probe_occurrence<" + col_name + ">";
	this->set_name(name);
	// Single-wave mode: presumably makes _probe run once for all frames of a
	// wave (hence the manual frame loop below) — TODO confirm against Probe base.
	this->set_single_wave(true);
}

// Report the current counter once per frame, incrementing it after each report.
// Note: 'in' is unused and the caller-supplied frame_id (not the loop index)
// is passed on every iteration.
template <typename T>
void Probe_occurrence<T>
::_probe(const T *in, const size_t frame_id)
{
	for (size_t f = 0; f < this->get_n_frames(); f++)
	{
		this->reporter.probe(this->col_name, (void*)&occurrences, frame_id);
		this->occurrences++;
	}
}

// The reported value is always a 64-bit signed counter, independent of T.
template <typename T>
std::type_index Probe_occurrence<T>
::get_datatype() const
{
	return typeid(int64_t);
}

// Restart counting from zero.
template <typename T>
void Probe_occurrence<T>
::reset()
{
	this->occurrences = 0;
}

// Current value of the counter (number of reports issued since last reset).
template <typename T>
int64_t Probe_occurrence<T>
::get_occurrences() const
{
	return this->occurrences;
}

// ==================================================================================== explicit template instantiation
#include "Tools/types.h"
template class aff3ct::module::Probe_occurrence<R_32>;
template class aff3ct::module::Probe_occurrence<R_64>;
template class aff3ct::module::Probe_occurrence<B_8 >;
template class aff3ct::module::Probe_occurrence<B_16>;
template class aff3ct::module::Probe_occurrence<B_32>;
template class aff3ct::module::Probe_occurrence<B_64>;
// ==================================================================================== explicit template instantiation
549
1,799
package io.cucumber.java.annotation;

import io.cucumber.java.fr.Étantdonné;

import java.math.BigDecimal;

import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.MatcherAssert.assertThat;

/**
 * Step definitions using the French "Étant donné" (Given) annotation,
 * exercising localized parameter types ({bigdecimal}, {int}).
 */
public class FrenchSteps {

	// Matches e.g. "j'ai 5.5 concombres fractionnaires"; the scenario is
	// expected to supply exactly 5.5.
	@Étantdonné("j'ai {bigdecimal} concombres fractionnaires")
	public void jAiConcombresFractionnaires(BigDecimal arg0) {
		assertThat(arg0, is(new BigDecimal("5.5")));
	}

	// Matches e.g. "j'ai 5 concombres"; the scenario is expected to supply 5.
	@Étantdonné("j'ai {int} concombres")
	public void jAiConcombres(int arg0) {
		assertThat(arg0, is(5));
	}
}
231
1,056
<reponame>timfel/netbeans
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.netbeans.modules.debugger.jpda.ui.actions;

import java.beans.PropertyChangeEvent;
import java.beans.PropertyChangeListener;
import javax.swing.SwingUtilities;
import org.netbeans.api.debugger.jpda.JPDADebugger;
import org.netbeans.spi.debugger.ActionsProviderSupport;
import org.openide.util.RequestProcessor;

/**
 * Representation of a debugging session.
 *
 * Base class for debugger actions: listens to the debugger's state property
 * and lets subclasses recompute their enabled status on every state change.
 *
 * @author <NAME>
 * @author <NAME>
 */
abstract class JPDADebuggerAction extends ActionsProviderSupport implements PropertyChangeListener {

    private JPDADebugger debugger;

    JPDADebuggerAction (JPDADebugger debugger) {
        this.debugger = debugger;
        // NOTE(review): PROP_STATE is accessed via the instance; it looks like
        // a constant on JPDADebugger — confirm before "fixing" to a static access.
        debugger.addPropertyChangeListener (debugger.PROP_STATE, this);
    }

    // Re-evaluate enablement whenever the debugger state changes.
    @Override
    public void propertyChange (PropertyChangeEvent evt) {
        try {
            checkEnabled (debugger.getState ());
        } catch (com.sun.jdi.VMDisconnectedException e) {
            // Causes kill action when something is being evaluated
        }
    }

    /** Subclasses update their enabled status for the given debugger state. */
    protected abstract void checkEnabled (int debuggerState);

    // Guard that lets a direct setEnabledSingleAction() call override (cancel)
    // the result of an in-flight lazy computation. Accessed on the EDT.
    private boolean canApplyLazyEnabled = false;

    /**
     * Call this from {@link #checkEnabled(int)} when the code needs to be run outside of AWT.
     * Override {@link #checkEnabledLazyImpl(int)} method, which will be called
     * in the provided RequestProcessor. The returned enabled status is set through
     * {@link #setEnabledSingleAction(boolean)}.
     * <p>
     * Do not call {@link #setEnabled(java.lang.Object, boolean)} method! When
     * you also need to set the status directly, use {@link #setEnabledSingleAction(boolean)},
     * which correctly cooperates with the lazy code.
     *
     * @param debuggerState the debugger state to evaluate
     * @param rp worker thread pool on which the lazy check runs
     */
    protected final void checkEnabledLazySingleAction(final int debuggerState, RequestProcessor rp) {
        canApplyLazyEnabled = true;
        rp.post(new Runnable() {
            @Override
            public void run() {
                // Compute off the EDT, then apply the result back on the EDT.
                final boolean enabled = checkEnabledLazyImpl(debuggerState);
                SwingUtilities.invokeLater(new Runnable() {
                    @Override
                    public void run() {
                        if (canApplyLazyEnabled) {
                            setEnabledSingleAction(enabled);
                            // Deliberately re-arm the flag (setEnabledSingleAction
                            // clears it) so later lazy results may still apply.
                            canApplyLazyEnabled = true; // In case there were several lazy invocations
                        }
                    }
                });
            }
        });
    }

    /** Do not call setEnabled(), return the enabled state instead. */
    protected boolean checkEnabledLazyImpl (int debuggerState) {
        return false;
    }

    // Directly set the enabled status of this provider's single action and
    // cancel any pending lazy result.
    protected final void setEnabledSingleAction(boolean enabled) {
        canApplyLazyEnabled = false;
        setEnabled(getActions().iterator().next(), enabled);
    }

    /** @return the debugger this action is bound to */
    JPDADebugger getDebuggerImpl () {
        return debugger;
    }
}
24,206
package com.alibaba.excel.write.style.row; import com.alibaba.excel.write.handler.RowWriteHandler; import com.alibaba.excel.write.handler.context.RowWriteHandlerContext; import org.apache.poi.ss.usermodel.Row; /** * Set the row height strategy * * @author <NAME> */ public abstract class AbstractRowHeightStyleStrategy implements RowWriteHandler { @Override public void afterRowDispose(RowWriteHandlerContext context) { if (context.getHead() == null) { return; } if (context.getHead()) { setHeadColumnHeight(context.getRow(), context.getRelativeRowIndex()); } else { setContentColumnHeight(context.getRow(), context.getRelativeRowIndex()); } } /** * Sets the height of header * * @param row * @param relativeRowIndex */ protected abstract void setHeadColumnHeight(Row row, int relativeRowIndex); /** * Sets the height of content * * @param row * @param relativeRowIndex */ protected abstract void setContentColumnHeight(Row row, int relativeRowIndex); }
415
29,258
<reponame>weihubeats/arthas
package com.taobao.arthas.core.security;

import java.security.Principal;

import javax.security.auth.Subject;
import javax.security.auth.login.LoginException;

import org.assertj.core.api.Assertions;
import org.junit.Test;

/**
 * Tests for {@link SecurityAuthenticatorImpl}.
 *
 * @author hengyunabc 2021-03-04
 *
 */
public class SecurityAuthenticatorImplTest {

    // With both username and password configured, login must be required and a
    // matching principal must authenticate successfully (non-null Subject).
    @Test
    public void test1() throws LoginException {
        String username = "test";
        String password = "<PASSWORD>";
        SecurityAuthenticatorImpl auth = new SecurityAuthenticatorImpl(username, password);
        Assertions.assertThat(auth.needLogin()).isTrue();

        Principal principal = new BasicPrincipal(username, password);
        Subject subject = auth.login(principal);
        Assertions.assertThat(subject).isNotNull();
    }

    // A username with a null password still requires login.
    @Test
    public void test2() {
        String username = "test";
        String password = null;
        SecurityAuthenticatorImpl auth = new SecurityAuthenticatorImpl(username, password);
        Assertions.assertThat(auth.needLogin()).isTrue();
    }
}
386
348
<filename>docs/data/leg-t1/085/08504064.json {"nom":"Chauché","circ":"4ème circonscription","dpt":"Vendée","inscrits":1815,"abs":872,"votants":943,"blancs":7,"nuls":1,"exp":935,"res":[{"nuance":"UDI","nom":"<NAME>","voix":517},{"nuance":"REM","nom":"Mme <NAME>","voix":233},{"nuance":"FN","nom":"Mme <NAME>","voix":86},{"nuance":"FI","nom":"Mme <NAME>","voix":38},{"nuance":"ECO","nom":"Mme <NAME>","voix":35},{"nuance":"COM","nom":"Mme <NAME>","voix":12},{"nuance":"EXG","nom":"Mme <NAME>","voix":6},{"nuance":"DVD","nom":"Mme <NAME>","voix":5},{"nuance":"DIV","nom":"Mme <NAME>","voix":3}]}
247
1,056
<gh_stars>1000+
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.netbeans.api.visual.action;

import org.netbeans.api.visual.widget.Widget;

import java.awt.*;

/**
 * This interface provides a resizing strategy.
 * Implementations may adjust user-suggested bounds (e.g. snap-to-grid,
 * locked axis) before they are applied to the widget.
 *
 * @author <NAME>
 */
public interface ResizeStrategy {

    /**
     * Called after an user suggests a new boundary and before the suggested boundary is stored to a specified widget.
     * This allows to manipulate with a suggested boundary to perform snap-to-grid, locked-axis on any other resizing strategy.
     * @param widget the resized widget
     * @param originalBounds the original bounds of the resizing widget
     * @param suggestedBounds the bounds of the resizing widget suggested by an user (usually by a mouse cursor position)
     * @param controlPoint the control point that is used by an user for resizing
     * @return the new (optionally modified) boundary processed by the strategy
     */
    public Rectangle boundsSuggested (Widget widget, Rectangle originalBounds, Rectangle suggestedBounds, ResizeProvider.ControlPoint controlPoint);
}
711
/*
 * Copyright 2015 JBoss Inc
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.apiman.gateway.engine.policies;

import io.apiman.gateway.engine.beans.PolicyFailure;
import io.apiman.gateway.engine.beans.PolicyFailureType;
import io.apiman.gateway.engine.beans.ApiRequest;
import io.apiman.gateway.engine.components.IPolicyFailureFactoryComponent;
import io.apiman.gateway.engine.policies.config.RateLimitingConfig;
import io.apiman.gateway.engine.policies.i18n.Messages;

/**
 * Similar to the rate limiting policy, but less granular.  Useful primarily
 * so that both a quota and a rate limit can be active at the same time.
 *
 * All behavior is inherited from {@link RateLimitingPolicy}; this subclass
 * only swaps in quota-specific failure info, response headers, and a
 * distinct rate-bucket namespace.
 *
 * @author <EMAIL>
 */
public class QuotaPolicy extends RateLimitingPolicy {

    // Response headers reported to clients for quota (rather than rate) limits.
    private static final String DEFAULT_LIMIT_HEADER = "X-Quota-Limit"; //$NON-NLS-1$
    private static final String DEFAULT_REMAINING_HEADER = "X-Quota-Remaining"; //$NON-NLS-1$
    private static final String DEFAULT_RESET_HEADER = "X-Quota-Reset"; //$NON-NLS-1$

    /**
     * Constructor.
     */
    public QuotaPolicy() {
    }

    /**
     * Builds the failure returned when the quota is exhausted; uses HTTP 429.
     *
     * @see io.apiman.gateway.engine.policies.RateLimitingPolicy#limitExceededFailure(io.apiman.gateway.engine.components.IPolicyFailureFactoryComponent)
     */
    @Override
    protected PolicyFailure limitExceededFailure(IPolicyFailureFactoryComponent failureFactory) {
        PolicyFailure failure = failureFactory.createFailure(PolicyFailureType.Other, PolicyFailureCodes.REQUEST_QUOTA_EXCEEDED, Messages.i18n.format("QuotaPolicy.QuotaExceeded")); //$NON-NLS-1$
        failure.setResponseCode(429);
        return failure;
    }

    /**
     * @see io.apiman.gateway.engine.policies.RateLimitingPolicy#defaultLimitHeader()
     */
    @Override
    protected String defaultLimitHeader() {
        return DEFAULT_LIMIT_HEADER;
    }

    /**
     * @see io.apiman.gateway.engine.policies.RateLimitingPolicy#defaultRemainingHeader()
     */
    @Override
    protected String defaultRemainingHeader() {
        return DEFAULT_REMAINING_HEADER;
    }

    /**
     * @see io.apiman.gateway.engine.policies.RateLimitingPolicy#defaultResetHeader()
     */
    @Override
    protected String defaultResetHeader() {
        return DEFAULT_RESET_HEADER;
    }

    /**
     * Prefixes the bucket id so quota counters never collide with the parent
     * rate-limiter's counters (allows both policies on the same API).
     *
     * @see io.apiman.gateway.engine.policies.RateLimitingPolicy#createBucketId(io.apiman.gateway.engine.beans.ApiRequest, io.apiman.gateway.engine.policies.config.RateLimitingConfig)
     */
    @Override
    protected String createBucketId(ApiRequest request, RateLimitingConfig config) {
        return "QUOTA||" + super.createBucketId(request, config); //$NON-NLS-1$
    }
}
965
void CMyDlg::SetSpinRange()
{
	// Set the min and max range of the up/down (spin) control.
	// Per the Win32 UDM_SETRANGE message, the LOWORD of lParam is the
	// maximum position and the HIWORD is the minimum position, so
	// MAKELONG(8, 1) restricts the control to values 1..8.
	SendDlgItemMessage(IDC_SPIN1, UDM_SETRANGE, 0, (LPARAM)MAKELONG(8, 1));
}
74
416
<gh_stars>100-1000 /* * Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.tencentcloudapi.iai.v20180301.models; import com.tencentcloudapi.common.AbstractModel; import com.google.gson.annotations.SerializedName; import com.google.gson.annotations.Expose; import java.util.HashMap; public class CompareFaceRequest extends AbstractModel{ /** * A 图片 base64 数据,base64 编码后大小不可超过5M。 若图片中包含多张人脸,只选取其中人脸面积最大的人脸。 支持PNG、JPG、JPEG、BMP,不支持 GIF 图片。 */ @SerializedName("ImageA") @Expose private String ImageA; /** * B 图片 base64 数据,base64 编码后大小不可超过5M。 若图片中包含多张人脸,只选取其中人脸面积最大的人脸。 支持PNG、JPG、JPEG、BMP,不支持 GIF 图片。 */ @SerializedName("ImageB") @Expose private String ImageB; /** * A 图片的 Url ,对应图片 base64 编码后大小不可超过5M。 A 图片的 Url、Image必须提供一个,如果都提供,只使用 Url。 图片存储于腾讯云的Url可保障更高下载速度和稳定性,建议图片存储于腾讯云。 非腾讯云存储的Url速度和稳定性可能受一定影响。 若图片中包含多张人脸,只选取其中人脸面积最大的人脸。 支持PNG、JPG、JPEG、BMP,不支持 GIF 图片。 */ @SerializedName("UrlA") @Expose private String UrlA; /** * B 图片的 Url ,对应图片 base64 编码后大小不可超过5M。 B 图片的 Url、Image必须提供一个,如果都提供,只使用 Url。 图片存储于腾讯云的Url可保障更高下载速度和稳定性,建议图片存储于腾讯云。 非腾讯云存储的Url速度和稳定性可能受一定影响。 若图片中包含多张人脸,只选取其中人脸面积最大的人脸。 支持PNG、JPG、JPEG、BMP,不支持 GIF 图片。 */ @SerializedName("UrlB") @Expose private String UrlB; /** * 人脸识别服务所用的算法模型版本。 目前入参支持 “2.0”和“3.0“ 两个输入。 2020年4月2日开始,默认为“3.0”,之前使用过本接口的账号若未填写本参数默认为“2.0”。 2020年11月26日后开通服务的账号仅支持输入“3.0”。 不同算法模型版本对应的人脸识别算法不同,新版本的整体效果会优于旧版本,建议使用“3.0”版本。 */ @SerializedName("FaceModelVersion") @Expose private String 
FaceModelVersion; /** * 图片质量控制。 0: 不进行控制; 1:较低的质量要求,图像存在非常模糊,眼睛鼻子嘴巴遮挡至少其中一种或多种的情况; 2: 一般的质量要求,图像存在偏亮,偏暗,模糊或一般模糊,眉毛遮挡,脸颊遮挡,下巴遮挡,至少其中三种的情况; 3: 较高的质量要求,图像存在偏亮,偏暗,一般模糊,眉毛遮挡,脸颊遮挡,下巴遮挡,其中一到两种的情况; 4: 很高的质量要求,各个维度均为最好或最多在某一维度上存在轻微问题; 默认 0。 若图片质量不满足要求,则返回结果中会提示图片质量检测不符要求。 */ @SerializedName("QualityControl") @Expose private Long QualityControl; /** * 是否开启图片旋转识别支持。0为不开启,1为开启。默认为0。本参数的作用为,当图片中的人脸被旋转且图片没有exif信息时,如果不开启图片旋转识别支持则无法正确检测、识别图片中的人脸。若您确认图片包含exif信息或者您确认输入图中人脸不会出现被旋转情况,请不要开启本参数。开启后,整体耗时将可能增加数百毫秒。 */ @SerializedName("NeedRotateDetection") @Expose private Long NeedRotateDetection; /** * Get A 图片 base64 数据,base64 编码后大小不可超过5M。 若图片中包含多张人脸,只选取其中人脸面积最大的人脸。 支持PNG、JPG、JPEG、BMP,不支持 GIF 图片。 * @return ImageA A 图片 base64 数据,base64 编码后大小不可超过5M。 若图片中包含多张人脸,只选取其中人脸面积最大的人脸。 支持PNG、JPG、JPEG、BMP,不支持 GIF 图片。 */ public String getImageA() { return this.ImageA; } /** * Set A 图片 base64 数据,base64 编码后大小不可超过5M。 若图片中包含多张人脸,只选取其中人脸面积最大的人脸。 支持PNG、JPG、JPEG、BMP,不支持 GIF 图片。 * @param ImageA A 图片 base64 数据,base64 编码后大小不可超过5M。 若图片中包含多张人脸,只选取其中人脸面积最大的人脸。 支持PNG、JPG、JPEG、BMP,不支持 GIF 图片。 */ public void setImageA(String ImageA) { this.ImageA = ImageA; } /** * Get B 图片 base64 数据,base64 编码后大小不可超过5M。 若图片中包含多张人脸,只选取其中人脸面积最大的人脸。 支持PNG、JPG、JPEG、BMP,不支持 GIF 图片。 * @return ImageB B 图片 base64 数据,base64 编码后大小不可超过5M。 若图片中包含多张人脸,只选取其中人脸面积最大的人脸。 支持PNG、JPG、JPEG、BMP,不支持 GIF 图片。 */ public String getImageB() { return this.ImageB; } /** * Set B 图片 base64 数据,base64 编码后大小不可超过5M。 若图片中包含多张人脸,只选取其中人脸面积最大的人脸。 支持PNG、JPG、JPEG、BMP,不支持 GIF 图片。 * @param ImageB B 图片 base64 数据,base64 编码后大小不可超过5M。 若图片中包含多张人脸,只选取其中人脸面积最大的人脸。 支持PNG、JPG、JPEG、BMP,不支持 GIF 图片。 */ public void setImageB(String ImageB) { this.ImageB = ImageB; } /** * Get A 图片的 Url ,对应图片 base64 编码后大小不可超过5M。 A 图片的 Url、Image必须提供一个,如果都提供,只使用 Url。 图片存储于腾讯云的Url可保障更高下载速度和稳定性,建议图片存储于腾讯云。 非腾讯云存储的Url速度和稳定性可能受一定影响。 若图片中包含多张人脸,只选取其中人脸面积最大的人脸。 支持PNG、JPG、JPEG、BMP,不支持 GIF 图片。 * @return UrlA A 图片的 Url ,对应图片 base64 编码后大小不可超过5M。 A 图片的 Url、Image必须提供一个,如果都提供,只使用 Url。 图片存储于腾讯云的Url可保障更高下载速度和稳定性,建议图片存储于腾讯云。 
非腾讯云存储的Url速度和稳定性可能受一定影响。 若图片中包含多张人脸,只选取其中人脸面积最大的人脸。 支持PNG、JPG、JPEG、BMP,不支持 GIF 图片。 */ public String getUrlA() { return this.UrlA; } /** * Set A 图片的 Url ,对应图片 base64 编码后大小不可超过5M。 A 图片的 Url、Image必须提供一个,如果都提供,只使用 Url。 图片存储于腾讯云的Url可保障更高下载速度和稳定性,建议图片存储于腾讯云。 非腾讯云存储的Url速度和稳定性可能受一定影响。 若图片中包含多张人脸,只选取其中人脸面积最大的人脸。 支持PNG、JPG、JPEG、BMP,不支持 GIF 图片。 * @param UrlA A 图片的 Url ,对应图片 base64 编码后大小不可超过5M。 A 图片的 Url、Image必须提供一个,如果都提供,只使用 Url。 图片存储于腾讯云的Url可保障更高下载速度和稳定性,建议图片存储于腾讯云。 非腾讯云存储的Url速度和稳定性可能受一定影响。 若图片中包含多张人脸,只选取其中人脸面积最大的人脸。 支持PNG、JPG、JPEG、BMP,不支持 GIF 图片。 */ public void setUrlA(String UrlA) { this.UrlA = UrlA; } /** * Get B 图片的 Url ,对应图片 base64 编码后大小不可超过5M。 B 图片的 Url、Image必须提供一个,如果都提供,只使用 Url。 图片存储于腾讯云的Url可保障更高下载速度和稳定性,建议图片存储于腾讯云。 非腾讯云存储的Url速度和稳定性可能受一定影响。 若图片中包含多张人脸,只选取其中人脸面积最大的人脸。 支持PNG、JPG、JPEG、BMP,不支持 GIF 图片。 * @return UrlB B 图片的 Url ,对应图片 base64 编码后大小不可超过5M。 B 图片的 Url、Image必须提供一个,如果都提供,只使用 Url。 图片存储于腾讯云的Url可保障更高下载速度和稳定性,建议图片存储于腾讯云。 非腾讯云存储的Url速度和稳定性可能受一定影响。 若图片中包含多张人脸,只选取其中人脸面积最大的人脸。 支持PNG、JPG、JPEG、BMP,不支持 GIF 图片。 */ public String getUrlB() { return this.UrlB; } /** * Set B 图片的 Url ,对应图片 base64 编码后大小不可超过5M。 B 图片的 Url、Image必须提供一个,如果都提供,只使用 Url。 图片存储于腾讯云的Url可保障更高下载速度和稳定性,建议图片存储于腾讯云。 非腾讯云存储的Url速度和稳定性可能受一定影响。 若图片中包含多张人脸,只选取其中人脸面积最大的人脸。 支持PNG、JPG、JPEG、BMP,不支持 GIF 图片。 * @param UrlB B 图片的 Url ,对应图片 base64 编码后大小不可超过5M。 B 图片的 Url、Image必须提供一个,如果都提供,只使用 Url。 图片存储于腾讯云的Url可保障更高下载速度和稳定性,建议图片存储于腾讯云。 非腾讯云存储的Url速度和稳定性可能受一定影响。 若图片中包含多张人脸,只选取其中人脸面积最大的人脸。 支持PNG、JPG、JPEG、BMP,不支持 GIF 图片。 */ public void setUrlB(String UrlB) { this.UrlB = UrlB; } /** * Get 人脸识别服务所用的算法模型版本。 目前入参支持 “2.0”和“3.0“ 两个输入。 2020年4月2日开始,默认为“3.0”,之前使用过本接口的账号若未填写本参数默认为“2.0”。 2020年11月26日后开通服务的账号仅支持输入“3.0”。 不同算法模型版本对应的人脸识别算法不同,新版本的整体效果会优于旧版本,建议使用“3.0”版本。 * @return FaceModelVersion 人脸识别服务所用的算法模型版本。 目前入参支持 “2.0”和“3.0“ 两个输入。 2020年4月2日开始,默认为“3.0”,之前使用过本接口的账号若未填写本参数默认为“2.0”。 2020年11月26日后开通服务的账号仅支持输入“3.0”。 不同算法模型版本对应的人脸识别算法不同,新版本的整体效果会优于旧版本,建议使用“3.0”版本。 */ public String getFaceModelVersion() { return this.FaceModelVersion; } /** * Set 
人脸识别服务所用的算法模型版本。 目前入参支持 “2.0”和“3.0“ 两个输入。 2020年4月2日开始,默认为“3.0”,之前使用过本接口的账号若未填写本参数默认为“2.0”。 2020年11月26日后开通服务的账号仅支持输入“3.0”。 不同算法模型版本对应的人脸识别算法不同,新版本的整体效果会优于旧版本,建议使用“3.0”版本。 * @param FaceModelVersion 人脸识别服务所用的算法模型版本。 目前入参支持 “2.0”和“3.0“ 两个输入。 2020年4月2日开始,默认为“3.0”,之前使用过本接口的账号若未填写本参数默认为“2.0”。 2020年11月26日后开通服务的账号仅支持输入“3.0”。 不同算法模型版本对应的人脸识别算法不同,新版本的整体效果会优于旧版本,建议使用“3.0”版本。 */ public void setFaceModelVersion(String FaceModelVersion) { this.FaceModelVersion = FaceModelVersion; } /** * Get 图片质量控制。 0: 不进行控制; 1:较低的质量要求,图像存在非常模糊,眼睛鼻子嘴巴遮挡至少其中一种或多种的情况; 2: 一般的质量要求,图像存在偏亮,偏暗,模糊或一般模糊,眉毛遮挡,脸颊遮挡,下巴遮挡,至少其中三种的情况; 3: 较高的质量要求,图像存在偏亮,偏暗,一般模糊,眉毛遮挡,脸颊遮挡,下巴遮挡,其中一到两种的情况; 4: 很高的质量要求,各个维度均为最好或最多在某一维度上存在轻微问题; 默认 0。 若图片质量不满足要求,则返回结果中会提示图片质量检测不符要求。 * @return QualityControl 图片质量控制。 0: 不进行控制; 1:较低的质量要求,图像存在非常模糊,眼睛鼻子嘴巴遮挡至少其中一种或多种的情况; 2: 一般的质量要求,图像存在偏亮,偏暗,模糊或一般模糊,眉毛遮挡,脸颊遮挡,下巴遮挡,至少其中三种的情况; 3: 较高的质量要求,图像存在偏亮,偏暗,一般模糊,眉毛遮挡,脸颊遮挡,下巴遮挡,其中一到两种的情况; 4: 很高的质量要求,各个维度均为最好或最多在某一维度上存在轻微问题; 默认 0。 若图片质量不满足要求,则返回结果中会提示图片质量检测不符要求。 */ public Long getQualityControl() { return this.QualityControl; } /** * Set 图片质量控制。 0: 不进行控制; 1:较低的质量要求,图像存在非常模糊,眼睛鼻子嘴巴遮挡至少其中一种或多种的情况; 2: 一般的质量要求,图像存在偏亮,偏暗,模糊或一般模糊,眉毛遮挡,脸颊遮挡,下巴遮挡,至少其中三种的情况; 3: 较高的质量要求,图像存在偏亮,偏暗,一般模糊,眉毛遮挡,脸颊遮挡,下巴遮挡,其中一到两种的情况; 4: 很高的质量要求,各个维度均为最好或最多在某一维度上存在轻微问题; 默认 0。 若图片质量不满足要求,则返回结果中会提示图片质量检测不符要求。 * @param QualityControl 图片质量控制。 0: 不进行控制; 1:较低的质量要求,图像存在非常模糊,眼睛鼻子嘴巴遮挡至少其中一种或多种的情况; 2: 一般的质量要求,图像存在偏亮,偏暗,模糊或一般模糊,眉毛遮挡,脸颊遮挡,下巴遮挡,至少其中三种的情况; 3: 较高的质量要求,图像存在偏亮,偏暗,一般模糊,眉毛遮挡,脸颊遮挡,下巴遮挡,其中一到两种的情况; 4: 很高的质量要求,各个维度均为最好或最多在某一维度上存在轻微问题; 默认 0。 若图片质量不满足要求,则返回结果中会提示图片质量检测不符要求。 */ public void setQualityControl(Long QualityControl) { this.QualityControl = QualityControl; } /** * Get 是否开启图片旋转识别支持。0为不开启,1为开启。默认为0。本参数的作用为,当图片中的人脸被旋转且图片没有exif信息时,如果不开启图片旋转识别支持则无法正确检测、识别图片中的人脸。若您确认图片包含exif信息或者您确认输入图中人脸不会出现被旋转情况,请不要开启本参数。开启后,整体耗时将可能增加数百毫秒。 * @return NeedRotateDetection 
是否开启图片旋转识别支持。0为不开启,1为开启。默认为0。本参数的作用为,当图片中的人脸被旋转且图片没有exif信息时,如果不开启图片旋转识别支持则无法正确检测、识别图片中的人脸。若您确认图片包含exif信息或者您确认输入图中人脸不会出现被旋转情况,请不要开启本参数。开启后,整体耗时将可能增加数百毫秒。 */ public Long getNeedRotateDetection() { return this.NeedRotateDetection; } /** * Set 是否开启图片旋转识别支持。0为不开启,1为开启。默认为0。本参数的作用为,当图片中的人脸被旋转且图片没有exif信息时,如果不开启图片旋转识别支持则无法正确检测、识别图片中的人脸。若您确认图片包含exif信息或者您确认输入图中人脸不会出现被旋转情况,请不要开启本参数。开启后,整体耗时将可能增加数百毫秒。 * @param NeedRotateDetection 是否开启图片旋转识别支持。0为不开启,1为开启。默认为0。本参数的作用为,当图片中的人脸被旋转且图片没有exif信息时,如果不开启图片旋转识别支持则无法正确检测、识别图片中的人脸。若您确认图片包含exif信息或者您确认输入图中人脸不会出现被旋转情况,请不要开启本参数。开启后,整体耗时将可能增加数百毫秒。 */ public void setNeedRotateDetection(Long NeedRotateDetection) { this.NeedRotateDetection = NeedRotateDetection; } public CompareFaceRequest() { } /** * NOTE: Any ambiguous key set via .set("AnyKey", "value") will be a shallow copy, * and any explicit key, i.e Foo, set via .setFoo("value") will be a deep copy. */ public CompareFaceRequest(CompareFaceRequest source) { if (source.ImageA != null) { this.ImageA = new String(source.ImageA); } if (source.ImageB != null) { this.ImageB = new String(source.ImageB); } if (source.UrlA != null) { this.UrlA = new String(source.UrlA); } if (source.UrlB != null) { this.UrlB = new String(source.UrlB); } if (source.FaceModelVersion != null) { this.FaceModelVersion = new String(source.FaceModelVersion); } if (source.QualityControl != null) { this.QualityControl = new Long(source.QualityControl); } if (source.NeedRotateDetection != null) { this.NeedRotateDetection = new Long(source.NeedRotateDetection); } } /** * Internal implementation, normal users should not use it. 
*/ public void toMap(HashMap<String, String> map, String prefix) { this.setParamSimple(map, prefix + "ImageA", this.ImageA); this.setParamSimple(map, prefix + "ImageB", this.ImageB); this.setParamSimple(map, prefix + "UrlA", this.UrlA); this.setParamSimple(map, prefix + "UrlB", this.UrlB); this.setParamSimple(map, prefix + "FaceModelVersion", this.FaceModelVersion); this.setParamSimple(map, prefix + "QualityControl", this.QualityControl); this.setParamSimple(map, prefix + "NeedRotateDetection", this.NeedRotateDetection); } }
12,316
462
from datetime import datetime, timedelta

from flask import session
from sqlalchemy import func, desc

from cloud_inquisitor.constants import ROLE_ADMIN
from cloud_inquisitor.database import db
from cloud_inquisitor.log import auditlog
from cloud_inquisitor.plugins import BaseView
from cloud_inquisitor.schema import LogEvent
from cloud_inquisitor.utils import MenuItem
from cloud_inquisitor.wrappers import check_auth, rollback


class Logs(BaseView):
    """Admin API view for listing and pruning application log events."""
    URLS = ['/api/v1/logs']
    MENU_ITEMS = [
        MenuItem(
            'admin',
            'Logs',
            'log.list',
            'log',
            args={
                'page': 1,
                'count': 100,
                'levelno': None
            },
            order=90
        )
    ]

    @rollback
    @check_auth(ROLE_ADMIN)
    def get(self):
        """Return one page of log events, newest first.

        Query arguments:
            count: page size (default 100)
            page: 1-based page number (0/1 both mean the first page)
            levelno: minimum logging level number; 0 disables the filter

        Returns:
            Response with ``logEventCount`` (total matching rows) and
            ``logEvents`` (the requested page).
        """
        self.reqparse.add_argument('count', type=int, default=100)
        self.reqparse.add_argument('page', type=int, default=0)
        self.reqparse.add_argument('levelno', type=int, default=0)
        args = self.reqparse.parse_args()

        # Apply the level filter to both the count and the page query,
        # but only when a positive level was requested.
        if args['levelno'] > 0:
            total_events = db.query(
                func.count(LogEvent.log_event_id)
            ).filter(LogEvent.levelno >= args['levelno']).first()[0]

            qry = (
                db.LogEvent
                .filter(LogEvent.levelno >= args['levelno'])
                .order_by(desc(LogEvent.timestamp))
                .limit(args['count'])
            )
        else:
            total_events = db.query(func.count(LogEvent.log_event_id)).first()[0]
            qry = (
                db.LogEvent
                .order_by(desc(LogEvent.timestamp))
                .limit(args['count'])
            )

        # Pages are 1-based; only pages beyond the first need an offset.
        if (args['page'] - 1) > 0:
            offset = (args['page'] - 1) * args['count']
            qry = qry.offset(offset)

        events = qry.all()

        return self.make_response({
            'logEventCount': total_events,
            'logEvents': events
        })

    @rollback
    @check_auth(ROLE_ADMIN)
    def delete(self):
        """Delete log events older than ``maxAge`` days (default 31)."""
        self.reqparse.add_argument('maxAge', type=int, default=31)
        args = self.reqparse.parse_args()

        # BUGFIX: the cutoff comparison was previously wrapped in
        # func.datesub(...), which emitted invalid SQL (DATESUB applied to a
        # boolean expression). Filter on the comparison directly instead.
        db.LogEvent.filter(
            LogEvent.timestamp < datetime.now() - timedelta(days=args['maxAge'])
        ).delete()
        db.session.commit()

        auditlog(event='logs.prune', actor=session['user'].username, data=args)
        return self.make_response('Pruned logs older than {} days'.format(args['maxAge']))


class LogDetails(BaseView):
    """Admin API view returning a single log event by id."""
    URLS = ['/api/v1/logs/<int:logEventId>']

    @rollback
    @check_auth(ROLE_ADMIN)
    def get(self, logEventId):
        """Fetch one log event by its primary key.

        Args:
            logEventId: primary key of the :class:`LogEvent` row.
        """
        evt = db.LogEvent.find_one(LogEvent.log_event_id == logEventId)

        return self.make_response({'logEvent': evt})
1,383
7,482
/* * Copyright (c) 2011-2012, Freescale Semiconductor, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * o Redistributions of source code must retain the above copyright notice, this list * of conditions and the following disclaimer. * * o Redistributions in binary form must reproduce the above copyright notice, this * list of conditions and the following disclaimer in the documentation and/or * other materials provided with the distribution. * * o Neither the name of Freescale Semiconductor, Inc. nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

#ifndef _IMX_SPI_NOR_NUMONYX_H_
#define _IMX_SPI_NOR_NUMONYX_H_

#include "sdk.h"

/* Maximum payload size for a single SPI NOR transfer, in bytes. */
#define SPI_NOR_XFER_SZ 512

/* Flash command opcodes (names follow the datasheet mnemonics). */
#define WREN 0x06      /* Write Enable */
#define WRDI 0x04      /* Write Disable */
#define RDID 0x9F      /* Read Identification */
#define RDSR 0x05      /* Read Status Register */
#define WRSR 0x01      /* Write Status Register */
#define READ 0x03      /* Read data (normal speed) */
#define FAST_READ 0x0B /* Read data (fast, with dummy byte) */
#define PP 0x02        /* Page Program */
#define SE 0xD8        /* Sector Erase */
#define BE 0xC7        /* Bulk (chip) Erase */
#define DP 0xB9        /* Deep Power-down */
#define RES 0xAB       /* Release from deep power-down / read signature */

/* Status-register (RDSR) bit masks. */
#define RDSR_BUSY (1 << 0) /* Write/erase in progress */
#define RDSR_WEL (1 << 1)  /* Write enable latch */
#define RDSR_BP0 (1 << 2)  /* Block protect bit 0 */
#define RDSR_BP1 (1 << 3)  /* Block protect bit 1 */
#define RDSR_BP2 (1 << 4)  /* Block protect bit 2 */
#define RDSR_SRWD (1 << 7) /* Status register write disable */

/* Command frame / driver buffer sizes, in bytes. */
#define NUMONYX_CMD_SZ 4
#define NUMONYX_MAX_RX 36
#define NUMONYX_MAX_TX 36

/* Device geometry. */
#define SZ_64K 0x10000
#define SZ_32K 0x8000
#define SZ_4K 0x1000
#define SZ_SECTOR SZ_64K          /* erase-sector size */
#define SZ_CHIP (SZ_SECTOR * 64)  /* total device size: 64 sectors */
#define SZ_PAGE 256               /* program-page size */
#define SZ_MASK 0xFFFFFF00        /* address bits selecting the page */
#define SZ_OFFSET 0x000000FF      /* address bits within a page */

/* Generic transfer-failure return code. */
#define TRANS_FAIL -1

/* Driver entry points: identify, erase, read and write the flash. */
extern int spi_nor_query_numonyx(uint8_t *);
extern int spi_nor_erase_numonyx(uint32_t, uint32_t);
extern int spi_nor_read_numonyx(uint32_t, uint8_t *, int);
extern int spi_nor_write_numonyx(uint32_t, uint8_t *, int);

#endif
1,078
852
//------------------------------------------------- // // Class: L1MuGMTLFPhiProEtaConvLUT // // // // Author : // <NAME> HEPHY Vienna // // Migrated to CMSSW: // <NAME> // //-------------------------------------------------- //----------------------- // This Class's Header -- //----------------------- #include "L1Trigger/GlobalMuonTrigger/src/L1MuGMTLFPhiProEtaConvLUT.h" //--------------- // C++ Headers -- //--------------- //#include <iostream> //------------------------------- // Collaborating Class Headers -- //------------------------------- #include "L1Trigger/GlobalMuonTrigger/src/L1MuGMTConfig.h" #include "CondFormats/L1TObjects/interface/L1MuGMTScales.h" #include "CondFormats/L1TObjects/interface/L1MuTriggerScales.h" #include "CondFormats/L1TObjects/interface/L1MuPacking.h" #include "FWCore/MessageLogger/interface/MessageLogger.h" //------------------- // InitParameters -- //------------------- void L1MuGMTLFPhiProEtaConvLUT::InitParameters() {} //------------------------ // The Lookup Function -- //------------------------ // // The LUT converts eta from 6 to 4 bits in order to use it as an Input to the Phi Projection // LUT in the Logic FPGA. It uses the same Scales as in the MIP/ISO AU Chip. // unsigned L1MuGMTLFPhiProEtaConvLUT::TheLookupFunction(int idx, unsigned eta_in) const { // idx is DT, BRPC, CSC, FRPC // INPUTS: eta_in(6) // OUTPUTS: eta_out(4) const L1MuGMTScales* theGMTScales = L1MuGMTConfig::getGMTScales(); const L1MuTriggerScales* theTriggerScales = L1MuGMTConfig::getTriggerScales(); int isRPC = idx % 2; int isFWD = idx / 2; float etaValue = theTriggerScales->getRegionalEtaScale(idx)->getCenter(eta_in); unsigned eta4bit = 0; if ((isRPC && isFWD && fabs(etaValue) < theGMTScales->getReducedEtaScale(3)->getScaleMin()) || (isRPC && !isFWD && fabs(etaValue) > theGMTScales->getReducedEtaScale(1)->getScaleMax())) { if (!m_saveFlag) edm::LogWarning("LUTRangeViolation") << "L1MuGMTMIAUEtaConvLUT::TheLookupFunction: RPC " << (isFWD ? 
"fwd" : "brl") << " eta value out of range: " << etaValue; } else eta4bit = theGMTScales->getReducedEtaScale(idx)->getPacked(etaValue); return eta4bit; }
854
1,322
"""Standard plugin package."""
7
3,937
// Copyright (c) 2003-present, Jodd Team (http://jodd.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. 
package jodd.madvoc.injector;

import jodd.bean.BeanUtil;
import jodd.madvoc.WebApp;
import jodd.madvoc.config.Targets;
import jodd.madvoc.scope.ParamsScope;
import jodd.petite.PetiteContainer;
import jodd.util.StringUtil;
import org.junit.jupiter.api.Test;

import static org.junit.jupiter.api.Assertions.assertEquals;

/**
 * Tests that {@link ParamsScope} injects Madvoc (Petite) container parameters
 * into a target bean: parameters named {@code <uncapitalizedBeanName>.<property>}
 * must be bound to the matching bean properties.
 */
class MadvocParamsInjectorTest {

	@Test
	void testInjection() {
		// Boot a Madvoc web application to obtain its internal Petite container.
		WebApp webapp = new WebApp();
		webapp.start();

		PetiteContainer madpc = webapp.madvocContainer().getPetiteContainer();

		// Parameter prefix is the uncapitalized simple class name, i.e. "fooBean".
		String baseName = StringUtil.uncapitalize(FooBean.class.getSimpleName());

		// "foo" and the bare "fooBean" entries do not match any property path
		// and must be ignored by the injector.
		madpc.defineParameter("foo", "1");
		madpc.defineParameter(baseName + ".integer", "173");
		madpc.defineParameter(baseName + ".string", "jodd");
		madpc.defineParameter(baseName, "huh");

		// Wire the container into the scope directly (bypasses normal setup).
		ParamsScope paramsScope = new ParamsScope();
		BeanUtil.declared.setProperty(paramsScope, "madpc", madpc);

		FooBean fooBean = new FooBean();

		paramsScope.inject(new Targets(fooBean, null));

		// Only the two prefixed, property-matching parameters are injected
		// (with string-to-Integer conversion applied).
		assertEquals(173, fooBean.getInteger().intValue());
		assertEquals("jodd", fooBean.getString());
	}
}
784
32,544
<reponame>zeesh49/tutorials package com.baeldung.persistence.manytomany.dao.impl; import org.springframework.stereotype.Repository; import com.baeldung.hibernate.manytomany.model.Employee; import com.baeldung.persistence.dao.common.AbstractHibernateDao; import com.baeldung.persistence.manytomany.dao.IEmployeeDao; @Repository public class EmployeeDao extends AbstractHibernateDao<Employee> implements IEmployeeDao { public EmployeeDao() { super(); setClazz(Employee.class); } }
218
329
/*
 * Count the ASCII letters ('a'-'z' and 'A'-'Z') in a NUL-terminated string.
 *
 * Bug fix: the original loop condition was `while (lett != 0)`, which tests
 * the POINTER instead of the character it points to. Since `lett` only ever
 * increments, the loop never saw the string's NUL terminator and walked off
 * the end of the buffer. The condition must dereference: `*lett != '\0'`.
 *
 * text: pointer to a NUL-terminated C string (must not be NULL).
 * returns: number of ASCII letters in the string.
 */
int num_of_letters(char* text){
    int num = 0;
    char * lett = text;
    while (*lett != '\0'){
        /* Check both lowercase and uppercase ASCII ranges. */
        if ((*lett >= 'a' && *lett <= 'z') ||
            (*lett >= 'A' && *lett <= 'Z')){
            num += 1;
        }
        lett += 1;
    }
    return num;
}
188
3,897
/**
 * Copyright (c) 2015 - 2018, Nordic Semiconductor ASA
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form, except as embedded into a Nordic
 *    Semiconductor ASA integrated circuit in a product or a software update for
 *    such product, must reproduce the above copyright notice, this list of
 *    conditions and the following disclaimer in the documentation and/or other
 *    materials provided with the distribution.
 *
 * 3. Neither the name of Nordic Semiconductor ASA nor the names of its
 *    contributors may be used to endorse or promote products derived from this
 *    software without specific prior written permission.
 *
 * 4. This software, with or without modification, must only be used with a
 *    Nordic Semiconductor ASA integrated circuit.
 *
 * 5. Any software provided in binary form under this license must not be reverse
 *    engineered, decompiled, modified and/or disassembled.
 *
 * THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
 * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

// nrfx driver for the legacy (non-EasyDMA) SPI master peripheral.
// Transfers move one byte at a time through the TXD/RXD registers, either
// polled (blocking mode, no event handler) or driven by the READY interrupt
// (non-blocking mode, handler provided at init).

#include <nrfx.h>

#if NRFX_CHECK(NRFX_SPI_ENABLED)

#if !(NRFX_CHECK(NRFX_SPI0_ENABLED) || NRFX_CHECK(NRFX_SPI1_ENABLED) || \
      NRFX_CHECK(NRFX_SPI2_ENABLED))
#error "No enabled SPI instances. Check <nrfx_config.h>."
#endif

#include <nrfx_spi.h>
#include "prs/nrfx_prs.h"
#include <hal/nrf_gpio.h>

#define NRFX_LOG_MODULE SPI
#include <nrfx_log.h>

// Control block - driver instance local data.
typedef struct
{
    nrfx_spi_evt_handler_t handler;                  // User callback; NULL selects blocking mode.
    void *                 p_context;                // Opaque pointer passed back to the handler.
    nrfx_spi_evt_t         evt;                      // Keep the struct that is ready for event handler. Less memcpy.
    nrfx_drv_state_t       state;                    // UNINITIALIZED / INITIALIZED.
    volatile bool          transfer_in_progress;     // Set in non-blocking mode; guards against re-entry.

    // [no need for 'volatile' attribute for the following members, as they
    //  are not concurrently used in IRQ handlers and main line code]
    uint8_t         ss_pin;                          // Software-driven Slave Select pin (or NOT_USED).
    uint8_t         miso_pin;                        // Cached so uninit can restore the pin config.
    uint8_t         orc;                             // Over-run character sent when TX buffer runs out.
    size_t          bytes_transferred;               // Bytes received so far in the current transfer.
    bool            abort;                           // Request to truncate the ongoing transfer.
} spi_control_block_t;
static spi_control_block_t m_cb[NRFX_SPI_ENABLED_COUNT];

// Initialize an SPI instance: configure SCK/MOSI/MISO/SS GPIOs, program
// frequency/mode/bit order, and (in non-blocking mode) enable the READY
// interrupt. Returns NRFX_ERROR_INVALID_STATE if already initialized, or
// NRFX_ERROR_BUSY if the peripheral is taken by a shared-resource (PRS) user.
nrfx_err_t nrfx_spi_init(nrfx_spi_t const * const  p_instance,
                         nrfx_spi_config_t const * p_config,
                         nrfx_spi_evt_handler_t    handler,
                         void *                    p_context)
{
    NRFX_ASSERT(p_config);
    spi_control_block_t * p_cb = &m_cb[p_instance->drv_inst_idx];
    nrfx_err_t err_code;

    if (p_cb->state != NRFX_DRV_STATE_UNINITIALIZED)
    {
        err_code = NRFX_ERROR_INVALID_STATE;
        NRFX_LOG_WARNING("Function: %s, error code: %s.",
                         __func__,
                         NRFX_LOG_ERROR_STRING_GET(err_code));
        return err_code;
    }

#if NRFX_CHECK(NRFX_PRS_ENABLED)
    static nrfx_irq_handler_t const irq_handlers[NRFX_SPI_ENABLED_COUNT] = {
        #if NRFX_CHECK(NRFX_SPI0_ENABLED)
        nrfx_spi_0_irq_handler,
        #endif
        #if NRFX_CHECK(NRFX_SPI1_ENABLED)
        nrfx_spi_1_irq_handler,
        #endif
        #if NRFX_CHECK(NRFX_SPI2_ENABLED)
        nrfx_spi_2_irq_handler,
        #endif
    };
    if (nrfx_prs_acquire(p_instance->p_reg,
            irq_handlers[p_instance->drv_inst_idx]) != NRFX_SUCCESS)
    {
        err_code = NRFX_ERROR_BUSY;
        NRFX_LOG_WARNING("Function: %s, error code: %s.",
                         __func__,
                         NRFX_LOG_ERROR_STRING_GET(err_code));
        return err_code;
    }
#endif // NRFX_CHECK(NRFX_PRS_ENABLED)

    p_cb->handler = handler;
    p_cb->p_context = p_context;

    uint32_t mosi_pin;
    uint32_t miso_pin;
    // Configure pins used by the peripheral:
    // - SCK - output with initial value corresponding with the SPI mode used:
    //   0 - for modes 0 and 1 (CPOL = 0), 1 - for modes 2 and 3 (CPOL = 1);
    //   according to the reference manual guidelines this pin and its input
    //   buffer must always be connected for the SPI to work.
    if (p_config->mode <= NRF_SPI_MODE_1)
    {
        nrf_gpio_pin_clear(p_config->sck_pin);
    }
    else
    {
        nrf_gpio_pin_set(p_config->sck_pin);
    }
    nrf_gpio_cfg(p_config->sck_pin,
                 NRF_GPIO_PIN_DIR_OUTPUT,
                 NRF_GPIO_PIN_INPUT_CONNECT,
                 NRF_GPIO_PIN_NOPULL,
                 NRF_GPIO_PIN_S0S1,
                 NRF_GPIO_PIN_NOSENSE);
    // - MOSI (optional) - output with initial value 0,
    if (p_config->mosi_pin != NRFX_SPI_PIN_NOT_USED)
    {
        mosi_pin = p_config->mosi_pin;
        nrf_gpio_pin_clear(mosi_pin);
        nrf_gpio_cfg_output(mosi_pin);
    }
    else
    {
        mosi_pin = NRF_SPI_PIN_NOT_CONNECTED;
    }
    // - MISO (optional) - input,
    if (p_config->miso_pin != NRFX_SPI_PIN_NOT_USED)
    {
        miso_pin = p_config->miso_pin;
        nrf_gpio_cfg_input(miso_pin, (nrf_gpio_pin_pull_t)NRFX_SPI_MISO_PULL_CFG);
    }
    else
    {
        miso_pin = NRF_SPI_PIN_NOT_CONNECTED;
    }
    m_cb[p_instance->drv_inst_idx].miso_pin = p_config->miso_pin;
    // - Slave Select (optional) - output with initial value 1 (inactive).
    if (p_config->ss_pin != NRFX_SPI_PIN_NOT_USED)
    {
        nrf_gpio_pin_set(p_config->ss_pin);
        nrf_gpio_cfg_output(p_config->ss_pin);
    }
    m_cb[p_instance->drv_inst_idx].ss_pin = p_config->ss_pin;

    NRF_SPI_Type * p_spi = p_instance->p_reg;
    nrf_spi_pins_set(p_spi, p_config->sck_pin, mosi_pin, miso_pin);
    nrf_spi_frequency_set(p_spi, p_config->frequency);
    nrf_spi_configure(p_spi, p_config->mode, p_config->bit_order);

    m_cb[p_instance->drv_inst_idx].orc = p_config->orc;

    if (p_cb->handler)
    {
        nrf_spi_int_enable(p_spi, NRF_SPI_INT_READY_MASK);
    }

    nrf_spi_enable(p_spi);

    if (p_cb->handler)
    {
        NRFX_IRQ_PRIORITY_SET(nrfx_get_irq_number(p_instance->p_reg),
            p_config->irq_priority);
        NRFX_IRQ_ENABLE(nrfx_get_irq_number(p_instance->p_reg));
    }

    p_cb->transfer_in_progress = false;
    p_cb->state = NRFX_DRV_STATE_INITIALIZED;

    err_code = NRFX_SUCCESS;
    NRFX_LOG_INFO("Function: %s, error code: %s.",
                  __func__,
                  NRFX_LOG_ERROR_STRING_GET(err_code));
    return err_code;
}

// Undo nrfx_spi_init: disable the IRQ and peripheral, restore the MISO pin
// to its default configuration, and release the shared-resource slot.
void nrfx_spi_uninit(nrfx_spi_t const * const p_instance)
{
    spi_control_block_t * p_cb = &m_cb[p_instance->drv_inst_idx];
    NRFX_ASSERT(p_cb->state != NRFX_DRV_STATE_UNINITIALIZED);

    if (p_cb->handler)
    {
        NRFX_IRQ_DISABLE(nrfx_get_irq_number(p_instance->p_reg));
    }

    NRF_SPI_Type * p_spi = p_instance->p_reg;
    if (p_cb->handler)
    {
        nrf_spi_int_disable(p_spi, NRF_SPI_ALL_INTS_MASK);
    }

    if (p_cb->miso_pin != NRFX_SPI_PIN_NOT_USED)
    {
        nrf_gpio_cfg_default(p_cb->miso_pin);
    }
    nrf_spi_disable(p_spi);

#if NRFX_CHECK(NRFX_PRS_ENABLED)
    nrfx_prs_release(p_instance->p_reg);
#endif

    p_cb->state = NRFX_DRV_STATE_UNINITIALIZED;
}

// Complete a non-blocking transfer: deassert SS, clear the busy flag, and
// invoke the user handler with a DONE event.
static void finish_transfer(spi_control_block_t * p_cb)
{
    // If Slave Select signal is used, this is the time to deactivate it.
    if (p_cb->ss_pin != NRFX_SPI_PIN_NOT_USED)
    {
        nrf_gpio_pin_set(p_cb->ss_pin);
    }

    // By clearing this flag before calling the handler we allow subsequent
    // transfers to be started directly from the handler function.
    p_cb->transfer_in_progress = false;

    p_cb->evt.type = NRFX_SPI_EVENT_DONE;
    p_cb->handler(&p_cb->evt, p_cb->p_context);
}

// This function is called from the IRQ handler or, in blocking mode, directly
// from the 'spi_xfer' function.
// It returns true as long as the transfer should be continued, otherwise (when
// there is nothing more to send/receive) it returns false.
static bool transfer_byte(NRF_SPI_Type * p_spi, spi_control_block_t * p_cb)
{
    // Read the data byte received in this transfer (always, because no further
    // READY event can be generated until the current byte is read out from the
    // RXD register), and store it in the RX buffer (only when needed).
    volatile uint8_t rx_data = nrf_spi_rxd_get(p_spi);
    if (p_cb->bytes_transferred < p_cb->evt.xfer_desc.rx_length)
    {
        p_cb->evt.xfer_desc.p_rx_buffer[p_cb->bytes_transferred] = rx_data;
    }

    ++p_cb->bytes_transferred;

    // Check if there are more bytes to send or receive and write proper data
    // byte (next one from TX buffer or over-run character) to the TXD register
    // when needed.
    // NOTE - we've already used 'p_cb->bytes_transferred + 1' bytes from our
    //        buffers, because we take advantage of double buffering of TXD
    //        register (so in effect one byte is still being transmitted now);
    //        see how the transfer is started in the 'spi_xfer' function.
    size_t bytes_used = p_cb->bytes_transferred + 1;

    if (p_cb->abort)
    {
        // Truncate the requested lengths so the transfer ends after the byte
        // currently in flight.
        if (bytes_used < p_cb->evt.xfer_desc.tx_length)
        {
            p_cb->evt.xfer_desc.tx_length = bytes_used;
        }
        if (bytes_used < p_cb->evt.xfer_desc.rx_length)
        {
            p_cb->evt.xfer_desc.rx_length = bytes_used;
        }
    }

    if (bytes_used < p_cb->evt.xfer_desc.tx_length)
    {
        nrf_spi_txd_set(p_spi, p_cb->evt.xfer_desc.p_tx_buffer[bytes_used]);
        return true;
    }
    else if (bytes_used < p_cb->evt.xfer_desc.rx_length)
    {
        // TX exhausted but RX still pending - clock out the over-run character.
        nrf_spi_txd_set(p_spi, p_cb->orc);
        return true;
    }

    return (p_cb->bytes_transferred < p_cb->evt.xfer_desc.tx_length ||
            p_cb->bytes_transferred < p_cb->evt.xfer_desc.rx_length);
}

// Kick off a transfer: prime the (double-buffered) TXD register with up to
// two bytes, then either enable the READY interrupt (non-blocking) or poll
// READY and shuttle bytes until done (blocking).
static void spi_xfer(NRF_SPI_Type               * p_spi,
                     spi_control_block_t        * p_cb,
                     nrfx_spi_xfer_desc_t const * p_xfer_desc)
{
    p_cb->bytes_transferred = 0;
    nrf_spi_int_disable(p_spi, NRF_SPI_INT_READY_MASK);

    nrf_spi_event_clear(p_spi, NRF_SPI_EVENT_READY);

    // Start the transfer by writing some byte to the TXD register;
    // if TX buffer is not empty, take the first byte from this buffer,
    // otherwise - use over-run character.
    nrf_spi_txd_set(p_spi,
        (p_xfer_desc->tx_length > 0 ? p_xfer_desc->p_tx_buffer[0] : p_cb->orc));

    // TXD register is double buffered, so next byte to be transmitted can
    // be written immediately, if needed, i.e. if TX or RX transfer is to
    // be more that 1 byte long. Again - if there is something more in TX
    // buffer send it, otherwise use over-run character.
    if (p_xfer_desc->tx_length > 1)
    {
        nrf_spi_txd_set(p_spi, p_xfer_desc->p_tx_buffer[1]);
    }
    else if (p_xfer_desc->rx_length > 1)
    {
        nrf_spi_txd_set(p_spi, p_cb->orc);
    }

    // For blocking mode (user handler not provided) wait here for READY
    // events (indicating that the byte from TXD register was transmitted
    // and a new incoming byte was moved to the RXD register) and continue
    // transaction until all requested bytes are transferred.
    // In non-blocking mode - IRQ service routine will do this stuff.
    if (p_cb->handler)
    {
        nrf_spi_int_enable(p_spi, NRF_SPI_INT_READY_MASK);
    }
    else
    {
        do {
            while (!nrf_spi_event_check(p_spi, NRF_SPI_EVENT_READY)) {}
            nrf_spi_event_clear(p_spi, NRF_SPI_EVENT_READY);
            NRFX_LOG_DEBUG("SPI: Event: NRF_SPI_EVENT_READY.");
        } while (transfer_byte(p_spi, p_cb));
        // Blocking transfer finished - deassert SS here (the IRQ path does
        // this in finish_transfer()).
        if (p_cb->ss_pin != NRFX_SPI_PIN_NOT_USED)
        {
            nrf_gpio_pin_set(p_cb->ss_pin);
        }
    }
}

// Public transfer entry point. Asserts SS (if used) and starts the exchange
// described by p_xfer_desc. Returns NRFX_ERROR_BUSY if a non-blocking
// transfer is already running; any nonzero 'flags' value yields
// NRFX_ERROR_NOT_SUPPORTED (this non-DMA driver supports no transfer flags).
nrfx_err_t nrfx_spi_xfer(nrfx_spi_t const * const     p_instance,
                         nrfx_spi_xfer_desc_t const * p_xfer_desc,
                         uint32_t                     flags)
{
    spi_control_block_t * p_cb = &m_cb[p_instance->drv_inst_idx];
    NRFX_ASSERT(p_cb->state != NRFX_DRV_STATE_UNINITIALIZED);
    NRFX_ASSERT(p_xfer_desc->p_tx_buffer != NULL || p_xfer_desc->tx_length == 0);
    NRFX_ASSERT(p_xfer_desc->p_rx_buffer != NULL || p_xfer_desc->rx_length == 0);

    nrfx_err_t err_code = NRFX_SUCCESS;

    if (p_cb->transfer_in_progress)
    {
        err_code = NRFX_ERROR_BUSY;
        NRFX_LOG_WARNING("Function: %s, error code: %s.",
                         __func__,
                         NRFX_LOG_ERROR_STRING_GET(err_code));
        return err_code;
    }
    else
    {
        // Only non-blocking mode needs the busy flag; a blocking call returns
        // after the transfer is complete anyway.
        if (p_cb->handler)
        {
            p_cb->transfer_in_progress = true;
        }
    }

    p_cb->evt.xfer_desc = *p_xfer_desc;
    p_cb->abort = false;

    if (p_cb->ss_pin != NRFX_SPI_PIN_NOT_USED)
    {
        nrf_gpio_pin_clear(p_cb->ss_pin);
    }

    if (flags)
    {
        p_cb->transfer_in_progress = false;
        err_code = NRFX_ERROR_NOT_SUPPORTED;
    }
    else
    {
        spi_xfer(p_instance->p_reg, p_cb, p_xfer_desc);
    }
    NRFX_LOG_INFO("Function: %s, error code: %s.",
                  __func__,
                  NRFX_LOG_ERROR_STRING_GET(err_code));
    return err_code;
}

// Request that the ongoing non-blocking transfer be truncated; the actual
// shortening happens in transfer_byte() on the next READY event.
void nrfx_spi_abort(nrfx_spi_t const * p_instance)
{
    spi_control_block_t * p_cb = &m_cb[p_instance->drv_inst_idx];
    NRFX_ASSERT(p_cb->state != NRFX_DRV_STATE_UNINITIALIZED);
    p_cb->abort = true;
}

// Common READY-event service routine shared by all instances: move one byte,
// and fire the user callback when the transfer completes.
static void irq_handler(NRF_SPI_Type * p_spi, spi_control_block_t * p_cb)
{
    NRFX_ASSERT(p_cb->handler);

    nrf_spi_event_clear(p_spi, NRF_SPI_EVENT_READY);
    NRFX_LOG_DEBUG("Event: NRF_SPI_EVENT_READY.");

    if (!transfer_byte(p_spi, p_cb))
    {
        finish_transfer(p_cb);
    }
}

#if NRFX_CHECK(NRFX_SPI0_ENABLED)
void nrfx_spi_0_irq_handler(void)
{
    irq_handler(NRF_SPI0, &m_cb[NRFX_SPI0_INST_IDX]);
}
#endif

#if NRFX_CHECK(NRFX_SPI1_ENABLED)
void nrfx_spi_1_irq_handler(void)
{
    irq_handler(NRF_SPI1, &m_cb[NRFX_SPI1_INST_IDX]);
}
#endif

#if NRFX_CHECK(NRFX_SPI2_ENABLED)
void nrfx_spi_2_irq_handler(void)
{
    irq_handler(NRF_SPI2, &m_cb[NRFX_SPI2_INST_IDX]);
}
#endif

#endif // NRFX_CHECK(NRFX_SPI_ENABLED)
7,091
506
// https://cses.fi/problemset/task/1142/ #include <iostream> #include <stack> #include <tuple> #include <vector> using namespace std; typedef stack<int> si; typedef vector<int> vi; typedef long long ll; int main() { int n; cin >> n; vi v(n+1, 0); for (int i = 0; i < n; i++) cin >> v[i]; ll m = 0; si s; for (int i = 0; i <= n; i++) { while (!s.empty() && v[i] <= v[s.top()]) { int h = v[s.top()]; s.pop(); int k = s.empty() ? -1 : s.top(); m = max(m, ll(i - k - 1) * h); } s.push(i); } cout << m << endl; }
280
701
#pragma once #include <brutal/base.h> // doc: https://www.amd.com/system/files/TechDocs/25481.pdf typedef struct { bool succ; uint32_t eax; uint32_t ebx; uint32_t ecx; uint32_t edx; } CpuidResult; enum cpuid_leaf { CPUID_FEATURE_IDENTIFIER = 1, CPUID_EXTENDED_FEATURE_IDENTIFIER = 7, CPUID_PROC_EXTENDED_STATE_ENUMERATION = 13 }; enum cpuid_feature_bits { // ECX CPUID_SSSE3_SUPPORT = (1 << 9), CPUID_SSE41_SUPPORT = (1 << 19), CPUID_SSE42_SUPPORT = (1 << 20), CPUID_AES_SUPPORT = (1 << 25), CPUID_XSAVE_SUPPORT = (1 << 26), CPUID_XSAVE_ENABLED = (1 << 27), CPUID_AVX_SUPPORT = (1 << 28), }; enum cpuid_extended_feature_bits { // EBX CPUID_BIT_MANIPULATION_SUPPORT = (1 << 3), CPUID_AVX512_SUPPORT = (1 << 16), }; CpuidResult cpuid(uint32_t leaf, uint32_t subleaf); static inline bool cpuid_has_xsave(void) { return (cpuid(CPUID_FEATURE_IDENTIFIER, 0).ecx & CPUID_XSAVE_SUPPORT) == CPUID_XSAVE_SUPPORT; } static inline bool cpuid_has_avx(void) { return cpuid(CPUID_FEATURE_IDENTIFIER, 0).ecx & CPUID_AVX_SUPPORT; } static inline bool cpuid_has_avx512(void) { return cpuid(CPUID_EXTENDED_FEATURE_IDENTIFIER, 0).ebx & CPUID_AVX512_SUPPORT; } static inline size_t cpuid_xsave_size(void) { return cpuid(CPUID_PROC_EXTENDED_STATE_ENUMERATION, 0).ecx; }
636
1,511
/* Shared library add-on to iptables to add TTL matching support * (C) 2000 by <NAME> <<EMAIL>> * * This program is released under the terms of GNU GPL */
45