max_stars_count
int64
301
224k
text
stringlengths
6
1.05M
token_count
int64
3
727k
5,411
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef MOJO_PUBLIC_CPP_BASE_SHARED_MEMORY_UTILS_H_
#define MOJO_PUBLIC_CPP_BASE_SHARED_MEMORY_UTILS_H_

#include "base/component_export.h"
#include "base/memory/read_only_shared_memory_region.h"
#include "base/memory/unsafe_shared_memory_region.h"
#include "base/memory/writable_shared_memory_region.h"

namespace mojo {

// These creation methods are parallel to the base::*SharedMemoryRegion::Create
// methods. These methods should be used instead of the base:: ones to create
// shared memory in an unprivileged context, in which case a broker in a
// privileged process will be used to create the region.
//
// IsValid() should be checked on the return value of the following methods to
// determine if the creation was successful.

// Creates a read-only region of |size| bytes together with a writable mapping
// of it (parallel to base::ReadOnlySharedMemoryRegion::Create).
COMPONENT_EXPORT(MOJO_BASE)
base::MappedReadOnlyRegion CreateReadOnlySharedMemoryRegion(size_t size);

// Creates an unsafe (writable, convertible-to-handle) region of |size| bytes
// (parallel to base::UnsafeSharedMemoryRegion::Create).
COMPONENT_EXPORT(MOJO_BASE)
base::UnsafeSharedMemoryRegion CreateUnsafeSharedMemoryRegion(size_t size);

// Creates a writable region of |size| bytes (parallel to
// base::WritableSharedMemoryRegion::Create).
COMPONENT_EXPORT(MOJO_BASE)
base::WritableSharedMemoryRegion CreateWritableSharedMemoryRegion(size_t size);

}  // namespace mojo

#endif  // MOJO_PUBLIC_CPP_BASE_SHARED_MEMORY_UTILS_H_
429
1,601
/*
 * NOTE(review): this file appears to be a deliberate static-analysis
 * fixture (see the codechecker_suppress marker in main below), so the
 * divide-by-zero in foo() is presumably intentional analyzer bait.
 * Confirm with the test owner before "fixing" it.
 */

/* Returns 42/0 — division by zero, undefined behavior. Kept as-is; see
   the note above. */
int foo() {
  int x = 42;
  return x/0;
}

int main() {
  int y;
  /* codechecker_suppress [all] some comment */
  y = 7;  /* dead store: overwritten below before any read */
  int x = foo();
  y = 10;
  return y + x;
}
83
435
<reponame>amaajemyfren/data<filename>pycon-ar-2012/videos/no-toca-boton-amazon-web-services-desde-python-ezequiel-gutesman-pyconar-2012.json { "copyright_text": "Creative Commons Attribution license (reuse allowed)", "description": "En esta charla vamos a aprender c\u00f3mo desarrollar en **AWS** (Amazon Web\nServices) y c\u00f3mo controlar nuestra infraestructura dentro de este cloud.\nVeremos algunos de los servicios de AWS que pueden servirnos a la hora\nde pensar un desarrollo en esa plataforma incluyendo servicios de\nc\u00f3mputo el\u00e1stico, bases de Datos relacionales y NoSQL y servicios de\naplicaci\u00f3n como queues y notification services. Para esto, utilizaremos\nuna librer\u00eda llamada **boto** , que es la interfaz Python mas usada para\ndesarrollar en y para AWS. Veremos ejemplos en vivo y contar\u00e9 algunos\nmodulos de python que pueden sernos \u00fatiles al momento de encarar un\ndesarrollo. Desarrolladores de todo tipo y admines son bienvenidos,\nAdianchi! Adianchi!\n", "duration": 3327, "language": "spa", "recorded": "2012-11-17", "related_urls": [ { "label": "Event schedule", "url": "https://web.archive.org/web/20140626165416/http://ar.pycon.org/2012/schedule/index" }, { "label": "PyCon Argentina 2012 Brouchure", "url": "https://es.scribd.com/document/116769116/PyCon-Argentina-2012-Brouchure" } ], "speakers": [ "<NAME>" ], "tags": [], "thumbnail_url": "https://i.ytimg.com/vi/hN-STUjbzY8/maxresdefault.jpg", "title": "No toca BOTOn: Amazon Web Services desde python", "videos": [ { "type": "youtube", "url": "https://www.youtube.com/watch?v=hN-STUjbzY8" } ] }
697
460
////////////////////////////////////////////////////////////////////////////// // // (C) Copyright <NAME> 2005-2009. Distributed under the Boost // Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // // See http://www.boost.org/libs/interprocess for documentation. // ////////////////////////////////////////////////////////////////////////////// #ifndef BOOST_INTERPROCESS_DETAIL_MEM_ALGO_COMMON_HPP #define BOOST_INTERPROCESS_DETAIL_MEM_ALGO_COMMON_HPP #if (defined _MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif #include <boost/interprocess/detail/config_begin.hpp> #include <boost/interprocess/detail/workaround.hpp> #include <boost/interprocess/interprocess_fwd.hpp> #include <boost/interprocess/containers/allocation_type.hpp> #include <boost/interprocess/detail/utilities.hpp> #include <boost/interprocess/detail/type_traits.hpp> #include <boost/interprocess/detail/math_functions.hpp> #include <boost/interprocess/detail/utilities.hpp> #include <boost/interprocess/detail/move.hpp> #include <boost/interprocess/detail/min_max.hpp> #include <boost/assert.hpp> #include <boost/static_assert.hpp> #include <algorithm> #include <utility> #include <iterator> #include <boost/assert.hpp> //!\file //!Implements common operations for memory algorithms. namespace boost { namespace interprocess { namespace detail { //!This class implements several allocation functions shared by different algorithms //!(aligned allocation, multiple allocation...). 
//!Shared helper for Boost.Interprocess memory algorithms: implements
//!aligned allocation, burst (multi-)allocation and in-place shrinking on
//!top of the primitive priv_* operations supplied by MemoryAlgorithm.
template<class MemoryAlgorithm>
class memory_algorithm_common
{
   public:
   typedef typename MemoryAlgorithm::void_pointer           void_pointer;
   typedef typename MemoryAlgorithm::block_ctrl             block_ctrl;
   typedef typename MemoryAlgorithm::multiallocation_chain  multiallocation_chain;
   typedef memory_algorithm_common<MemoryAlgorithm>         this_type;

   static const std::size_t Alignment             = MemoryAlgorithm::Alignment;
   static const std::size_t MinBlockUnits         = MemoryAlgorithm::MinBlockUnits;
   static const std::size_t AllocatedCtrlBytes    = MemoryAlgorithm::AllocatedCtrlBytes;
   static const std::size_t AllocatedCtrlUnits    = MemoryAlgorithm::AllocatedCtrlUnits;
   static const std::size_t BlockCtrlBytes        = MemoryAlgorithm::BlockCtrlBytes;
   static const std::size_t BlockCtrlUnits        = MemoryAlgorithm::BlockCtrlUnits;
   static const std::size_t UsableByPreviousChunk = MemoryAlgorithm::UsableByPreviousChunk;

   //!Debug-only check that ptr is Alignment-aligned.
   static void assert_alignment(const void *ptr)
   {  assert_alignment((std::size_t)ptr); }

   //!Debug-only check that the integer address is Alignment-aligned.
   static void assert_alignment(std::size_t uint_ptr)
   {
      (void)uint_ptr;
      BOOST_ASSERT(uint_ptr % Alignment == 0);
   }

   //!Returns true if ptr is Alignment-aligned.
   static bool check_alignment(const void *ptr)
   {  return (((std::size_t)ptr) % Alignment == 0);  }

   //!Converts a byte count to Alignment units, rounding up.
   static std::size_t ceil_units(std::size_t size)
   {  return detail::get_rounded_size(size, Alignment)/Alignment; }

   //!Converts a byte count to Alignment units, rounding down.
   static std::size_t floor_units(std::size_t size)
   {  return size/Alignment;  }

   //!Rounds a byte count up to the next multiple of Alignment.
   static std::size_t multiple_of_units(std::size_t size)
   {  return detail::get_rounded_size(size, Alignment);  }

   //!Allocates n_elements buffers of elem_bytes bytes each, linked in a
   //!multiallocation_chain (equal-size variant).
   static multiallocation_chain allocate_many
      (MemoryAlgorithm *memory_algo, std::size_t elem_bytes, std::size_t n_elements)
   {
      return this_type::priv_allocate_many(memory_algo, &elem_bytes, n_elements, 0);
   }

   //!Deallocates every buffer linked in the chain.
   static void deallocate_many(MemoryAlgorithm *memory_algo, multiallocation_chain chain)
   {
      return this_type::priv_deallocate_many(memory_algo, boost::interprocess::move(chain));
   }

   //!Computes, for a backwards-expansion request, the least common multiple of
   //!backwards_multiple and Alignment, plus the backwards distance (rounded to
   //!that lcm) needed to reach size_to_achieve. Returns false if the request
   //!cannot be satisfied with the given multiple.
   static bool calculate_lcm_and_needs_backwards_lcmed
      (std::size_t backwards_multiple, std::size_t received_size, std::size_t size_to_achieve,
       std::size_t &lcm_out, std::size_t &needs_backwards_lcmed_out)
   {
      // Now calculate lcm
      std::size_t max = backwards_multiple;
      std::size_t min = Alignment;
      std::size_t needs_backwards;
      std::size_t needs_backwards_lcmed;
      std::size_t lcm;
      std::size_t current_forward;
      //Swap if necessary
      if(max < min){
         std::size_t tmp = min;
         min = max;
         max = tmp;
      }
      //Check if it's power of two
      if((backwards_multiple & (backwards_multiple-1)) == 0){
         if(0 != (size_to_achieve & ((backwards_multiple-1)))){
            return false;
         }

         lcm = max;
         //If we want to use minbytes data to get a buffer between maxbytes
         //and minbytes if maxbytes can't be achieved, calculate the
         //biggest of all possibilities
         current_forward = detail::get_truncated_size_po2(received_size, backwards_multiple);
         needs_backwards = size_to_achieve - current_forward;
         BOOST_ASSERT((needs_backwards % backwards_multiple) == 0);
         needs_backwards_lcmed = detail::get_rounded_size_po2(needs_backwards, lcm);
         lcm_out = lcm;
         needs_backwards_lcmed_out = needs_backwards_lcmed;
         return true;
      }
      //Check if it's multiple of alignment
      else if((backwards_multiple & (Alignment - 1u)) == 0){
         lcm = backwards_multiple;
         current_forward = detail::get_truncated_size(received_size, backwards_multiple);
         //No need to round needs_backwards because backwards_multiple == lcm
         needs_backwards_lcmed = needs_backwards = size_to_achieve - current_forward;
         BOOST_ASSERT((needs_backwards_lcmed & (Alignment - 1u)) == 0);
         lcm_out = lcm;
         needs_backwards_lcmed_out = needs_backwards_lcmed;
         return true;
      }
      //Check if it's multiple of the half of the alignmment
      else if((backwards_multiple & ((Alignment/2u) - 1u)) == 0){
         lcm = backwards_multiple*2u;
         current_forward = detail::get_truncated_size(received_size, backwards_multiple);
         needs_backwards_lcmed = needs_backwards = size_to_achieve - current_forward;
         if(0 != (needs_backwards_lcmed & (Alignment-1)))
         //while(0 != (needs_backwards_lcmed & (Alignment-1)))
            needs_backwards_lcmed += backwards_multiple;
         BOOST_ASSERT((needs_backwards_lcmed % lcm) == 0);
         lcm_out = lcm;
         needs_backwards_lcmed_out = needs_backwards_lcmed;
         return true;
      }
      //Check if it's multiple of the half of the alignmment
      else if((backwards_multiple & ((Alignment/4u) - 1u)) == 0){
         std::size_t remainder;
         lcm = backwards_multiple*4u;
         current_forward = detail::get_truncated_size(received_size, backwards_multiple);
         needs_backwards_lcmed = needs_backwards = size_to_achieve - current_forward;
         //while(0 != (needs_backwards_lcmed & (Alignment-1)))
            //needs_backwards_lcmed += backwards_multiple;
         if(0 != (remainder = ((needs_backwards_lcmed & (Alignment-1))>>(Alignment/8u)))){
            if(backwards_multiple & Alignment/2u){
               needs_backwards_lcmed += (remainder)*backwards_multiple;
            }
            else{
               needs_backwards_lcmed += (4-remainder)*backwards_multiple;
            }
         }
         BOOST_ASSERT((needs_backwards_lcmed % lcm) == 0);
         lcm_out = lcm;
         needs_backwards_lcmed_out = needs_backwards_lcmed;
         return true;
      }
      else{
         lcm = detail::lcm(max, min);
      }
      //If we want to use minbytes data to get a buffer between maxbytes
      //and minbytes if maxbytes can't be achieved, calculate the
      //biggest of all possibilities
      current_forward = detail::get_truncated_size(received_size, backwards_multiple);
      needs_backwards = size_to_achieve - current_forward;
      BOOST_ASSERT((needs_backwards % backwards_multiple) == 0);
      needs_backwards_lcmed = detail::get_rounded_size(needs_backwards, lcm);
      lcm_out = lcm;
      needs_backwards_lcmed_out = needs_backwards_lcmed;
      return true;
   }

   //!Allocates n_elements buffers whose sizes come from the elem_sizes array
   //!(each scaled by sizeof_element), linked in a chain.
   static multiallocation_chain allocate_many
      ( MemoryAlgorithm *memory_algo
      , const std::size_t *elem_sizes
      , std::size_t n_elements
      , std::size_t sizeof_element)
   {
      return this_type::priv_allocate_many(memory_algo, elem_sizes, n_elements, sizeof_element);
   }

   //!Allocates nbytes with the requested alignment (which must be a power of
   //!two) by over-allocating and splitting the block around the aligned
   //!position. Returns 0 on failure.
   static void* allocate_aligned
      (MemoryAlgorithm *memory_algo, std::size_t nbytes, std::size_t alignment)
   {
      //Ensure power of 2
      if ((alignment & (alignment - std::size_t(1u))) != 0){
         //Alignment is not power of two
         BOOST_ASSERT((alignment & (alignment - std::size_t(1u))) == 0);
         return 0;
      }

      std::size_t real_size;
      if(alignment <= Alignment){
         return memory_algo->priv_allocate
            (boost::interprocess::allocate_new, nbytes, nbytes, real_size).first;
      }

      if(nbytes > UsableByPreviousChunk)
         nbytes -= UsableByPreviousChunk;

      //We can find a aligned portion if we allocate a block that has alignment
      //nbytes + alignment bytes or more.
      std::size_t minimum_allocation = max_value
         (nbytes + alignment, std::size_t(MinBlockUnits*Alignment));
      //Since we will split that block, we must request a bit more memory
      //if the alignment is near the beginning of the buffer, because otherwise,
      //there is no space for a new block before the alignment.
      //
      //            ____ Aligned here
      //           |
      //  -----------------------------------------------------
      // | MBU |
      //  -----------------------------------------------------
      std::size_t request =
         minimum_allocation + (2*MinBlockUnits*Alignment - AllocatedCtrlBytes
         //prevsize - UsableByPreviousChunk
         );

      //Now allocate the buffer
      void *buffer = memory_algo->priv_allocate
         (boost::interprocess::allocate_new, request, request, real_size).first;
      if(!buffer){
         return 0;
      }
      else if ((((std::size_t)(buffer)) % alignment) == 0){
         //If we are lucky and the buffer is aligned, just split it and
         //return the high part
         block_ctrl *first = memory_algo->priv_get_block(buffer);
         std::size_t old_size = first->m_size;
         const std::size_t first_min_units =
            max_value(ceil_units(nbytes) + AllocatedCtrlUnits, std::size_t(MinBlockUnits));
         //We can create a new block in the end of the segment
         if(old_size >= (first_min_units + MinBlockUnits)){
            block_ctrl *second = reinterpret_cast<block_ctrl *>
               (reinterpret_cast<char*>(first) + Alignment*first_min_units);
            first->m_size  = first_min_units;
            second->m_size = old_size - first->m_size;
            BOOST_ASSERT(second->m_size >= MinBlockUnits);
            memory_algo->priv_mark_new_allocated_block(first);
            //memory_algo->priv_tail_size(first, first->m_size);
            memory_algo->priv_mark_new_allocated_block(second);
            memory_algo->priv_deallocate(memory_algo->priv_get_user_buffer(second));
         }
         return buffer;
      }

      //Buffer not aligned, find the aligned part.
      //
      //                    ____ Aligned here
      //                   |
      //  -----------------------------------------------------
      // | MBU +more | ACB |
      //  -----------------------------------------------------
      char *pos = reinterpret_cast<char*>
         (reinterpret_cast<std::size_t>(static_cast<char*>(buffer) +
            //This is the minimum size of (2)
            (MinBlockUnits*Alignment - AllocatedCtrlBytes) +
            //This is the next MBU for the aligned memory
            AllocatedCtrlBytes +
            //This is the alignment trick
            alignment - 1) & -alignment);

      //Now obtain the address of the blocks
      block_ctrl *first  = memory_algo->priv_get_block(buffer);
      block_ctrl *second = memory_algo->priv_get_block(pos);
      BOOST_ASSERT(pos <= (reinterpret_cast<char*>(first) + first->m_size*Alignment));
      BOOST_ASSERT(first->m_size >= 2*MinBlockUnits);
      BOOST_ASSERT((pos + MinBlockUnits*Alignment - AllocatedCtrlBytes + nbytes*Alignment/Alignment) <=
         (reinterpret_cast<char*>(first) + first->m_size*Alignment));

      //Set the new size of the first block
      std::size_t old_size = first->m_size;
      first->m_size = (reinterpret_cast<char*>(second) - reinterpret_cast<char*>(first))/Alignment;
      memory_algo->priv_mark_new_allocated_block(first);

      //Now check if we can create a new buffer in the end
      //
      //              __"second" block
      //             |      __Aligned here
      //             |     |      __"third" block
      //  -----------|-----|-----|------------------------------
      // | MBU +more | ACB | (3) | BCU |
      //  -----------------------------------------------------
      //This size will be the minimum size to be able to create a
      //new block in the end.
      const std::size_t second_min_units = max_value(std::size_t(MinBlockUnits),
                        ceil_units(nbytes) + AllocatedCtrlUnits );

      //Check if we can create a new block (of size MinBlockUnits) in the end of the segment
      if((old_size - first->m_size) >= (second_min_units + MinBlockUnits)){
         //Now obtain the address of the end block
         block_ctrl *third = new (reinterpret_cast<char*>(second) + Alignment*second_min_units)block_ctrl;
         second->m_size = second_min_units;
         third->m_size  = old_size - first->m_size - second->m_size;
         BOOST_ASSERT(third->m_size >= MinBlockUnits);
         memory_algo->priv_mark_new_allocated_block(second);
         memory_algo->priv_mark_new_allocated_block(third);
         memory_algo->priv_deallocate(memory_algo->priv_get_user_buffer(third));
      }
      else{
         second->m_size = old_size - first->m_size;
         BOOST_ASSERT(second->m_size >= MinBlockUnits);
         memory_algo->priv_mark_new_allocated_block(second);
      }

      memory_algo->priv_deallocate(memory_algo->priv_get_user_buffer(first));
      return memory_algo->priv_get_user_buffer(second);
   }

   //!Checks whether the allocated block holding ptr can be shrunk so that its
   //!user size lies in [preferred_size, max_size]; on success received_size is
   //!the user size the block would have. Does not modify the block.
   static bool try_shrink
      (MemoryAlgorithm *memory_algo, void *ptr
      ,const std::size_t max_size,   const std::size_t preferred_size
      ,std::size_t &received_size)
   {
      (void)memory_algo;
      //Obtain the real block
      block_ctrl *block = memory_algo->priv_get_block(ptr);
      std::size_t old_block_units = block->m_size;

      //The block must be marked as allocated
      BOOST_ASSERT(memory_algo->priv_is_allocated_block(block));

      //Check if alignment and block size are right
      assert_alignment(ptr);

      //Put this to a safe value
      received_size = (old_block_units - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;

      //Now translate it to Alignment units
      const std::size_t max_user_units       = floor_units(max_size - UsableByPreviousChunk);
      const std::size_t preferred_user_units = ceil_units(preferred_size - UsableByPreviousChunk);

      //Check if rounded max and preferred are possible correct
      if(max_user_units < preferred_user_units)
         return false;

      //Check if the block is smaller than the requested minimum
      std::size_t old_user_units = old_block_units - AllocatedCtrlUnits;

      if(old_user_units < preferred_user_units)
         return false;

      //If the block is smaller than the requested minimum
      if(old_user_units == preferred_user_units)
         return true;

      std::size_t shrunk_user_units =
         ((BlockCtrlUnits - AllocatedCtrlUnits) > preferred_user_units)
         ? (BlockCtrlUnits - AllocatedCtrlUnits)
         : preferred_user_units;

      //Some parameter checks
      if(max_user_units < shrunk_user_units)
         return false;

      //We must be able to create at least a new empty block
      if((old_user_units - shrunk_user_units) < BlockCtrlUnits ){
         return false;
      }

      //Update new size
      received_size = shrunk_user_units*Alignment + UsableByPreviousChunk;
      return true;
   }

   //!Shrinks the allocated block holding ptr (if try_shrink allows it),
   //!splitting off the freed tail as a new block and deallocating it.
   static bool shrink
      (MemoryAlgorithm *memory_algo, void *ptr
      ,const std::size_t max_size,   const std::size_t preferred_size
      ,std::size_t &received_size)
   {
      //Obtain the real block
      block_ctrl *block = memory_algo->priv_get_block(ptr);
      std::size_t old_block_units = block->m_size;

      if(!try_shrink
         (memory_algo, ptr, max_size, preferred_size, received_size)){
         return false;
      }

      //Check if the old size was just the shrunk size (no splitting)
      if((old_block_units - AllocatedCtrlUnits) == ceil_units(preferred_size - UsableByPreviousChunk))
         return true;

      //Now we can just rewrite the size of the old buffer
      block->m_size = (received_size-UsableByPreviousChunk)/Alignment + AllocatedCtrlUnits;
      BOOST_ASSERT(block->m_size >= BlockCtrlUnits);

      //We create the new block
      block_ctrl *new_block = reinterpret_cast<block_ctrl*>
         (reinterpret_cast<char*>(block) + block->m_size*Alignment);
      //Write control data to simulate this new block was previously allocated
      //and deallocate it
      new_block->m_size = old_block_units - block->m_size;
      BOOST_ASSERT(new_block->m_size >= BlockCtrlUnits);
      memory_algo->priv_mark_new_allocated_block(block);
      memory_algo->priv_mark_new_allocated_block(new_block);
      memory_algo->priv_deallocate(memory_algo->priv_get_user_buffer(new_block));
      return true;
   }

   private:
   //!Implementation of both allocate_many overloads. Allocates one big chunk
   //!per iteration and carves it into per-element blocks, linking each user
   //!buffer into the returned chain; on partial failure everything already
   //!allocated is released and a shorter (possibly empty) chain is returned.
   static multiallocation_chain priv_allocate_many
      ( MemoryAlgorithm *memory_algo
      , const std::size_t *elem_sizes
      , std::size_t n_elements
      , std::size_t sizeof_element)
   {
      //Note: sizeof_element == 0 indicates that we want to
      //allocate n_elements of the same size "*elem_sizes"

      //Calculate the total size of all requests
      std::size_t total_request_units = 0;
      std::size_t elem_units = 0;
      const std::size_t ptr_size_units = memory_algo->priv_get_total_units(sizeof(void_pointer));
      if(!sizeof_element){
         elem_units = memory_algo->priv_get_total_units(*elem_sizes);
         elem_units = ptr_size_units > elem_units ? ptr_size_units : elem_units;
         total_request_units = n_elements*elem_units;
      }
      else{
         for(std::size_t i = 0; i < n_elements; ++i){
            elem_units = memory_algo->priv_get_total_units(elem_sizes[i]*sizeof_element);
            elem_units = ptr_size_units > elem_units ? ptr_size_units : elem_units;
            total_request_units += elem_units;
         }
      }

      multiallocation_chain chain;

      std::size_t low_idx = 0;
      while(low_idx < n_elements){
         std::size_t total_bytes = total_request_units*Alignment - AllocatedCtrlBytes + UsableByPreviousChunk;
         std::size_t min_allocation = (!sizeof_element)
            ?  elem_units
            :  memory_algo->priv_get_total_units(elem_sizes[low_idx]*sizeof_element);
         min_allocation = min_allocation*Alignment - AllocatedCtrlBytes + UsableByPreviousChunk;

         std::size_t received_size;
         std::pair<void *, bool> ret = memory_algo->priv_allocate
            (boost::interprocess::allocate_new, min_allocation, total_bytes, received_size, 0);
         if(!ret.first){
            break;
         }

         block_ctrl *block = memory_algo->priv_get_block(ret.first);
         std::size_t received_units = block->m_size;
         char *block_address = reinterpret_cast<char*>(block);

         std::size_t total_used_units = 0;
//         block_ctrl *prev_block = 0;
         while(total_used_units < received_units){
            if(sizeof_element){
               elem_units = memory_algo->priv_get_total_units(elem_sizes[low_idx]*sizeof_element);
               elem_units = ptr_size_units > elem_units ? ptr_size_units : elem_units;
            }
            if(total_used_units + elem_units > received_units)
               break;
            total_request_units -= elem_units;
            //This is the position where the new block must be created
            block_ctrl *new_block = reinterpret_cast<block_ctrl *>(block_address);
            assert_alignment(new_block);

            //The last block should take all the remaining space
            if((low_idx + 1) == n_elements ||
               (total_used_units + elem_units +
               ((!sizeof_element)
                  ? elem_units
                  : memory_algo->priv_get_total_units(elem_sizes[low_idx+1]*sizeof_element))
               ) > received_units){
               //By default, the new block will use the rest of the buffer
               new_block->m_size = received_units - total_used_units;
               memory_algo->priv_mark_new_allocated_block(new_block);

               //If the remaining units are bigger than needed and we can
               //split it obtaining a new free memory block do it.
               if((received_units - total_used_units) >= (elem_units + MemoryAlgorithm::BlockCtrlUnits)){
                  std::size_t shrunk_received;
                  std::size_t shrunk_request = elem_units*Alignment - AllocatedCtrlBytes + UsableByPreviousChunk;
                  bool shrink_ok = shrink
                     (memory_algo
                     ,memory_algo->priv_get_user_buffer(new_block)
                     ,shrunk_request
                     ,shrunk_request
                     ,shrunk_received);
                  (void)shrink_ok;
                  //Shrink must always succeed with passed parameters
                  BOOST_ASSERT(shrink_ok);
                  //Some sanity checks
                  BOOST_ASSERT(shrunk_request == shrunk_received);
                  BOOST_ASSERT(elem_units == ((shrunk_request-UsableByPreviousChunk)/Alignment + AllocatedCtrlUnits));
                  //"new_block->m_size" must have been reduced to elem_units by "shrink"
                  BOOST_ASSERT(new_block->m_size == elem_units);
                  //Now update the total received units with the reduction
                  received_units = elem_units + total_used_units;
               }
            }
            else{
               new_block->m_size = elem_units;
               memory_algo->priv_mark_new_allocated_block(new_block);
            }

            block_address += new_block->m_size*Alignment;
            total_used_units += new_block->m_size;
            //Check we have enough room to overwrite the intrusive pointer
            BOOST_ASSERT((new_block->m_size*Alignment - AllocatedCtrlUnits) >= sizeof(void_pointer));
            void_pointer p = new(memory_algo->priv_get_user_buffer(new_block))void_pointer(0);
            chain.push_back(p);
            ++low_idx;
            //prev_block = new_block;
         }
         //Sanity check
         BOOST_ASSERT(total_used_units == received_units);
      }

      if(low_idx != n_elements){
         priv_deallocate_many(memory_algo, boost::interprocess::move(chain));
      }
      return boost::interprocess::move(chain);
   }

   //!Deallocates every buffer linked in the chain, popping front to back.
   static void priv_deallocate_many(MemoryAlgorithm *memory_algo, multiallocation_chain chain)
   {
      while(!chain.empty()){
         void *addr = detail::get_pointer(chain.front());
         chain.pop_front();
         memory_algo->priv_deallocate(addr);
      }
   }
};

}  //namespace detail {
}  //namespace interprocess {
}  //namespace boost {

#include <boost/interprocess/detail/config_end.hpp>

#endif   //#ifndef BOOST_INTERPROCESS_DETAIL_MEM_ALGO_COMMON_HPP
10,696
4,168
package sandbox.org.bouncycastle.asn1.x509;

import sandbox.org.bouncycastle.asn1.ASN1Object;

/**
 * Compile-time stub standing in for
 * {@link org.bouncycastle.asn1.x509.AlgorithmIdentifier}: it exposes just
 * enough surface to let {@link sandbox.net.corda.core.crypto.Crypto}
 * compile. Every operation fails at runtime by design.
 */
@SuppressWarnings("unused")
public class AlgorithmIdentifier extends ASN1Object {
    /** Message thrown by every stubbed entry point. */
    private static final String NOT_IMPLEMENTED = "Dummy class - not implemented";

    /**
     * Always throws — this stub carries no parsing logic.
     *
     * @param obj ignored
     * @return never returns
     * @throws UnsupportedOperationException on every call
     */
    public static AlgorithmIdentifier getInstance(Object obj) {
        throw new UnsupportedOperationException(NOT_IMPLEMENTED);
    }
}
174
335
/*
*********************************************************************************************************
*                                      lwIP TCP/IP Stack
*                                      port for uC/OS-II RTOS on TIC6711 DSK
*
* File : tcp_ip.c
* By   : ZengMing @ DEP,Tsinghua University,Beijing,China
*********************************************************************************************************
*/

#include <stdio.h>
#include <string.h>
#include <ctype.h>
#include <stdlib.h>

#include "lwip/opt.h"
#include "lwip/def.h"
#include "lwip/mem.h"
#include "lwip/pbuf.h"
#include "lwip/sys.h"
#include "lwip/timers.h"
#include <lwip/stats.h>
#include "lwip/netif.h"
#include "netif/etharp.h"
#include "lwip/tcpip.h"
#include "lwip/init.h"
#include "esp_event.h"
#include "esp_event_loop.h"
#include "port/arch/sys_arch.h"

// -- Generic network interface --
extern err_t ethoc_init(struct netif *netif);

// Probes a magic word (0x42) at address 0x3ff005f0 to decide whether we are
// running under QEMU. Returns 1 under QEMU, 0 otherwise.
// NOTE(review): the magic address/value are emulator-specific — confirm
// against the QEMU machine model this port targets.
int is_running_qemu() {
    int *quemu_test=(int *) 0x3ff005f0;
    int ret_val;
    if (*quemu_test==0x42) {
        printf("Running in qemu\n");
        ret_val=1;
    }
    else {
        ret_val=0;
    }
    return ret_val;
}

struct netif ethoc_if;
//struct netif loop_if;

void ethernet_hardreset(void); //These reset codes are built for C6711 DSP
void tcpip_init_done_ok(void * arg);

// One-shot RTOS task that brings up the lwIP stack: registers the ethoc
// netif with a static address (192.168.4.3/24, gw 192.168.4.1), starts the
// tcpip thread, waits on a semaphore until it is ready, marks the interface
// up, posts a fake SYSTEM_EVENT_STA_GOT_IP event to the esp-idf event loop,
// then deletes itself.
void task_lwip_init(void * pParam)
{
    ip4_addr_t ipaddr, netmask, gw;
    sys_sem_t sem;

    // From esp-idf
    //lwip_init();
    sys_init();

    //ethernet_hardreset();//hard reset of EthernetDaughterCard

    // This should be done in lwip_init
# if 0
#if LWIP_STATS
    stats_init();
#endif
    // initial lwIP stack
    sys_init();
    mem_init();
    memp_init();
    pbuf_init();
    netif_init();
#endif
    // The initiation above is done in tcpip_init

    //add loop interface
    //set local loop-interface 127.0.0.1
    /*
    IP4_ADDR(&gw, 127,0,0,1);
    IP4_ADDR(&ipaddr, 127,0,0,1);
    IP4_ADDR(&netmask, 255,0,0,0);
    netif_add(&loop_if, &ipaddr, &netmask, &gw, NULL, loopif_init, tcpip_input);
    */

    //add interface
    IP4_ADDR(&gw, 192,168,4,1);
    IP4_ADDR(&ipaddr, 192,168,4,3);
    IP4_ADDR(&netmask, 255,255,255,0);
    netif_add(&ethoc_if, &ipaddr, &netmask, &gw, NULL, ethoc_init, tcpip_input);
    netif_set_default(&ethoc_if);

    printf("TCP/IP initializing...\n");

    if (sys_sem_new(&sem,0)!=ERR_OK) {
        printf("Failed creating semaphore\n");
    }
    // OLAS
    // tcpip_init_done_ok() signals this semaphore from the tcpip thread.
    tcpip_init(tcpip_init_done_ok, &sem);
    sys_sem_wait(sem);
    sys_sem_free(sem);
    printf("TCP/IP initialized.\n");

    netif_set_up(&ethoc_if);

    // Fake got_ip event
    if (esp_event_loop_get_queue()!=NULL) {
        system_event_t evt;
        //ip4_addr_set(&ip_info->ip, ip_2_ip4(&netif->ip_addr));
        //ip4_addr_set(&ip_info->netmask, ip_2_ip4(&netif->netmask));
        //ip4_addr_set(&ip_info->gw, ip_2_ip4(&netif->gw));
        //notify event
        evt.event_id = SYSTEM_EVENT_STA_GOT_IP;
        // NOTE(review): copies the raw ip4_addr_t into a
        // tcpip_adapter_ip_info_t-sized field — layouts must match; confirm.
        memcpy(&evt.event_info.got_ip.ip_info, &ipaddr, sizeof(tcpip_adapter_ip_info_t));
        esp_event_send(&evt);
    }

    printf("Applications started.\n");
    vTaskDelete(NULL);

    //---------------------------------------------------------------------
    //All thread(task) of lwIP must have their PRI between 10 and 14.
    //------------------------------------------------------------
    //sys_thread_new(httpd_init, (void*)"httpd",10);
    //httpd_init();
    //---------------------------------------------------------------------
    //DSP_C6x_TimerInit();  // Timer interrupt enabled
    // Ethernet interrupt
    //DSP_C6x_Int4Init();   // Int4(Ethernet Chip int) enabled

    /* Block for ever. */
    //sem = sys_sem_new(0);
    //sys_sem_wait(sem);
    //printf("Finished\n");
}

//---------------------------------------------------------
// Callback run on the tcpip thread once the stack is initialized; wakes
// task_lwip_init() by signalling the semaphore it passed in as arg.
void tcpip_init_done_ok(void * arg)
{
    sys_sem_t *sem;
    sem = arg;
    sys_sem_signal(*sem);
}

/*-----------------------------------------------------------*/
/* This function do the hard reset of EthernetDaughterCard   */
/* through the DaughterBoardControl0 signal in DB-IF         */
/*-----------------------------------------------------------*/
void ethernet_hardreset(void) //These reset codes are built for C6711 DSK
{
    // Intentionally empty in this port: there is no C6711 daughter-board
    // hardware to reset here.
}
1,869
583
#include "operator_performance_data.hpp"

#include <string>

#include "utils/format_duration.hpp"

namespace opossum {

// Streams a single-line, human-readable summary of the given operator's
// performance data. The actual formatting is delegated to the polymorphic
// output_to_stream() hook so each concrete performance-data type controls
// its own representation.
std::ostream& operator<<(std::ostream& stream, const AbstractOperatorPerformanceData& performance_data) {
  constexpr auto mode = DescriptionMode::SingleLine;
  performance_data.output_to_stream(stream, mode);
  return stream;
}

}  // namespace opossum
107
744
/*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements.  See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.  The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License.  You may obtain a copy of the License at

  http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied.  See the License for the
specific language governing permissions and limitations
under the License.
*/

package org.apache.griffin.core.info;

import static org.hamcrest.CoreMatchers.is;
import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get;
import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.jsonPath;
import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status;

import org.apache.griffin.core.util.URLHelper;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.autoconfigure.web.servlet.WebMvcTest;
import org.springframework.test.context.junit4.SpringRunner;
import org.springframework.test.web.servlet.MockMvc;

/**
 * Web-slice test for {@code GriffinInfoController}: loads only the MVC layer
 * (security disabled via {@code secure = false}) and exercises the endpoint
 * through {@link MockMvc} without starting a real server.
 */
@RunWith(SpringRunner.class)
@WebMvcTest(value = GriffinInfoController.class, secure = false)
public class GriffinInfoControllerTest {

    // Injected by @WebMvcTest; drives requests against the controller.
    @Autowired
    private MockMvc mockMvc;

    /**
     * GET {API_VERSION_PATH}/version must answer HTTP 200 with the bare JSON
     * string "0.5.0".
     */
    @Test
    public void testGreeting() throws Exception {
        mockMvc.perform(get(URLHelper.API_VERSION_PATH + "/version"))
                .andExpect(status().isOk())
                .andExpect(jsonPath("$", is("0.5.0")));
    }
}
596
770
<filename>src/theia/math/find_polynomial_roots_jenkins_traub.cc<gh_stars>100-1000 // Copyright (C) 2015 The Regents of the University of California (Regents). // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following // disclaimer in the documentation and/or other materials provided // with the distribution. // // * Neither the name of The Regents or University of California nor the // names of its contributors may be used to endorse or promote products // derived from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. // // Please contact the author of this library if you have any questions. 
// Author: <NAME> (<EMAIL>)

#include "theia/math/find_polynomial_roots_jenkins_traub.h"

#include <Eigen/Dense>
#include <Eigen/Eigenvalues>
#include <glog/logging.h>

#include <cmath>
#include <complex>
#include <limits>
#include <vector>

#include "theia/math/polynomial.h"
#include "theia/math/util.h"

namespace theia {
using Eigen::MatrixXd;
using Eigen::Vector3d;
using Eigen::VectorXd;
using Eigen::Vector3cd;
using Eigen::VectorXcd;

namespace {

// Machine precision constants used by the a posteriori round-off estimates
// below. Multiplication and summation errors are given separate names even
// though both are currently the double-precision machine epsilon.
static const double mult_eps = std::numeric_limits<double>::epsilon();
static const double sum_eps = std::numeric_limits<double>::epsilon();

// Absolute and relative stopping tolerances for the root iterations (see
// HasRootConverged further below).
static const double kAbsoluteTolerance = 1e-14;
static const double kRelativeTolerance = 1e-10;

// Outcome of the stage-2 fixed-shift iterations: no convergence, convergence
// of the linear factor (single real root), or convergence of the quadratic
// factor (complex-conjugate root pair).
enum class ConvergenceType {
  NO_CONVERGENCE = 0,
  LINEAR_CONVERGENCE = 1,
  QUADRATIC_CONVERGENCE = 2
};

// Perform division by a linear term of the form (z - x) and evaluate P at x.
// Coefficients are stored highest-degree first, so this is Horner's scheme:
// the intermediate Horner values are exactly the quotient coefficients, and
// the final fold yields P(x).
void SyntheticDivisionAndEvaluate(const VectorXd& polynomial,
                                  const double x,
                                  VectorXd* quotient,
                                  double* eval) {
  quotient->setZero(polynomial.size() - 1);
  (*quotient)(0) = polynomial(0);
  for (int i = 1; i < polynomial.size() - 1; i++) {
    (*quotient)(i) = polynomial(i) + (*quotient)(i - 1) * x;
  }
  const VectorXd::ReverseReturnType& creverse_quotient = quotient->reverse();
  // P(x) = constant term + (lowest-order quotient coefficient) * x.
  *eval = polynomial.reverse()(0) + creverse_quotient(0) * x;
}

// Perform division of a polynomial by a quadratic factor. The quadratic
// divisor should have leading 1s (i.e. it must be monic).
void QuadraticSyntheticDivision(const VectorXd& polynomial,
                                const VectorXd& quadratic_divisor,
                                VectorXd* quotient,
                                VectorXd* remainder) {
  CHECK_EQ(quadratic_divisor.size(), 3);
  CHECK_GE(polynomial.size(), 3);

  quotient->setZero(polynomial.size() - 2);
  remainder->setZero(2);

  (*quotient)(0) = polynomial(0);

  // If the quotient is a constant then polynomial is degree 2 and the math is
  // simple.
  if (quotient->size() == 1) {
    *remainder =
        polynomial.tail<2>() - polynomial(0) * quadratic_divisor.tail<2>();
    return;
  }

  (*quotient)(1) = polynomial(1) - polynomial(0) * quadratic_divisor(1);
  // Two-term recurrence: each quotient coefficient folds in the previous two,
  // scaled by the lower coefficients of the (monic) divisor.
  for (int i = 2; i < polynomial.size() - 2; i++) {
    (*quotient)(i) = polynomial(i) -
                     (*quotient)(i - 2) * quadratic_divisor(2) -
                     (*quotient)(i - 1) * quadratic_divisor(1);
  }

  // Degree-1 remainder, assembled from the two lowest-order coefficients.
  const VectorXd::ReverseReturnType &creverse_quotient = quotient->reverse();
  (*remainder)(0) = polynomial.reverse()(1) -
                    quadratic_divisor(1) * creverse_quotient(0) -
                    quadratic_divisor(2) * creverse_quotient(1);
  (*remainder)(1) =
      polynomial.reverse()(0) - quadratic_divisor(2) * creverse_quotient(0);
}

// Determines whether the iteration has converged by examining the three most
// recent values for convergence. Convergence is declared when each of the two
// most recent steps changed the value by less than half its magnitude.
template<typename T>
bool HasConverged(const T& sequence) {
  const bool convergence_condition_1 =
      std::abs(sequence(1) - sequence(0)) < std::abs(sequence(0)) / 2.0;
  const bool convergence_condition_2 =
      std::abs(sequence(2) - sequence(1)) < std::abs(sequence(1)) / 2.0;

  // If the sequence has converged then return true.
  return convergence_condition_1 && convergence_condition_2;
}

// Determines if the root has converged by measuring the relative and absolute
// change in the root value. This stopping criterion is a simple measurement
// that proves to work well. It is referred to as "Ward's method" in the
// following reference:
//
// Nikolajsen, <NAME>. "New stopping criteria for iterative root finding."
// Royal Society open science (2014)
template <typename T>
bool HasRootConverged(const std::vector<T>& roots) {
  static const double kRootMagnitudeTolerance = 1e-8;
  // A window of exactly three successive estimates is required to measure the
  // two most recent steps.
  if (roots.size() != 3) {
    return false;
  }

  const double e_i = std::abs(roots[2] - roots[1]);
  const double e_i_minus_1 = std::abs(roots[1] - roots[0]);
  const double mag_root = std::abs(roots[1]);
  // Only accept convergence when the step size is not growing.
  if (e_i <= e_i_minus_1) {
    if (mag_root < kRootMagnitudeTolerance) {
      // Near zero a relative test is meaningless; fall back to an absolute
      // tolerance.
      return e_i < kAbsoluteTolerance;
    } else {
      return e_i / mag_root <= kRelativeTolerance;
    }
  }

  return false;
}

// Implementation closely follows the three-stage algorithm for finding roots
// of polynomials with real coefficients as outlined in: "A Three-Stage
// Algorithm for Real Polynomials Using Quadratic Iteration" by Jenkins and
// Traub, SIAM 1970. Please note that this variant is different than the
// complex-coefficient version, and is estimated to be up to 4 times faster.
class JenkinsTraubSolver {
 public:
  JenkinsTraubSolver(const VectorXd& coeffs,
                     VectorXd* real_roots,
                     VectorXd* complex_roots)
      : polynomial_(coeffs),
        real_roots_(real_roots),
        complex_roots_(complex_roots),
        num_solved_roots_(0) {}

  // Extracts the roots using the Jenkins Traub method.
  bool ExtractRoots();

 private:
  // Removes any zero roots and divides polynomial by z.
  void RemoveZeroRoots();

  // Computes the magnitude of the roots to provide an initial search radius
  // for the iterative solver.
  double ComputeRootRadius();

  // Computes the zero-shift applied to the K-Polynomial.
  void ComputeZeroShiftKPolynomial();

  // Stage 1 of the Jenkins-Traub method. This stage is not technically
  // necessary, but helps separate roots that are close to zero.
  void ApplyZeroShiftToKPolynomial(const int num_iterations);

  // Computes and returns the update of sigma(z) based on the current
  // K-polynomial.
  //
  // NOTE: This function is used by the fixed shift iterations (which hold
  // sigma constant) so sigma is *not* modified internally by this function.
  // If you want to change sigma, simply call
  //    sigma = ComputeNextSigma();
  VectorXd ComputeNextSigma();

  // Updates the K-polynomial based on the current value of sigma for the fixed
  // or variable shift stage.
  void UpdateKPolynomialWithQuadraticShift(
      const VectorXd& polynomial_quotient,
      const VectorXd& k_polynomial_quotient);

  // Apply fixed-shift iterations to the K-polynomial to separate the
  // roots. Based on the convergence of the K-polynomial, we apply a
  // variable-shift linear or quadratic iteration to determine a real root or
  // complex conjugate pair of roots respectively.
  ConvergenceType ApplyFixedShiftToKPolynomial(const std::complex<double>& root,
                                               const int max_iterations);

  // Applies one of the variable shifts to the K-Polynomial. Returns true upon
  // successful convergence to a good root, and false otherwise.
  bool ApplyVariableShiftToKPolynomial(
      const ConvergenceType& fixed_shift_convergence,
      const std::complex<double>& root);

  // Applies a quadratic shift to the K-polynomial to determine a pair of roots
  // that are complex conjugates. Return true if a root was successfully found.
  bool ApplyQuadraticShiftToKPolynomial(const std::complex<double>& root,
                                        const int max_iterations);

  // Applies a linear shift to the K-polynomial to determine a single real root.
  // Return true if a root was successfully found.
  bool ApplyLinearShiftToKPolynomial(const std::complex<double>& root,
                                     const int max_iterations);

  // These methods determine whether the root finding has converged based on the
  // machine roundoff error expected in evaluating the polynomials at the root.
  bool HasQuadraticSequenceConverged(const VectorXd& quotient,
                                     const std::complex<double>& root);
  bool HasLinearSequenceConverged(const VectorXd& quotient,
                                  const double root,
                                  const double p_at_root);

  // Adds the root to the output variables.
  void AddRootToOutput(const double real, const double imag);

  // Solves polynomials of degree <= 2.
  bool SolveClosedFormPolynomial();

  // Helper variables to manage the polynomials as they are being manipulated
  // and deflated.
  VectorXd polynomial_;
  VectorXd k_polynomial_;
  // Sigma is the quadratic factor that divides the K-polynomial.
  Vector3d sigma_;

  // Let us define a, b, c, and d such that:
  //   P(z) = Q_P * sigma(z) + b * (z + u) + a
  //   K(z) = Q_K * sigma(z) + d * (z + u ) + c
  //
  // where Q_P and Q_K are the quotients from polynomial division of
  // sigma(z). Note that this means for a given root s of sigma:
  //
  //   P(s)      = a - b * s_conj
  //   P(s_conj) = a - b * s
  //   K(s)      = c - d * s_conj
  //   K(s_conj) = c - d * s
  double a_, b_, c_, d_;

  // Output reference variables.
  VectorXd* real_roots_;
  VectorXd* complex_roots_;
  int num_solved_roots_;

  // Keeps track of whether the linear and quadratic shifts have been attempted
  // yet so that we do not attempt the same shift twice.
  bool attempted_linear_shift_;
  bool attempted_quadratic_shift_;

  // Number of zero-shift iterations to perform.
  static const int kNumZeroShiftIterations = 20;

  // The number of fixed shift iterations is computed as
  //   # roots found * this multiplier.
  static const int kFixedShiftIterationMultiplier = 20;

  // If the fixed shift iterations fail to converge, we restart this many times
  // before considering the solve attempt as a failure.
  static const int kMaxFixedShiftRestarts = 20;

  // The maximum number of linear shift iterations to perform before considering
  // the shift as a failure.
  static const int kMaxLinearShiftIterations = 20;

  // The maximum number of quadratic shift iterations to perform before
  // considering the shift as a failure.
  static const int kMaxQuadraticShiftIterations = 20;

  // When quadratic shift iterations are stalling, we attempt a few fixed shift
  // iterations to help convergence.
  static const int kInnerFixedShiftIterations = 5;

  // During quadratic iterations, the real values of the root pairs should be
  // nearly equal since the root pairs are complex conjugates. This tolerance
  // measures how much the real values may diverge before considering the
  // quadratic shift to be failed.
  const double kRootPairTolerance = 0.01;
};

bool JenkinsTraubSolver::ExtractRoots() {
  if (polynomial_.size() == 0) {
    LOG(ERROR) << "Invalid polynomial of size 0 passed to "
                  "FindPolynomialRootsJenkinsTraub";
    return false;
  }

  // Remove any leading zeros of the polynomial.
  // NOTE(review): RemoveLeadingZeros, DegToRad, etc. appear to come from the
  // included theia math headers — confirm against theia/math/polynomial.h and
  // theia/math/util.h.
  polynomial_ = RemoveLeadingZeros(polynomial_);
  const int degree = static_cast<int>(polynomial_.size()) - 1;

  // Allocate the output roots.
  if (real_roots_ != NULL) {
    real_roots_->setZero(degree);
  }
  if (complex_roots_ != NULL) {
    complex_roots_->setZero(degree);
  }

  // Normalize the polynomial.
  polynomial_ /= polynomial_(0);

  // Remove any zero roots.
  RemoveZeroRoots();

  // Choose the initial starting value for the root-finding on the complex
  // plane.
  double phi = DegToRad(49.0);

  // Iterate until the polynomial has been completely deflated.
  for (int i = 0; i < degree; i++) {
    // Compute the root radius.
    const double root_radius = ComputeRootRadius();

    // Solve in closed form if the polynomial is small enough.
    if (polynomial_.size() <= 3) {
      break;
    }

    // Stage 1: Apply zero-shifts to the K-polynomial to separate the small
    // zeros of the polynomial.
    ApplyZeroShiftToKPolynomial(kNumZeroShiftIterations);

    // Stage 2: Apply fixed shift iterations to the K-polynomial to separate the
    // roots further.
    std::complex<double> root;
    ConvergenceType convergence = ConvergenceType::NO_CONVERGENCE;
    for (int j = 0; j < kMaxFixedShiftRestarts; j++) {
      root = root_radius * std::complex<double>(std::cos(phi), std::sin(phi));
      convergence = ApplyFixedShiftToKPolynomial(
          root, kFixedShiftIterationMultiplier * (i + 1));
      if (convergence != ConvergenceType::NO_CONVERGENCE) {
        break;
      }

      // Rotate the initial root value on the complex plane and try again.
      phi += DegToRad(94.0);
    }

    // Stage 3: Find the root(s) with variable shift iterations on the
    // K-polynomial. If this stage was not successful then we return a failure.
    if (!ApplyVariableShiftToKPolynomial(convergence, root)) {
      return false;
    }
  }
  return SolveClosedFormPolynomial();
}

// Stage 1: Generate K-polynomials with no shifts (i.e. zero-shifts).
void JenkinsTraubSolver::ApplyZeroShiftToKPolynomial(
    const int num_iterations) {
  // K0 is the first order derivative of polynomial.
  k_polynomial_ = DifferentiatePolynomial(polynomial_) / polynomial_.size();
  // Each subsequent K-polynomial is a zero-shift of the previous one.
  for (int i = 1; i < num_iterations; i++) {
    ComputeZeroShiftKPolynomial();
  }
}

ConvergenceType JenkinsTraubSolver::ApplyFixedShiftToKPolynomial(
    const std::complex<double>& root, const int max_iterations) {
  // Compute the fixed-shift quadratic:
  // sigma(z) = (x - m - n * i) * (x - m + n * i) = x^2 - 2 * m + m^2 + n^2.
  sigma_(0) = 1.0;
  sigma_(1) = -2.0 * root.real();
  sigma_(2) = root.real() * root.real() + root.imag() * root.imag();

  // Compute the quotient and remainder for dividing P by the quadratic
  // divisor. Since this iteration involves a fixed-shift sigma these may be
  // computed once prior to any iterations.
  VectorXd polynomial_quotient, polynomial_remainder;
  QuadraticSyntheticDivision(
      polynomial_, sigma_, &polynomial_quotient, &polynomial_remainder);

  // Compute a and b from the above equations.
  b_ = polynomial_remainder(0);
  a_ = polynomial_remainder(1) - b_ * sigma_(1);

  // Precompute P(s) for later using the equation above.
  const std::complex<double> p_at_root = a_ - b_ * std::conj(root);

  // These two containers hold values that we test for convergence such that the
  // zero index is the convergence value from 2 iterations ago, the first
  // index is from one iteration ago, and the second index is the current value.
  Vector3cd t_lambda = Vector3cd::Zero();
  Vector3d sigma_lambda = Vector3d::Zero();
  VectorXd k_polynomial_quotient, k_polynomial_remainder;
  for (int i = 0; i < max_iterations; i++) {
    // Keep the K-polynomial monic to avoid overflow/underflow drift.
    k_polynomial_ /= k_polynomial_(0);

    // Divide the shifted polynomial by the quadratic polynomial.
bool JenkinsTraubSolver::ApplyQuadraticShiftToKPolynomial(
    const std::complex<double>& root, const int max_iterations) {
  // Only proceed if we have not already tried a quadratic shift.
  if (attempted_quadratic_shift_) {
    return false;
  }

  const double kTinyRelativeStep = 0.01;

  // Compute the fixed-shift quadratic:
  // sigma(z) = (x - m - n * i) * (x - m + n * i) = x^2 - 2 * m + m^2 + n^2.
  sigma_(0) = 1.0;
  sigma_(1) = -2.0 * root.real();
  sigma_(2) = root.real() * root.real() + root.imag() * root.imag();

  // Scratch quotients/remainders for the quadratic synthetic divisions of P
  // and K inside the loop, plus bookkeeping for the stall detection below.
  VectorXd polynomial_quotient, polynomial_remainder, k_polynomial_quotient,
      k_polynomial_remainder;
  double poly_at_root(0), prev_poly_at_root(0), prev_v(0);
  bool tried_fixed_shifts = false;

  // These containers maintain a history of the predicted roots. The convergence
  // of the algorithm is determined by the convergence of the root value.
  std::vector<std::complex<double> > roots1, roots2;
  roots1.push_back(root);
  roots2.push_back(std::conj(root));
  for (int i = 0; i < max_iterations; i++) {
    // Terminate if the root evaluation is within our tolerance. This will
    // return false if we do not have enough samples.
    if (HasRootConverged(roots1) && HasRootConverged(roots2)) {
      AddRootToOutput(roots1[1].real(), roots1[1].imag());
      AddRootToOutput(roots2[1].real(), roots2[1].imag());
      // Deflate by the converged quadratic factor.
      polynomial_ = polynomial_quotient;
      return true;
    }

    QuadraticSyntheticDivision(
        polynomial_, sigma_, &polynomial_quotient, &polynomial_remainder);

    // Compute a and b from the above equations.
    b_ = polynomial_remainder(0);
    a_ = polynomial_remainder(1) - b_ * sigma_(1);

    // Solve for the roots of the quadratic factor sigma.
    std::complex<double> roots[2];
    VectorXd real, imag;
    FindQuadraticPolynomialRoots(sigma_, &real, &imag);
    roots[0] = std::complex<double>(real(0), imag(0));
    roots[1] = std::complex<double>(real(1), imag(1));

    // Check that the roots are close. If not, then try a linear shift.
    if (std::abs(std::abs(roots[0].real()) - std::abs(roots[1].real())) >
        kRootPairTolerance * std::abs(roots[1].real())) {
      return ApplyLinearShiftToKPolynomial(root, kMaxLinearShiftIterations);
    }

    // If the iteration is stalling at a root pair then apply a few fixed shift
    // iterations to help convergence.
    poly_at_root =
        std::abs(a_ - roots[0].real() * b_) + std::abs(roots[0].imag() * b_);
    const double rel_step = std::abs((sigma_(2) - prev_v) / sigma_(2));
    if (!tried_fixed_shifts && rel_step < kTinyRelativeStep &&
        prev_poly_at_root > poly_at_root) {
      tried_fixed_shifts = true;
      ApplyFixedShiftToKPolynomial(roots[0], kInnerFixedShiftIterations);
    }

    // Divide the shifted polynomial by the quadratic polynomial.
    QuadraticSyntheticDivision(
        k_polynomial_, sigma_, &k_polynomial_quotient, &k_polynomial_remainder);
    d_ = k_polynomial_remainder(0);
    c_ = k_polynomial_remainder(1) - d_ * sigma_(1);

    prev_v = sigma_(2);
    sigma_ = ComputeNextSigma();

    // Compute K_next using the formula above.
    UpdateKPolynomialWithQuadraticShift(polynomial_quotient,
                                        k_polynomial_quotient);
    k_polynomial_ /= k_polynomial_(0);
    prev_poly_at_root = poly_at_root;

    // Save the roots for convergence testing. Only the 3 most recent values
    // are kept.
    roots1.push_back(roots[0]);
    roots2.push_back(roots[1]);
    if (roots1.size() > 3) {
      roots1.erase(roots1.begin());
      roots2.erase(roots2.begin());
    }
  }

  // Quadratic iteration exhausted its budget; fall back to a linear shift.
  attempted_quadratic_shift_ = true;
  return ApplyLinearShiftToKPolynomial(root, kMaxLinearShiftIterations);
}

// Generate K-Polynomials with variable-shifts that are linear. The shift is
The shift is // computed as: // K_next(z) = 1 / (z - s) * (K(z) - K(s) / P(s) * P(z)) // s_next = s - P(s) / K_next(s) bool JenkinsTraubSolver::ApplyLinearShiftToKPolynomial( const std::complex<double>& root, const int max_iterations) { if (attempted_linear_shift_) { return false; } // Compute an initial guess for the root. double real_root = (root - EvaluatePolynomial(polynomial_, root) / EvaluatePolynomial(k_polynomial_, root)).real(); VectorXd deflated_polynomial, deflated_k_polynomial; double polynomial_at_root(0), k_polynomial_at_root(0); // This container maintains a history of the predicted roots. The convergence // of the algorithm is determined by the convergence of the root value. std::vector<double> roots; roots.push_back(real_root); for (int i = 0; i < max_iterations; i++) { // Terminate if the root evaluation is within our tolerance. This will // return false if we do not have enough samples. if (HasRootConverged(roots)) { AddRootToOutput(roots[1], 0); polynomial_ = deflated_polynomial; return true; } const double prev_polynomial_at_root = polynomial_at_root; SyntheticDivisionAndEvaluate( polynomial_, real_root, &deflated_polynomial, &polynomial_at_root); // If the root is exactly the root then end early. Otherwise, the k // polynomial will be filled with inf or nans. if (std::abs(polynomial_at_root) <= kAbsoluteTolerance) { AddRootToOutput(roots[0], 0); polynomial_ = deflated_polynomial; return true; } // Update the K-Polynomial. SyntheticDivisionAndEvaluate(k_polynomial_, real_root, &deflated_k_polynomial, &k_polynomial_at_root); k_polynomial_ = AddPolynomials( deflated_k_polynomial, -k_polynomial_at_root / polynomial_at_root * deflated_polynomial); k_polynomial_ /= k_polynomial_(0); // Compute the update for the root estimation. k_polynomial_at_root = EvaluatePolynomial(k_polynomial_, real_root); const double delta_root = polynomial_at_root / k_polynomial_at_root; real_root -= delta_root; // Save the root so that convergence can be measured. 
Only the 3 most // recently root values are needed. roots.push_back(real_root); if (roots.size() > 3) { roots.erase(roots.begin()); } // If the linear iterations appear to be stalling then we may have found a // double real root of the form (z - x^2). Attempt a quadratic variable // shift from the current estimate of the root. if (i >= 2 && std::abs(delta_root) < 0.001 * std::abs(real_root) && std::abs(prev_polynomial_at_root) < std::abs(polynomial_at_root)) { const std::complex<double> new_root(real_root, 0); return ApplyQuadraticShiftToKPolynomial(new_root, kMaxQuadraticShiftIterations); } } attempted_linear_shift_ = true; return ApplyQuadraticShiftToKPolynomial(root, kMaxQuadraticShiftIterations); } bool JenkinsTraubSolver::HasQuadraticSequenceConverged( const VectorXd& quotient, const std::complex<double>& root) { const double z = std::sqrt(std::abs(sigma_(2))); const double t = -root.real() * b_; double e = 2.0 * std::abs(quotient(0)); for (int i = 1; i < quotient.size(); i++) { e = e * z + std::abs(quotient(i)); } e = e * z + std::abs(a_ + t); e *= 5.0 * mult_eps + 4.0 * sum_eps; e = e - (5.0 * mult_eps + 2.0 * sum_eps) * (std::abs(a_ + t) + std::abs(b_) * z); e = e + 2.0 * sum_eps * std::abs(t); return std::abs(a_ - b_ * root) < e; } bool JenkinsTraubSolver::HasLinearSequenceConverged(const VectorXd& quotient, const double root, const double p_at_root) { double e = std::abs(quotient(0)) * mult_eps / (sum_eps + mult_eps); const double abs_root = std::abs(root); for (int i = 0; i < quotient.size(); i++) { e = e * abs_root + std::abs(quotient(i)); } const double machine_precision = (sum_eps + mult_eps) * e - mult_eps * std::abs(p_at_root); return std::abs(p_at_root) < machine_precision; } void JenkinsTraubSolver::AddRootToOutput(const double real, const double imag) { if (real_roots_ != NULL) { (*real_roots_)(num_solved_roots_) = real; } if (complex_roots_ != NULL) { (*complex_roots_)(num_solved_roots_) = imag; } ++num_solved_roots_; } void 
JenkinsTraubSolver::RemoveZeroRoots() { int num_zero_roots = 0; const VectorXd::ReverseReturnType& creverse_polynomial = polynomial_.reverse(); while (creverse_polynomial(num_zero_roots) == 0) { ++num_zero_roots; } // The output roots have 0 as the default value so there is no need to // explicitly add the zero roots. polynomial_ = polynomial_.head(polynomial_.size() - num_zero_roots).eval(); } bool JenkinsTraubSolver::SolveClosedFormPolynomial() { const int degree = static_cast<int>(polynomial_.size()) - 1; // Is the polynomial constant? if (degree == 0) { LOG(WARNING) << "Trying to extract roots from a constant " << "polynomial in FindPolynomialRoots"; // We return true with no roots, not false, as if the polynomial is constant // it is correct that there are no roots. It is not the case that they were // there, but that we have failed to extract them. return true; } // Linear if (degree == 1) { AddRootToOutput(-polynomial_(1) / polynomial_(0), 0); return true; } // Quadratic if (degree == 2) { VectorXd real, imaginary; FindQuadraticPolynomialRoots(polynomial_, &real, &imaginary); AddRootToOutput(real(0), imaginary(0)); AddRootToOutput(real(1), imaginary(1)); return true; } return false; } // Computes a lower bound on the radius of the roots of polynomial by examining // the Cauchy sequence: // // z^n + |a_1| * z^{n - 1} + ... + |a_{n-1}| * z - |a_n| // // The unique positive zero of this polynomial is an approximate lower bound of // the radius of zeros of the original polynomial. double JenkinsTraubSolver::ComputeRootRadius() { static const double kEpsilon = 1e-2; static const int kMaxIterations = 100; VectorXd poly = polynomial_; // Take the absolute value of all coefficients. poly = poly.array().abs(); // Negate the last coefficient. poly(poly.size() - 1) *= -1.0; // Find the unique positive zero using Newton-Raphson iterations. 
double x0 = 1.0; return FindRootIterativeNewton(poly, x0, kEpsilon, kMaxIterations); } // The k polynomial with a zero-shift is // (K(x) - K(0) / P(0) * P(x)) / x. // // This is equivalent to: // K(x) - K(0) K(0) P(x) - P(0) // ___________ - ____ * ___________ // x P(0) x // // Note that removing the constant term and dividing by x is equivalent to // shifting the polynomial to one degree lower in our representation. void JenkinsTraubSolver::ComputeZeroShiftKPolynomial() { // Evaluating the polynomial at zero is equivalent to the constant term // (i.e. the last coefficient). const double polynomial_at_zero = polynomial_(polynomial_.size() - 1); const double k_at_zero = k_polynomial_(k_polynomial_.size() - 1); k_polynomial_ = AddPolynomials(k_polynomial_.head(k_polynomial_.size() - 1), -k_at_zero / polynomial_at_zero * polynomial_.head(polynomial_.size() - 1)); } // The iterations are computed with the following equation: // a^2 + u * a * b + v * b^2 // K_next = ___________________________ * Q_K // b * c - a * d // // a * c + u * a * d + v * b * d // + (z - _______________________________) * Q_P + b. // b * c - a * d // // This is done using *only* realy arithmetic so it can be done very fast! void JenkinsTraubSolver::UpdateKPolynomialWithQuadraticShift( const VectorXd& polynomial_quotient, const VectorXd& k_polynomial_quotient) { const double coefficient_q_k = (a_ * a_ + sigma_(1) * a_ * b_ + sigma_(2) * b_ * b_) / (b_ * c_ - a_ * d_); VectorXd linear_polynomial(2); linear_polynomial(0) = 1.0; linear_polynomial(1) = -(a_ * c_ + sigma_(1) * a_ * d_ + sigma_(2) * b_ * d_) / (b_ * c_ - a_ * d_); k_polynomial_ = AddPolynomials( coefficient_q_k * k_polynomial_quotient, MultiplyPolynomials(linear_polynomial, polynomial_quotient)); k_polynomial_(k_polynomial_.size() - 1) += b_; } // Using a bit of algebra, the update of sigma(z) can be computed from the // previous value along with a, b, c, and d defined above. 
// The details of this
// simplification can be found in "Three Stage Variable-Shift Iterations for the
// Solution of Polynomial Equations With a Posteriori Error Bounds for the
// Zeros" by <NAME>, Doctoral Thesis, Stanford University, 1969.
//
// NOTE: we assume the leading term of quadratic_sigma is 1.0.
VectorXd JenkinsTraubSolver::ComputeNextSigma() {
  // Current quadratic divisor sigma(z) = z^2 + u*z + v (leading coeff is 1).
  const double u = sigma_(1);
  const double v = sigma_(2);

  // Reversed views give constant-time access to the low-order coefficients
  // (index 0 of the reversed polynomial is the constant term).
  const VectorXd::ReverseReturnType& creverse_k_polynomial =
      k_polynomial_.reverse();
  const VectorXd::ReverseReturnType& creverse_polynomial =
      polynomial_.reverse();

  // b1, b2: leading coefficients of the remainder relations between K(z) and
  // P(z); intermediate terms a1..c4 follow Jenkins' derivation and are built
  // only from u, v and the previously computed a_, b_, c_, d_.
  const double b1 = -creverse_k_polynomial(0) / creverse_polynomial(0);
  const double b2 = -(creverse_k_polynomial(1) + b1 * creverse_polynomial(1)) /
                    creverse_polynomial(0);

  const double a1 = b_* c_ - a_ * d_;
  const double a2 = a_ * c_ + u * a_ * d_ + v * b_* d_;
  const double c2 = b1 * a2;
  const double c3 = b1 * b1 * (a_ * a_ + u * a_ * b_ + v * b_ * b_);
  const double c4 = v * b2 * a1 - c2 - c3;
  const double c1 = c_ * c_ + u * c_ * d_ + v * d_ * d_ +
                    b1 * (a_ * c_ + u * b_ * c_ + v * b_ * d_) - c4;

  // Increments applied to u and v for the next quadratic shift iteration.
  const double delta_u = -(u * (c2 + c3) + v * (b1 * a1 + b2 * a2)) / c1;
  const double delta_v = v * c4 / c1;

  // Update u and v in the quadratic sigma.
  VectorXd new_quadratic_sigma(3);
  new_quadratic_sigma(0) = 1.0;
  new_quadratic_sigma(1) = u + delta_u;
  new_quadratic_sigma(2) = v + delta_v;
  return new_quadratic_sigma;
}

}  // namespace

// Public entry point: finds all roots of `polynomial`, writing real parts to
// `real_roots` and imaginary parts to `complex_roots` (either may be NULL).
// Returns false if root extraction failed.
bool FindPolynomialRootsJenkinsTraub(const VectorXd& polynomial,
                                     VectorXd* real_roots,
                                     VectorXd* complex_roots) {
  JenkinsTraubSolver solver(polynomial, real_roots, complex_roots);
  return solver.ExtractRoots();
}

}  // namespace theia
12,641
585
# -*- coding: utf-8 -*- import os from .base import NipyBaseInterface from ..base import ( TraitedSpec, traits, File, OutputMultiPath, BaseInterfaceInputSpec, isdefined, ) class FitGLMInputSpec(BaseInterfaceInputSpec): session_info = traits.List( minlen=1, maxlen=1, mandatory=True, desc=( "Session specific information generated by" " ``modelgen.SpecifyModel``, FitGLM does " "not support multiple runs uless they are " "concatenated (see SpecifyModel options)" ), ) hrf_model = traits.Enum( "Canonical", "Canonical With Derivative", "FIR", desc=( "that specifies the hemodynamic reponse " "function it can be 'Canonical', 'Canonical " "With Derivative' or 'FIR'" ), usedefault=True, ) drift_model = traits.Enum( "Cosine", "Polynomial", "Blank", desc=( "string that specifies the desired drift " "model, to be chosen among 'Polynomial', " "'Cosine', 'Blank'" ), usedefault=True, ) TR = traits.Float(mandatory=True) model = traits.Enum( "ar1", "spherical", desc=("autoregressive mode is available only for the " "kalman method"), usedefault=True, ) method = traits.Enum( "kalman", "ols", desc=( "method to fit the model, ols or kalma; kalman " "is more time consuming but it supports " "autoregressive model" ), usedefault=True, ) mask = File( exists=True, desc=("restrict the fitting only to the region defined " "by this mask"), ) normalize_design_matrix = traits.Bool( False, desc=("normalize (zscore) the " "regressors before fitting"), usedefault=True, ) save_residuals = traits.Bool(False, usedefault=True) plot_design_matrix = traits.Bool(False, usedefault=True) class FitGLMOutputSpec(TraitedSpec): beta = File(exists=True) nvbeta = traits.Any() s2 = File(exists=True) dof = traits.Any() constants = traits.Any() axis = traits.Any() reg_names = traits.List() residuals = File() a = File(exists=True) class FitGLM(NipyBaseInterface): """ Fit GLM model based on the specified design. Supports only single or concatenated runs. 
""" input_spec = FitGLMInputSpec output_spec = FitGLMOutputSpec def _run_interface(self, runtime): import nibabel as nb import numpy as np import nipy.modalities.fmri.glm as GLM import nipy.modalities.fmri.design_matrix as dm try: BlockParadigm = dm.BlockParadigm except AttributeError: from nipy.modalities.fmri.experimental_paradigm import BlockParadigm session_info = self.inputs.session_info functional_runs = self.inputs.session_info[0]["scans"] if isinstance(functional_runs, (str, bytes)): functional_runs = [functional_runs] nii = nb.load(functional_runs[0]) data = nii.get_fdata(caching="unchanged") if isdefined(self.inputs.mask): mask = np.asanyarray(nb.load(self.inputs.mask).dataobj) > 0 else: mask = np.ones(nii.shape[:3]) == 1 timeseries = data[mask, :] del data for functional_run in functional_runs[1:]: nii = nb.load(functional_run, mmap=False) npdata = np.asarray(nii.dataobj) timeseries = np.concatenate((timeseries, npdata[mask, :]), axis=1) del npdata nscans = timeseries.shape[1] if "hpf" in list(session_info[0].keys()): hpf = session_info[0]["hpf"] drift_model = self.inputs.drift_model else: hpf = 0 drift_model = "Blank" reg_names = [] for reg in session_info[0]["regress"]: reg_names.append(reg["name"]) reg_vals = np.zeros((nscans, len(reg_names))) for i in range(len(reg_names)): reg_vals[:, i] = np.array(session_info[0]["regress"][i]["val"]).reshape( 1, -1 ) frametimes = np.linspace(0, (nscans - 1) * self.inputs.TR, nscans) conditions = [] onsets = [] duration = [] for i, cond in enumerate(session_info[0]["cond"]): onsets += cond["onset"] conditions += [cond["name"]] * len(cond["onset"]) if len(cond["duration"]) == 1: duration += cond["duration"] * len(cond["onset"]) else: duration += cond["duration"] if conditions: paradigm = BlockParadigm(con_id=conditions, onset=onsets, duration=duration) else: paradigm = None design_matrix, self._reg_names = dm.dmtx_light( frametimes, paradigm, drift_model=drift_model, hfcut=hpf, hrf_model=self.inputs.hrf_model, 
add_regs=reg_vals, add_reg_names=reg_names, ) if self.inputs.normalize_design_matrix: for i in range(len(self._reg_names) - 1): design_matrix[:, i] = ( design_matrix[:, i] - design_matrix[:, i].mean() ) / design_matrix[:, i].std() if self.inputs.plot_design_matrix: import pylab pylab.pcolor(design_matrix) pylab.savefig("design_matrix.pdf") pylab.close() pylab.clf() glm = GLM.GeneralLinearModel() glm.fit( timeseries.T, design_matrix, method=self.inputs.method, model=self.inputs.model, ) self._beta_file = os.path.abspath("beta.nii") beta = np.zeros(mask.shape + (glm.beta.shape[0],)) beta[mask, :] = glm.beta.T nb.save(nb.Nifti1Image(beta, nii.affine), self._beta_file) self._s2_file = os.path.abspath("s2.nii") s2 = np.zeros(mask.shape) s2[mask] = glm.s2 nb.save(nb.Nifti1Image(s2, nii.affine), self._s2_file) if self.inputs.save_residuals: explained = np.dot(design_matrix, glm.beta) residuals = np.zeros(mask.shape + (nscans,)) residuals[mask, :] = timeseries - explained.T self._residuals_file = os.path.abspath("residuals.nii") nb.save(nb.Nifti1Image(residuals, nii.affine), self._residuals_file) self._nvbeta = glm.nvbeta self._dof = glm.dof self._constants = glm._constants self._axis = glm._axis if self.inputs.model == "ar1": self._a_file = os.path.abspath("a.nii") a = np.zeros(mask.shape) a[mask] = glm.a.squeeze() nb.save(nb.Nifti1Image(a, nii.affine), self._a_file) self._model = glm.model self._method = glm.method return runtime def _list_outputs(self): outputs = self._outputs().get() outputs["beta"] = self._beta_file outputs["nvbeta"] = self._nvbeta outputs["s2"] = self._s2_file outputs["dof"] = self._dof outputs["constants"] = self._constants outputs["axis"] = self._axis outputs["reg_names"] = self._reg_names if self.inputs.model == "ar1": outputs["a"] = self._a_file if self.inputs.save_residuals: outputs["residuals"] = self._residuals_file return outputs class EstimateContrastInputSpec(BaseInterfaceInputSpec): contrasts = traits.List( traits.Either( traits.Tuple( 
traits.Str, traits.Enum("T"), traits.List(traits.Str), traits.List(traits.Float), ), traits.Tuple( traits.Str, traits.Enum("T"), traits.List(traits.Str), traits.List(traits.Float), traits.List(traits.Float), ), traits.Tuple( traits.Str, traits.Enum("F"), traits.List( traits.Either( traits.Tuple( traits.Str, traits.Enum("T"), traits.List(traits.Str), traits.List(traits.Float), ), traits.Tuple( traits.Str, traits.Enum("T"), traits.List(traits.Str), traits.List(traits.Float), traits.List(traits.Float), ), ) ), ), ), desc="""List of contrasts with each contrast being a list of the form: [('name', 'stat', [condition list], [weight list], [session list])]. if session list is None or not provided, all sessions are used. For F contrasts, the condition list should contain previously defined T-contrasts.""", mandatory=True, ) beta = File( exists=True, desc="beta coefficients of the fitted model", mandatory=True ) nvbeta = traits.Any(mandatory=True) s2 = File(exists=True, desc="squared variance of the residuals", mandatory=True) dof = traits.Any(desc="degrees of freedom", mandatory=True) constants = traits.Any(mandatory=True) axis = traits.Any(mandatory=True) reg_names = traits.List(mandatory=True) mask = File(exists=True) class EstimateContrastOutputSpec(TraitedSpec): stat_maps = OutputMultiPath(File(exists=True)) z_maps = OutputMultiPath(File(exists=True)) p_maps = OutputMultiPath(File(exists=True)) class EstimateContrast(NipyBaseInterface): """ Estimate contrast of a fitted model. 
""" input_spec = EstimateContrastInputSpec output_spec = EstimateContrastOutputSpec def _run_interface(self, runtime): import nibabel as nb import numpy as np import nipy.modalities.fmri.glm as GLM beta_nii = nb.load(self.inputs.beta) if isdefined(self.inputs.mask): mask = np.asanyarray(nb.load(self.inputs.mask).dataobj) > 0 else: mask = np.ones(beta_nii.shape[:3]) == 1 glm = GLM.GeneralLinearModel() glm.beta = np.array(beta_nii.dataobj)[mask, :].T glm.nvbeta = self.inputs.nvbeta glm.s2 = np.array(nb.load(self.inputs.s2).dataobj)[mask] glm.dof = self.inputs.dof glm._axis = self.inputs.axis glm._constants = self.inputs.constants reg_names = self.inputs.reg_names self._stat_maps = [] self._p_maps = [] self._z_maps = [] for contrast_def in self.inputs.contrasts: name = contrast_def[0] contrast = np.zeros(len(reg_names)) for i, reg_name in enumerate(reg_names): if reg_name in contrast_def[2]: idx = contrast_def[2].index(reg_name) contrast[i] = contrast_def[3][idx] est_contrast = glm.contrast(contrast) stat_map = np.zeros(mask.shape) stat_map[mask] = est_contrast.stat().T stat_map_file = os.path.abspath(name + "_stat_map.nii") nb.save(nb.Nifti1Image(stat_map, beta_nii.affine), stat_map_file) self._stat_maps.append(stat_map_file) p_map = np.zeros(mask.shape) p_map[mask] = est_contrast.pvalue().T p_map_file = os.path.abspath(name + "_p_map.nii") nb.save(nb.Nifti1Image(p_map, nii.affine), p_map_file) self._p_maps.append(p_map_file) z_map = np.zeros(mask.shape) z_map[mask] = est_contrast.zscore().T z_map_file = os.path.abspath(name + "_z_map.nii") nb.save(nb.Nifti1Image(z_map, nii.affine), z_map_file) self._z_maps.append(z_map_file) return runtime def _list_outputs(self): outputs = self._outputs().get() outputs["stat_maps"] = self._stat_maps outputs["p_maps"] = self._p_maps outputs["z_maps"] = self._z_maps return outputs
6,382
726
<reponame>xqgdmg/DavidNBA-master package com.yuyh.library.view.list.indexablelistview; import android.app.Activity; import android.content.Context; import android.content.res.ColorStateList; import android.content.res.TypedArray; import android.graphics.Color; import android.os.Handler; import android.os.HandlerThread; import android.os.Looper; import android.os.Message; import android.support.v7.widget.AppCompatTextView; import android.util.AttributeSet; import android.util.SparseArray; import android.view.Gravity; import android.view.View; import android.view.WindowManager; import android.widget.AbsListView; import android.widget.AdapterView; import android.widget.FrameLayout; import android.widget.ListView; import android.widget.TextView; import com.yuyh.library.R; import java.lang.ref.WeakReference; import java.util.ArrayList; import java.util.List; /** * Created by YoKeyword on 16/3/21. */ public class IndexableStickyListView extends FrameLayout implements AdapterView.OnItemClickListener, AbsListView.OnScrollListener { private static final int MSG_BIND_DATA = 1; private OnItemTitleClickListener mOnTitleListener; private OnItemContentClickListener mOnContentListener; private int mBarTextColor, mBarSelectedTextColor, mRightOverlayColor; private float mBarTextSize; private int mTypeOverlay; private ArrayList<View> mAddHeaderViewList; private ListView mListView; private IndexBar mIndexBar; private SearchLayout mSearchLayout; private TextView mTvOverlay; private AppCompatTextView mTvRightOverlay; private Context mContext; private IndexableAdapter mAdapter; private List<IndexEntity> mItems; private IndexHeaderEntity[] mHeaderEntities; private int mCurrentScrollItemPosition, mCurrentScrollItemTop; private int mTitleHeight; private SparseArray<String> mTitleMap; private TextView mStickView; private HandlerThread mBindDataHandlerThread; private Handler mBindDataHandler; public IndexableStickyListView(Context context) { super(context); init(context, null); } public 
IndexableStickyListView(Context context, AttributeSet attrs) { super(context, attrs); init(context, attrs); } public IndexableStickyListView(Context context, AttributeSet attrs, int defStyleAttr) { super(context, attrs, defStyleAttr); init(context, attrs); } private void init(Context context, AttributeSet attrs) { mContext = context; if (attrs != null) { TypedArray a = context.obtainStyledAttributes(attrs, R.styleable.IndexableStickyListView); mBarTextColor = a.getColor(R.styleable.IndexableStickyListView_indexBar_textColor, getResources().getColor(R.color.default_indexBar_textcolor)); mBarTextSize = a.getDimension(R.styleable.IndexableStickyListView_indexBar_textSize, getResources().getDimension(R.dimen.default_indexBar_textSize)); mBarSelectedTextColor = a.getColor(R.styleable.IndexableStickyListView_indexBar_selected_textColor, getResources().getColor(R.color.dafault_indexBar_selected_textColor)); mRightOverlayColor = a.getColor(R.styleable.IndexableStickyListView_indexListView_rightOverlayColor, getResources().getColor(R.color.default_indexListView_rightOverlayColor)); mTypeOverlay = a.getInt(R.styleable.IndexableStickyListView_indexListView_type_overlay, 0); a.recycle(); } if (mContext instanceof Activity) { ((Activity) mContext).getWindow().setSoftInputMode(WindowManager.LayoutParams.SOFT_INPUT_ADJUST_PAN); } mListView = new ListView(context); mListView.setVerticalScrollBarEnabled(false); mListView.setOverScrollMode(View.OVER_SCROLL_NEVER); mListView.setDivider(null); addView(mListView, new LayoutParams(LayoutParams.MATCH_PARENT, LayoutParams.MATCH_PARENT)); mIndexBar = new IndexBar(context, mBarTextColor, mBarSelectedTextColor, mBarTextSize); LayoutParams params = new LayoutParams(LayoutParams.WRAP_CONTENT, LayoutParams.MATCH_PARENT); params.gravity = Gravity.RIGHT; params.topMargin = IndexBar.dp2px(context, 16); params.bottomMargin = params.topMargin; addView(mIndexBar, params); if (mTypeOverlay == 1) { showCenterOverlayView(true); } else if (mTypeOverlay 
== 2) { showRightOverlayView(true, mRightOverlayColor); } mSearchLayout = new SearchLayout(context); LayoutParams paramsLayout = new LayoutParams(LayoutParams.MATCH_PARENT, LayoutParams.MATCH_PARENT); addView(mSearchLayout, paramsLayout); mSearchLayout.setVisibility(GONE); mListView.setOnItemClickListener(this); mListView.setOnScrollListener(this); mIndexBar.setOnIndexSelectedListener(new IndexBar.OnIndexTitleSelectedListener() { @Override public void onSelection(int position, String indexTitle) { if (mStickView != null) { if (!mStickView.getText().toString().equals(indexTitle)) { mStickView.setText(indexTitle); } if (mStickView.getY() != 0) { mStickView.setY(0); } } } }); } public void addHeaderView(View view) { if (mListView == null) return; if (mAddHeaderViewList == null) { mAddHeaderViewList = new ArrayList<>(); } mListView.addHeaderView(view); mAddHeaderViewList.add(view); } public void removeHeaderView(View view) { if (mListView == null || mAddHeaderViewList == null || !mAddHeaderViewList.contains(view)) { return; } mListView.removeHeaderView(view); mAddHeaderViewList.remove(view); } public int getHeaderViewsCount() { if (mListView == null) return 0; return mListView.getHeaderViewsCount(); } public ArrayList<View> getHeaderViews() { return mAddHeaderViewList; } /** * @return IndexListView的ListView */ public ListView getListView() { return mListView; } /** * @return IndexListView中心位置的 悬浮TextView */ public TextView getCenterOverlayTextView() { return mTvOverlay; } public IndexBar getIndexBar() { return mIndexBar; } /** * @param show 是否显示IndexListView右侧位置的 悬浮TextView */ public void showRightOverlayView(boolean show, int color) { if (show) { if (mTvRightOverlay == null) { initRightOverlayTextView(color); addView(mTvRightOverlay); mTvRightOverlay.invalidate(); mIndexBar.showTouchOverlayView(mTvRightOverlay); } } else { if (mTvRightOverlay != null) { removeView(mTvRightOverlay); mIndexBar.showTouchOverlayView(null); } } } /** * @param show 是否显示IndexListView中心位置的 
悬浮TextView */ public void showCenterOverlayView(boolean show) { if (show) { if (mTvOverlay == null) { initOverlayTextView(); } addView(mTvOverlay); mIndexBar.setOverlayView(mTvOverlay); } else { if (mTvOverlay != null) { removeView(mTvOverlay); } mIndexBar.setOverlayView(null); } } private void initOverlayTextView() { mTvOverlay = new TextView(mContext); mTvOverlay.setBackgroundResource(R.drawable.bg_translucent_4dp); mTvOverlay.setTextColor(Color.WHITE); mTvOverlay.setTextSize(40); mTvOverlay.setGravity(Gravity.CENTER); int size = IndexBar.dp2px(mContext, 70); LayoutParams params = new LayoutParams(size, size); params.gravity = Gravity.CENTER; mTvOverlay.setLayoutParams(params); mTvOverlay.setVisibility(INVISIBLE); } private void initRightOverlayTextView(int color) { mTvRightOverlay = new AppCompatTextView(mContext); mTvRightOverlay.setBackgroundResource(R.drawable.bg_right_overlay); mTvRightOverlay.setSupportBackgroundTintList(ColorStateList.valueOf(color)); mTvRightOverlay.setTextColor(Color.WHITE); mTvRightOverlay.setTextSize(38); mTvRightOverlay.setGravity(Gravity.CENTER); int size = IndexBar.dp2px(mContext, 72); LayoutParams params = new LayoutParams(size, size); params.rightMargin = IndexBar.dp2px(mContext, 33); params.gravity = Gravity.RIGHT; mTvRightOverlay.setLayoutParams(params); mTvRightOverlay.setVisibility(INVISIBLE); } private static class BindDatasHanlder extends Handler { private final WeakReference<IndexableStickyListView> mIndexListView; public BindDatasHanlder(Looper looper, IndexableStickyListView indexListView) { super(looper); mIndexListView = new WeakReference<>(indexListView); } @Override public void handleMessage(Message msg) { super.handleMessage(msg); final IndexableStickyListView indexListView = mIndexListView.get(); indexListView.mAdapter.setNeedShutdown(false); indexListView.mAdapter.setDatas(indexListView.mItems, indexListView.mHeaderEntities); if (indexListView.mAdapter.isNeedShutdown()) return; ((Activity) 
indexListView.mContext).runOnUiThread(new Runnable() { @Override public void run() { indexListView.updateListView(); } }); } } /** * 绑定数据 * * @param items 继承IndexEntity的List * @param headerEntities IndexListView自定义Header 如添加定位,热门城市等 * @param <T> 继承IndexEntity */ public <T extends IndexEntity> void bindDatas(final List<T> items, final IndexHeaderEntity... headerEntities) { mItems = new ArrayList<>(); mHeaderEntities = new IndexHeaderEntity[headerEntities.length]; mItems.addAll(items); for (int i = 0; i < headerEntities.length; i++) { mHeaderEntities[i] = headerEntities[i]; } if (mAdapter == null) { return; } if (mContext instanceof Activity) { mAdapter.setNeedShutdown(true); if (mBindDataHandlerThread == null) { mBindDataHandlerThread = new HandlerThread("BindData_Thread"); mBindDataHandlerThread.start(); mBindDataHandler = new BindDatasHanlder(mBindDataHandlerThread.getLooper(), this); } mBindDataHandler.sendEmptyMessage(MSG_BIND_DATA); } else { mAdapter.setDatas(mItems, headerEntities); updateListView(); } } /** * 搜索过滤处理 * * @param newText 变化后的内容 */ public void searchTextChange(final String newText) { mIndexBar.searchTextChange(newText); } /** * 为IndexListView设置Adapter */ public <T extends IndexEntity> void setAdapter(IndexableAdapter<T> adapter) { mAdapter = adapter; mAdapter.setParent(this); mListView.setAdapter(adapter); if (mItems != null) { bindDatas(mItems, mHeaderEntities); } } /** * 设置 IndexListView 标题头 部分的点击事件 * * @param listener */ public void setOnItemTitleClickListener(OnItemTitleClickListener listener) { mOnTitleListener = listener; } /** * 设置 IndexListView 内容item 部分的点击事件 * * @param listener */ public void setOnItemContentClickListener(OnItemContentClickListener listener) { mOnContentListener = listener; } @Override public void onItemClick(AdapterView<?> parent, View view, int position, long id) { if (position < mListView.getHeaderViewsCount()) return; Object object = mAdapter.getItem(position - mListView.getHeaderViewsCount()); if (mOnTitleListener 
!= null && object instanceof String) { String title = (String) object; mOnTitleListener.onItemClick(view, title); } else if (mOnContentListener != null && object instanceof IndexEntity) { IndexEntity indexEntity = (IndexEntity) object; mOnContentListener.onItemClick(view, indexEntity); } } @Override public void onScrollStateChanged(AbsListView view, int scrollState) { mIndexBar.onListViewScrollStateChanged(scrollState); } @Override public void onScroll(AbsListView view, int firstVisibleItem, int visibleItemCount, int totalItemCount) { mIndexBar.onListViewScroll(firstVisibleItem); if (mTitleHeight == 0 || mTitleMap == null) return; if (firstVisibleItem < mListView.getHeaderViewsCount()) { if (mStickView.getVisibility() == VISIBLE) { mStickView.setVisibility(INVISIBLE); } return; } else if (firstVisibleItem == mListView.getHeaderViewsCount()) { if (mStickView.getVisibility() != VISIBLE) { if (mAdapter == null || !mAdapter.isFilter()) { mStickView.setVisibility(VISIBLE); } } } if (firstVisibleItem > mCurrentScrollItemPosition) { // 向下 mCurrentScrollItemPosition = firstVisibleItem; processStick(firstVisibleItem, totalItemCount); } else if (firstVisibleItem < mCurrentScrollItemPosition) { // 向上 mCurrentScrollItemPosition = firstVisibleItem; processStick(firstVisibleItem, totalItemCount); } else { View firstView = mListView.getChildAt(0); if (firstView == null) return; int top = firstView.getTop(); if (top < mCurrentScrollItemTop) { // 向下 mCurrentScrollItemTop = top; processStick(firstVisibleItem, totalItemCount); } else { // 向上 mCurrentScrollItemTop = top; processStick(firstVisibleItem, totalItemCount); } } } private void processStick(int firstVisibleItem, int totalItemCount) { if (firstVisibleItem < totalItemCount - 1 && mTitleMap.get(firstVisibleItem - mListView.getHeaderViewsCount() + 1) != null) { int nextTop = mListView.getChildAt(1).getTop(); if (nextTop <= mTitleHeight) { if (mStickView.getVisibility() != VISIBLE) { if (mAdapter == null || !mAdapter.isFilter()) { 
mStickView.setVisibility(VISIBLE); } } mStickView.setTranslationY(nextTop - mTitleHeight); } } } public interface OnItemContentClickListener { void onItemClick(View v, IndexEntity indexEntity); } public interface OnItemTitleClickListener { void onItemClick(View v, String title); } /** * 更新相关View数据 */ private void updateListView() { mListView.post(new Runnable() { @Override public void run() { final TextView titleTextView = mAdapter.getTitleTextView(); titleTextView.post(new Runnable() { @Override public void run() { mTitleHeight = titleTextView.getHeight(); } }); } }); mTitleMap = mAdapter.getTitleMap(); if (mStickView == null) { if (mTitleMap.size() > 0) { View view = mAdapter.getView(mTitleMap.keyAt(0), null, mListView); mStickView = (TextView) view; addView(mStickView, 1); mStickView.setOnClickListener(new OnClickListener() { @Override public void onClick(View v) { if (mOnTitleListener != null) { mOnTitleListener.onItemClick(v, mStickView.getText().toString()); } } }); if (mListView.getHeaderViewsCount() > 0) { mStickView.setVisibility(INVISIBLE); } } mIndexBar.setOnSearchResultListener(new IndexBar.OnSearchResultListener() { @Override public void onStart() { if (!mSearchLayout.isProgressVisible()) { if (mContext instanceof Activity) { ((Activity) mContext).runOnUiThread(new Runnable() { @Override public void run() { mSearchLayout.showProgress(); } }); } } } @Override public void onResult(boolean isSearch, int dataSize) { if (mAdapter == null) return; if (!isSearch || dataSize > 0) { mSearchLayout.hide(); } else { mSearchLayout.showTip(); } mListView.setSelection(1); if (mAdapter.isFilter()) { if (mAddHeaderViewList != null) { for (View view : mAddHeaderViewList) { if (view.getHeight() != 0) { view.setTag(view.getHeight()); view.getLayoutParams().height = 1; } } } if (mStickView != null && mStickView.getVisibility() == VISIBLE) { mStickView.setVisibility(INVISIBLE); } } else { if (mAddHeaderViewList != null) { for (View view : mAddHeaderViewList) { 
view.getLayoutParams().height = (int) view.getTag(); } } if (mStickView != null && mStickView.getVisibility() != VISIBLE) { mStickView.setVisibility(VISIBLE); } } mListView.smoothScrollToPosition(0); } }); } else { if (mTitleMap.size() == 0) { removeView(mStickView); mStickView = null; } else { String title = mAdapter.getItemTitle(mListView.getFirstVisiblePosition()); mStickView.setText(title); } } mAdapter.notifyDataSetChanged(); mIndexBar.setListView(mListView); mIndexBar.postInvalidate(); } @Override protected void onDetachedFromWindow() { if (mBindDataHandlerThread != null) { mBindDataHandlerThread.quit(); } super.onDetachedFromWindow(); } }
9,330
848
<reponame>hito0512/Vitis-AI /* * Copyright 2019 Xilinx Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "medicalsegmentation_post.hpp" #include <sys/stat.h> #include <vitis/ai/image_util.hpp> #include <vitis/ai/library/tensor.hpp> #include <vitis/ai/math.hpp> #include <vitis/ai/profiling.hpp> using namespace std; namespace vitis { namespace ai { MedicalSegmentationPost::~MedicalSegmentationPost() {} MedicalSegmentationPost::MedicalSegmentationPost( const std::vector<vitis::ai::library::InputTensor>& input_tensors, const std::vector<vitis::ai::library::OutputTensor>& output_tensors, const vitis::ai::proto::DpuModelParam& config, int& real_batch_sizex) : input_tensors_(input_tensors), output_tensors_(output_tensors), real_batch_size(real_batch_sizex) {} std::vector<vitis::ai::MedicalSegmentationResult> MedicalSegmentationPost::medicalsegmentation_post_process() { auto ret = std::vector<vitis::ai::MedicalSegmentationResult>{}; ret.reserve(real_batch_size); for (auto i = 0; i < real_batch_size; ++i) { ret.emplace_back(medicalsegmentation_post_process(i)); } return ret; } vitis::ai::MedicalSegmentationResult MedicalSegmentationPost::medicalsegmentation_post_process( unsigned int batch_idx) { std::vector<cv::Mat> segMatV(5); for (int j = 0; j < 5; ++j) { unsigned int col_ind = 0; unsigned int row_ind = 0; auto output_layer = output_tensors_[j]; cv::Mat segMat(output_layer.height, output_layer.width, CV_8UC1); for (size_t i = 0; i < output_layer.height * 
output_layer.width * output_layer.channel; i = i + output_layer.channel) { auto max_ind = std::max_element(((int8_t*)output_layer.get_data(batch_idx)) + i, ((int8_t*)output_layer.get_data(batch_idx)) + i + output_layer.channel); uint8_t posit = std::distance( ((int8_t*)output_layer.get_data(batch_idx)) + i, max_ind); segMat.at<uchar>(row_ind, col_ind) = posit; col_ind++; if (col_ind > output_layer.width - 1) { row_ind++; col_ind = 0; } } segMatV[j] = segMat; } return MedicalSegmentationResult{(int)input_tensors_[0].width, (int)input_tensors_[0].height, segMatV}; } } // namespace ai } // namespace vitis
1,153
422
<filename>java/edu/ucsd/cs/riffa/FpgaInfo.java // ---------------------------------------------------------------------- // Copyright (c) 2016, The Regents of the University of California All // rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following // disclaimer in the documentation and/or other materials provided // with the distribution. // // * Neither the name of The Regents of the University of California // nor the names of its contributors may be used to endorse or // promote products derived from this software without specific // prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL REGENTS OF THE // UNIVERSITY OF CALIFORNIA BE LIABLE FOR ANY DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, // BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS // OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR // TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. // ---------------------------------------------------------------------- /* * Filename: FpgaInfo.java * Version: 2.0 * Description: Java API for RIFFA. * Author: <NAME> * History: @mattj: Initial release. Version 2.0. 
*/ /** * Value object to hold information about all the installed FPGA accessible by * RIFFA. */ package edu.ucsd.cs.riffa; public class FpgaInfo { private static final int NUM_FPGAS = 5; private int numFpgas; private int[] numChannels; private int[] id; private int[] vendorId; private int[] deviceId; private String[] name; /** * Default constructor. */ public FpgaInfo() { this.numFpgas = 0; this.name = new String[NUM_FPGAS]; this.id = new int[NUM_FPGAS]; this.numChannels = new int[NUM_FPGAS]; this.deviceId = new int[NUM_FPGAS]; this.vendorId = new int[NUM_FPGAS]; } /** * Returns the number of RIFFA accessible FPGAs installed in the system. * * @returns Number of RIFFA accessible FPGAs installed in the system. */ public int getNumFpgas() { return this.numFpgas; } /** * Sets the number of RIFFA accessible FPGAs installed in the system. * * @param val - Number of RIFFA accessible FPGAs installed in the system. */ public void setNumFpgas(int val) { this.numFpgas = val; } /** * Returns the number of RIFFA channels configured on the FPGA at position * pos. * * @returns Number of RIFFA channels configured on the FPGA at position pos. */ public int getNumChannels(int pos) { return this.numChannels[pos]; } /** * Sets the number of RIFFA channels configured on the FPGA at position pos. * * @param pos - Position of FPGA. * @param val - Number of RIFFA channels configured on the FPGA at position * pos. */ public void setNumChannels(int pos, int val) { this.numChannels[pos] = val; } /** * Returns the FPGA id at position pos. This id is used to open the FPGA on * the Fpga's open method. * * @returns FPGA id at position pos.. */ public int getId(int pos) { return this.id[pos]; } /** * Sets the FPGA id at position pos. * * @param pos - Position of FPGA. * @param val - FPGA id at position pos. */ public void setId(int pos, int val) { this.id[pos] = val; } /** * Returns the name of the FPGA at position pos. This is typically the PCIe * bus and slot number. 
* * @returns Name of the FPGA at position pos. */ public String getName(int pos) { return this.name[pos]; } /** * Sets the name of the FPGA at position pos. * * @param pos - Position of FPGA. * @param val - Name of the FPGA at position pos. */ public void setName(int pos, String val) { this.name[pos] = val; } /** * Returns the FPGA vendor id at position pos. * * @returns The FPGA vendor id at position pos. */ public int getVendorId(int pos) { return this.vendorId[pos]; } /** * Sets the FPGA vendor id at position pos. * * @param pos - Position of FPGA. * @param val - The FPGA vendor id at position pos. */ public void setVendorId(int pos, int val) { this.vendorId[pos] = val; } /** * Returns the FPGA device id at position pos. * * @returns The FPGA device id at position pos. */ public int getDeviceId(int pos) { return this.deviceId[pos]; } /** * Sets the FPGA device id at position pos. * * @param pos - Position of FPGA. * @param val - The FPGA device id at position pos. */ public void setDeviceId(int pos, int val) { this.deviceId[pos] = val; } /** * Returns a nicely formatted listing of all the RIFFA FPGAs detected. * * @returns a nicely formatted String of all the RIFFA FPGAs detected. */ public String toString() { StringBuffer buffy = new StringBuffer(); String eol = System.getProperty("line.separator"); buffy.append("num fpgas: " + this.numFpgas + eol); for (int i=0; i < this.numFpgas; i++) { buffy.append("id: " + this.id[i] + eol); buffy.append("name: " + this.name[i] + eol); buffy.append("num channels: " + this.numChannels[i] + eol); buffy.append("vendor id: " + Integer.toHexString(this.vendorId[i]) + eol); buffy.append("device id: " + Integer.toHexString(this.deviceId[i]) + eol); } return buffy.toString(); } }
2,096
977
<filename>src/test/java/io/leangen/graphql/PolymorphicJacksonTest.java package io.leangen.graphql; import com.fasterxml.jackson.annotation.JsonSubTypes; import com.fasterxml.jackson.annotation.JsonTypeInfo; import com.fasterxml.jackson.annotation.JsonTypeName; import graphql.ExecutionResult; import graphql.GraphQL; import graphql.schema.GraphQLInputObjectField; import graphql.schema.GraphQLInputObjectType; import graphql.schema.GraphQLSchema; import graphql.schema.GraphQLType; import io.leangen.geantyref.TypeFactory; import io.leangen.graphql.annotations.GraphQLQuery; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; import static io.leangen.graphql.support.GraphQLTypeAssertions.assertEnum; import static io.leangen.graphql.support.QueryResultAssertions.assertNoErrors; import static io.leangen.graphql.support.QueryResultAssertions.assertValueAtPathEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertSame; @RunWith(Parameterized.class) public class PolymorphicJacksonTest { @Parameterized.Parameter public boolean abstractInputResolverEnabled; @Parameterized.Parameters(name = "{index}: {0}") public static Object[] data() { return new Object[] { Boolean.TRUE, Boolean.FALSE }; } @Test public void testExplicitSubtypeInfoOnParentWithDiscriminator() { checkTypeDiscriminator(new Service<>(), Vehicle.class, "Vehicle", "vehicleType", "CarX", "TruckX"); } @Test public void testExplicitSubtypeInfoOnParentNoDiscriminator() { checkTypeDiscriminator(new Service<>(), Event.class, "Event", "_type_", "ConcertX", "ExhibitionX"); } @Test public void testExplicitSubtypeInfoOnChildWithDiscriminator() { checkTypeDiscriminator(new Service<>(), Pet.class, "Pet", "species", "CatX", "DogX"); } @Test public void testExplicitSubtypeInfoOnChildNoDiscriminator() { checkTypeDiscriminator(new Service<>(), Nut.class, "Nut", "_type_", "WalnutX", "ChestnutX"); } @Test public void 
testSingleExplicitSubtypeWithDiscriminator() { checkTypeDiscriminator(new Service<>(), Game.class, "Game", "kind", "CardGameX"); } @Test public void testSingleExplicitSubtypeNoDiscriminator() { checkTypeDiscriminator(new Service<>(), Abstract.class, "Abstract", "_type_", "ConcreteX"); } private <T extends Named> void checkTypeDiscriminator(Service<T> service, Class<T> serviceType, String typeName, String discriminatorField, String... subTypes) { GraphQLSchemaGenerator generator = new TestSchemaGenerator() .withOperationsFromSingleton(service, TypeFactory.parameterizedClass(Service.class, serviceType)); if (abstractInputResolverEnabled) { generator.withAbstractInputTypeResolution(); } GraphQLSchema schema = generator .generate(); GraphQLType vehicleTypeDisambiguator = schema.getType(typeName + "TypeDisambiguator"); assertNotNull(vehicleTypeDisambiguator); assertEnum(vehicleTypeDisambiguator, subTypes); GraphQLInputObjectField vehicleType = ((GraphQLInputObjectType) schema.getType(typeName + "Input")) .getFieldDefinition(discriminatorField); assertNotNull(vehicleType); assertSame(vehicleTypeDisambiguator, vehicleType.getType()); GraphQL exe = GraphQL.newGraphQL(schema).build(); ExecutionResult result = exe.execute("{" + "test (in: {" + " name: \"yay\"" + " " + discriminatorField + ": " + subTypes[0] + "}) {" + " name}}"); assertNoErrors(result); assertValueAtPathEquals("yay" + subTypes[0], result, "test.name"); } public static class Service<T extends Named> { @GraphQLQuery public T test(T in) { in.name += in.getClass().getSimpleName() + "X"; return in; } } //TODO Consider supporting field-level disambiguation /*public static class OnField { @JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "vehicleType") @JsonSubTypes({ @JsonSubTypes.Type(value = Car.class, name = "CarX"), @JsonSubTypes.Type(value = Truck.class, name = "TruckX") }) public Vehicle vehicle; }*/ public abstract static class Named { public String name; } @JsonTypeInfo(use = JsonTypeInfo.Id.NAME, 
property = "vehicleType") @JsonSubTypes({ @JsonSubTypes.Type(value = Car.class, name = "CarX"), @JsonSubTypes.Type(value = Truck.class, name = "TruckX") }) public abstract static class Vehicle extends Named { public String regNumber; } public static class Car extends Vehicle { public String carField = "Car"; } public static class Truck extends Vehicle { public String truckField = "Truck"; } @JsonSubTypes({ @JsonSubTypes.Type(value = Concert.class, name = "ConcertX"), @JsonSubTypes.Type(value = Exhibition.class, name = "ExhibitionX") }) public abstract static class Event extends Named { public String venue; } public static class Concert extends Event { public String bandName = "b4nD"; } public static class Exhibition extends Event { public String artist = "<NAME>"; } @JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "species") @JsonSubTypes({ @JsonSubTypes.Type(value = Cat.class), @JsonSubTypes.Type(value = Dog.class) }) public abstract static class Pet extends Named { public String sound; } @JsonTypeName("CatX") public static class Cat extends Pet { public int clawLength = 99; } @JsonTypeName("DogX") public static class Dog extends Pet { public int barkVolume = 99; } @JsonSubTypes({ @JsonSubTypes.Type(value = Walnut.class), @JsonSubTypes.Type(value = Chestnut.class) }) public abstract static class Nut extends Named { public String weight; } @JsonTypeName("WalnutX") public static class Walnut extends Nut { public int smell = 99; } @JsonTypeName("ChestnutX") public static class Chestnut extends Nut { public int color = 99; } @JsonSubTypes(@JsonSubTypes.Type(value = Concrete.class, name = "ConcreteX")) public abstract static class Abstract extends Named { public String abs; } public static class Concrete extends Abstract { public int thickness = 99; } @JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "kind") @JsonSubTypes(@JsonSubTypes.Type(value = CardGame.class, name = "CardGameX")) public abstract static class Game extends Named { public String duration; } 
public static class CardGame extends Game { public int deckType = 99; } }
2,730
678
<gh_stars>100-1000 /** * This header is generated by class-dump-z 0.2b. * * Source: /System/Library/PrivateFrameworks/CalDAV.framework/CalDAV */ #import <CalDAV/XXUnknownSuperclass.h> @interface CalDAVCalendarInfoTaskGroup : XXUnknownSuperclass { } - (id)initWithAccountInfoProvider:(id)accountInfoProvider containerURLs:(id)urls taskManager:(id)manager; // 0xe3d - (id)_copyContainerParserMappings; // 0x105d - (id)_copyContainerWithURL:(id)url andProperties:(id)properties; // 0x1021 - (id)containerForURL:(id)url; // 0xeb5 @end
194
695
<filename>core/src/event/Event.java /* * To change this template, choose Tools | Templates * and open the template in the editor. */ package event; import com.jme3.network.AbstractMessage; import com.jme3.network.serializing.Serializable; @Serializable public abstract class Event extends AbstractMessage { public Event() { } public String getName() { return this.getClass().getSimpleName(); } }
123
552
#include <fcntl.h> #include <errno.h> #include <sys/types.h> #include <unistd.h> #define LINUX_FILE_BUFFER_SIZE 8192
54
684
/* Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.activiti.engine.test.api.repository; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.ObjectOutputStream; import java.util.Date; import java.util.List; import java.util.zip.ZipInputStream; import org.activiti.bpmn.model.BpmnModel; import org.activiti.bpmn.model.EndEvent; import org.activiti.bpmn.model.ParallelGateway; import org.activiti.bpmn.model.StartEvent; import org.activiti.bpmn.model.UserTask; import org.activiti.engine.ActivitiException; import org.activiti.engine.ActivitiIllegalArgumentException; import org.activiti.engine.ActivitiObjectNotFoundException; import org.activiti.engine.impl.test.PluggableActivitiTestCase; import org.activiti.engine.repository.Model; import org.activiti.engine.repository.ProcessDefinition; import org.activiti.engine.test.Deployment; /** * @author <NAME> * @author <NAME> */ public class RepositoryServiceTest extends PluggableActivitiTestCase { @Deployment(resources = { "org/activiti/engine/test/api/oneTaskProcess.bpmn20.xml"}) public void testStartProcessInstanceById() { List<ProcessDefinition> processDefinitions = repositoryService.createProcessDefinitionQuery().list(); assertEquals(1, processDefinitions.size()); ProcessDefinition processDefinition = processDefinitions.get(0); assertEquals("oneTaskProcess", processDefinition.getKey()); assertNotNull(processDefinition.getId()); } @Deployment(resources={ 
"org/activiti/engine/test/api/oneTaskProcess.bpmn20.xml"}) public void testFindProcessDefinitionById() { List<ProcessDefinition> definitions = repositoryService.createProcessDefinitionQuery().list(); assertEquals(1, definitions.size()); ProcessDefinition processDefinition = repositoryService.createProcessDefinitionQuery().processDefinitionId(definitions.get(0).getId()).singleResult(); runtimeService.startProcessInstanceByKey("oneTaskProcess"); assertNotNull(processDefinition); assertEquals("oneTaskProcess", processDefinition.getKey()); assertEquals("The One Task Process", processDefinition.getName()); processDefinition = repositoryService.getProcessDefinition(definitions.get(0).getId()); assertEquals("This is a process for testing purposes", processDefinition.getDescription()); } @Deployment(resources = { "org/activiti/engine/test/api/oneTaskProcess.bpmn20.xml" }) public void testDeleteDeploymentWithRunningInstances() { List<ProcessDefinition> processDefinitions = repositoryService.createProcessDefinitionQuery().list(); assertEquals(1, processDefinitions.size()); ProcessDefinition processDefinition = processDefinitions.get(0); runtimeService.startProcessInstanceById(processDefinition.getId()); // Try to delete the deployment try { repositoryService.deleteDeployment(processDefinition.getDeploymentId()); fail("Exception expected"); } catch (RuntimeException ae) { // Exception expected when deleting deployment with running process } } public void testDeleteDeploymentNullDeploymentId() { try { repositoryService.deleteDeployment(null); fail("ActivitiException expected"); } catch (ActivitiIllegalArgumentException ae) { assertTextPresent("deploymentId is null", ae.getMessage()); } } public void testDeleteDeploymentCascadeNullDeploymentId() { try { repositoryService.deleteDeployment(null, true); fail("ActivitiException expected"); } catch (ActivitiIllegalArgumentException ae) { assertTextPresent("deploymentId is null", ae.getMessage()); } } public void 
testDeleteDeploymentNonExistentDeploymentId() { try { repositoryService.deleteDeployment("foobar"); fail("ActivitiException expected"); } catch (ActivitiObjectNotFoundException ae) { assertTextPresent("Could not find a deployment with id 'foobar'.", ae.getMessage()); } catch (Throwable t) { fail("Unexpected exception: " + t); } } public void testDeleteDeploymentCascadeNonExistentDeploymentId() { try { repositoryService.deleteDeployment("foobar", true); fail("ActivitiException expected"); } catch (ActivitiObjectNotFoundException ae) { assertTextPresent("Could not find a deployment with id 'foobar'.", ae.getMessage()); } catch (Throwable t) { fail("Unexpected exception: " + t); } } @Deployment(resources = { "org/activiti/engine/test/api/oneTaskProcess.bpmn20.xml" }) public void testDeleteDeploymentCascadeWithRunningInstances() { List<ProcessDefinition> processDefinitions = repositoryService.createProcessDefinitionQuery().list(); assertEquals(1, processDefinitions.size()); ProcessDefinition processDefinition = processDefinitions.get(0); runtimeService.startProcessInstanceById(processDefinition.getId()); // Try to delete the deployment, no exception should be thrown repositoryService.deleteDeployment(processDefinition.getDeploymentId(), true); } public void testFindDeploymentResourceNamesNullDeploymentId() { try { repositoryService.getDeploymentResourceNames(null); fail("ActivitiException expected"); } catch (ActivitiIllegalArgumentException ae) { assertTextPresent("deploymentId is null", ae.getMessage()); } } public void testDeploymentWithDelayedProcessDefinitionActivation() { Date startTime = new Date(); processEngineConfiguration.getClock().setCurrentTime(startTime); Date inThreeDays = new Date(startTime.getTime() + (3 * 24 * 60 * 60 * 1000)); // Deploy process, but activate after three days org.activiti.engine.repository.Deployment deployment = repositoryService.createDeployment() .addClasspathResource("org/activiti/engine/test/api/oneTaskProcess.bpmn20.xml") 
.addClasspathResource("org/activiti/engine/test/api/twoTasksProcess.bpmn20.xml") .activateProcessDefinitionsOn(inThreeDays) .deploy(); assertEquals(1, repositoryService.createDeploymentQuery().count()); assertEquals(2, repositoryService.createProcessDefinitionQuery().count()); assertEquals(2, repositoryService.createProcessDefinitionQuery().suspended().count()); assertEquals(0, repositoryService.createProcessDefinitionQuery().active().count()); // Shouldn't be able to start a process instance try { runtimeService.startProcessInstanceByKey("oneTaskProcess"); fail(); } catch (ActivitiException e) { assertTextPresentIgnoreCase("suspended", e.getMessage()); } // Move time four days forward, the timer will fire and the process definitions will be active Date inFourDays = new Date(startTime.getTime() + (4 * 24 * 60 * 60 * 1000)); processEngineConfiguration.getClock().setCurrentTime(inFourDays); waitForJobExecutorToProcessAllJobs(5000L, 50L); assertEquals(1, repositoryService.createDeploymentQuery().count()); assertEquals(2, repositoryService.createProcessDefinitionQuery().count()); assertEquals(0, repositoryService.createProcessDefinitionQuery().suspended().count()); assertEquals(2, repositoryService.createProcessDefinitionQuery().active().count()); // Should be able to start process instance runtimeService.startProcessInstanceByKey("oneTaskProcess"); assertEquals(1, runtimeService.createProcessInstanceQuery().count()); // Cleanup repositoryService.deleteDeployment(deployment.getId(), true); } @Deployment(resources = { "org/activiti/engine/test/api/oneTaskProcess.bpmn20.xml" }) public void testGetResourceAsStreamUnexistingResourceInExistingDeployment() { // Get hold of the deployment id org.activiti.engine.repository.Deployment deployment = repositoryService.createDeploymentQuery().singleResult(); try { repositoryService.getResourceAsStream(deployment.getId(), "org/activiti/engine/test/api/unexistingProcess.bpmn.xml"); fail("ActivitiException expected"); } catch 
(ActivitiObjectNotFoundException ae) { assertTextPresent("no resource found with name", ae.getMessage()); assertEquals(InputStream.class, ae.getObjectClass()); } } @Deployment(resources = { "org/activiti/engine/test/api/oneTaskProcess.bpmn20.xml" }) public void testGetResourceAsStreamUnexistingDeployment() { try { repositoryService.getResourceAsStream("unexistingdeployment", "org/activiti/engine/test/api/unexistingProcess.bpmn.xml"); fail("ActivitiException expected"); } catch (ActivitiObjectNotFoundException ae) { assertTextPresent("deployment does not exist", ae.getMessage()); assertEquals(org.activiti.engine.repository.Deployment.class, ae.getObjectClass()); } } public void testGetResourceAsStreamNullArguments() { try { repositoryService.getResourceAsStream(null, "resource"); fail("ActivitiException expected"); } catch (ActivitiIllegalArgumentException ae) { assertTextPresent("deploymentId is null", ae.getMessage()); } try { repositoryService.getResourceAsStream("deployment", null); fail("ActivitiException expected"); } catch (ActivitiIllegalArgumentException ae) { assertTextPresent("resourceName is null", ae.getMessage()); } } public void testNewModelPersistence() { Model model = repositoryService.newModel(); assertNotNull(model); model.setName("Test model"); model.setCategory("test"); model.setMetaInfo("meta"); repositoryService.saveModel(model); assertNotNull(model.getId()); model = repositoryService.getModel(model.getId()); assertNotNull(model); assertEquals("Test model", model.getName()); assertEquals("test", model.getCategory()); assertEquals("meta", model.getMetaInfo()); assertNotNull(model.getCreateTime()); assertEquals(Integer.valueOf(1), model.getVersion()); repositoryService.deleteModel(model.getId()); } public void testNewModelWithSource() throws Exception { Model model = repositoryService.newModel(); model.setName("Test model"); byte[] testSource = "modelsource".getBytes("utf-8"); repositoryService.saveModel(model); assertNotNull(model.getId()); 
repositoryService.addModelEditorSource(model.getId(), testSource); model = repositoryService.getModel(model.getId()); assertNotNull(model); assertEquals("Test model", model.getName()); byte[] editorSourceBytes = repositoryService.getModelEditorSource(model.getId()); assertEquals("modelsource", new String(editorSourceBytes, "utf-8")); repositoryService.deleteModel(model.getId()); } public void testUpdateModelPersistence() throws Exception { Model model = repositoryService.newModel(); assertNotNull(model); model.setName("<NAME>"); model.setCategory("test"); model.setMetaInfo("meta"); repositoryService.saveModel(model); assertNotNull(model.getId()); model = repositoryService.getModel(model.getId()); assertNotNull(model); model.setName("<NAME>"); model.setCategory("New category"); model.setMetaInfo("test"); model.setVersion(2); repositoryService.saveModel(model); assertNotNull(model.getId()); repositoryService.addModelEditorSource(model.getId(), "new".getBytes("utf-8")); repositoryService.addModelEditorSourceExtra(model.getId(), "new".getBytes("utf-8")); model = repositoryService.getModel(model.getId()); assertEquals("New name", model.getName()); assertEquals("New category", model.getCategory()); assertEquals("test", model.getMetaInfo()); assertNotNull(model.getCreateTime()); assertEquals(Integer.valueOf(2), model.getVersion()); assertEquals("new", new String(repositoryService.getModelEditorSource(model.getId()), "utf-8")); assertEquals("new", new String(repositoryService.getModelEditorSourceExtra(model.getId()), "utf-8")); repositoryService.deleteModel(model.getId()); } @Deployment(resources = { "org/activiti/engine/test/api/oneTaskProcess.bpmn20.xml"}) public void testProcessDefinitionEntitySerializable() { String procDefId = repositoryService.createProcessDefinitionQuery().singleResult().getId(); ProcessDefinition processDefinition = repositoryService.getProcessDefinition(procDefId); try { ByteArrayOutputStream baos = new ByteArrayOutputStream(); new 
ObjectOutputStream(baos).writeObject(processDefinition); byte[] bytes = baos.toByteArray(); assertTrue(bytes.length > 0); } catch (IOException e) { e.printStackTrace(); fail(); } } @Deployment public void testGetBpmnModel() { ProcessDefinition processDefinition = repositoryService.createProcessDefinitionQuery().singleResult(); // Some basic assertions BpmnModel bpmnModel = repositoryService.getBpmnModel(processDefinition.getId()); assertNotNull(bpmnModel); assertEquals(1, bpmnModel.getProcesses().size()); assertTrue(!bpmnModel.getLocationMap().isEmpty()); assertTrue(!bpmnModel.getFlowLocationMap().isEmpty()); // Test the flow org.activiti.bpmn.model.Process process = bpmnModel.getProcesses().get(0); List<StartEvent> startEvents = process.findFlowElementsOfType(StartEvent.class); assertEquals(1, startEvents.size()); StartEvent startEvent = startEvents.get(0); assertEquals(1, startEvent.getOutgoingFlows().size()); assertEquals(0, startEvent.getIncomingFlows().size()); String nextElementId = startEvent.getOutgoingFlows().get(0).getTargetRef(); UserTask userTask = (UserTask) process.getFlowElement(nextElementId); assertEquals("First Task", userTask.getName()); assertEquals(1, userTask.getOutgoingFlows().size()); assertEquals(1, userTask.getIncomingFlows().size()); nextElementId = userTask.getOutgoingFlows().get(0).getTargetRef(); ParallelGateway parallelGateway = (ParallelGateway) process.getFlowElement(nextElementId); assertEquals(2, parallelGateway.getOutgoingFlows().size()); nextElementId = parallelGateway.getOutgoingFlows().get(0).getTargetRef(); assertEquals(1, parallelGateway.getIncomingFlows().size()); userTask = (UserTask) process.getFlowElement(nextElementId); assertEquals(1, userTask.getOutgoingFlows().size()); nextElementId = userTask.getOutgoingFlows().get(0).getTargetRef(); parallelGateway = (ParallelGateway) process.getFlowElement(nextElementId); assertEquals(1, parallelGateway.getOutgoingFlows().size()); assertEquals(2, 
parallelGateway.getIncomingFlows().size()); nextElementId = parallelGateway.getOutgoingFlows().get(0).getTargetRef(); EndEvent endEvent = (EndEvent) process.getFlowElement(nextElementId); assertEquals(0, endEvent.getOutgoingFlows().size()); assertEquals(1, endEvent.getIncomingFlows().size()); } /** * This test was added due to issues with unzip of JDK 7, * where the default is changed to UTF8 instead of the platform * encoding (which is, in fact, good). * However, some platforms do not create UTF8-compatible ZIP files. * * The tested zip file is created on OS X (non-UTF-8). * * See https://blogs.oracle.com/xuemingshen/entry/non_utf_8_encoding_in */ public void testDeployZipFile() { InputStream inputStream = this.getClass().getClassLoader().getResourceAsStream("org/activiti/engine/test/api/repository/test-processes.zip"); assertNotNull(inputStream); ZipInputStream zipInputStream = new ZipInputStream(inputStream); assertNotNull(zipInputStream); repositoryService.createDeployment() .addZipInputStream(zipInputStream) .deploy(); assertEquals(6, repositoryService.createProcessDefinitionQuery().count()); // Delete for (org.activiti.engine.repository.Deployment deployment : repositoryService.createDeploymentQuery().list()) { repositoryService.deleteDeployment(deployment.getId(), true); } } }
5,457
369
<reponame>laraschmidt/cdap /* * Copyright © 2021 <NAME>, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. * */ package io.cdap.cdap.etl.mock.common; import io.cdap.cdap.api.plugin.PluginConfigurer; import io.cdap.cdap.etl.api.FailureCollector; import io.cdap.cdap.etl.api.connector.ConnectorContext; import io.cdap.cdap.etl.proto.validation.SimpleFailureCollector; /** * Mock context for connector */ public class MockConnectorContext implements ConnectorContext { private final FailureCollector failureCollector; private final PluginConfigurer pluginConfigurer; public MockConnectorContext(MockConnectorConfigurer pluginConfigurer) { this.failureCollector = new SimpleFailureCollector(); this.pluginConfigurer = pluginConfigurer; } @Override public FailureCollector getFailureCollector() { return failureCollector; } @Override public PluginConfigurer getPluginConfigurer() { return pluginConfigurer; } }
422
14,668
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef NET_CERT_INTERNAL_TRUST_STORE_MAC_H_ #define NET_CERT_INTERNAL_TRUST_STORE_MAC_H_ #include <CoreFoundation/CoreFoundation.h> #include "base/gtest_prod_util.h" #include "base/mac/scoped_cftyperef.h" #include "base/memory/ref_counted.h" #include "net/base/net_export.h" #include "net/cert/internal/trust_store.h" namespace net { // TrustStoreMac is an implementation of TrustStore which uses macOS keychain // to find trust anchors for path building. Trust state is cached, so a single // TrustStoreMac instance should be created and used for all verifications of a // given policy. // TrustStoreMac objects are threadsafe and methods may be called from multiple // threads simultaneously. It is the owner's responsibility to ensure the // TrustStoreMac object outlives any threads accessing it. class NET_EXPORT TrustStoreMac : public TrustStore { public: // Bits representing different conditions encountered while evaluating // the trustSettings returned by SecTrustSettingsCopyTrustSettings. enum TrustDebugInfo { // The trustSettings array was empty. TRUST_SETTINGS_ARRAY_EMPTY = 1 << 0, // One of the trustSettings dictionaries was empty. TRUST_SETTINGS_DICT_EMPTY = 1 << 1, // One of the trustSettings dictionaries contained an unknown key. TRUST_SETTINGS_DICT_UNKNOWN_KEY = 1 << 2, // One of the trustSettings dictionaries contained a // kSecTrustSettingsPolicy key. TRUST_SETTINGS_DICT_CONTAINS_POLICY = 1 << 3, // One of the trustSettings dictionaries contained a // kSecTrustSettingsPolicy key with a value that was not a SecPolicyRef. TRUST_SETTINGS_DICT_INVALID_POLICY_TYPE = 1 << 4, // One of the trustSettings dictionaries contained a // kSecTrustSettingsApplication key. TRUST_SETTINGS_DICT_CONTAINS_APPLICATION = 1 << 5, // One of the trustSettings dictionaries contained a // kSecTrustSettingsPolicyString key. 
TRUST_SETTINGS_DICT_CONTAINS_POLICY_STRING = 1 << 6, // One of the trustSettings dictionaries contained a // kSecTrustSettingsKeyUsage key. TRUST_SETTINGS_DICT_CONTAINS_KEY_USAGE = 1 << 7, // One of the trustSettings dictionaries contained a // kSecTrustSettingsResult key. TRUST_SETTINGS_DICT_CONTAINS_RESULT = 1 << 8, // One of the trustSettings dictionaries contained a // kSecTrustSettingsResult key with a value that was not a CFNumber or // could not be represented by a signed int. TRUST_SETTINGS_DICT_INVALID_RESULT_TYPE = 1 << 9, // One of the trustSettings dictionaries contained a // kSecTrustSettingsAllowedError key. TRUST_SETTINGS_DICT_CONTAINS_ALLOWED_ERROR = 1 << 10, // SecTrustSettingsCopyTrustSettings returned a value other than // errSecSuccess or errSecItemNotFound. COPY_TRUST_SETTINGS_ERROR = 1 << 11, }; enum class TrustImplType { kUnknown = 0, kDomainCache = 1, kSimple = 2, kLruCache = 3, }; class ResultDebugData : public base::SupportsUserData::Data { public: static const ResultDebugData* Get(const base::SupportsUserData* debug_data); static ResultDebugData* GetOrCreate(base::SupportsUserData* debug_data); void UpdateTrustDebugInfo(int trust_debug_info, TrustImplType impl_type); // base::SupportsUserData::Data implementation: std::unique_ptr<Data> Clone() override; // Returns a bitfield of TrustDebugInfo flags. If multiple GetTrust calls // were done with the same SupportsUserData object, this will return the // union of all the TrustDebugInfo flags. int combined_trust_debug_info() const { return combined_trust_debug_info_; } // Returns an enum representing which trust implementation was used. TrustImplType trust_impl() const { return trust_impl_; } private: int combined_trust_debug_info_ = 0; TrustImplType trust_impl_ = TrustImplType::kUnknown; }; // Creates a TrustStoreMac which will find anchors that are trusted for // |policy_oid|. 
For list of possible policy values, see: // https://developer.apple.com/reference/security/1667150-certificate_key_and_trust_servic/1670151-standard_policies_for_specific_c?language=objc // |impl| selects which internal implementation is used for checking trust // settings, and the interpretation of |cache_size| varies depending on // |impl|. TrustStoreMac(CFStringRef policy_oid, TrustImplType impl, size_t cache_size); TrustStoreMac(const TrustStoreMac&) = delete; TrustStoreMac& operator=(const TrustStoreMac&) = delete; ~TrustStoreMac() override; // Initializes the trust cache, if it isn't already initialized. void InitializeTrustCache() const; // Returns true if the given certificate is present in the system trust // domain. bool IsKnownRoot(const ParsedCertificate* cert) const; // TrustStore implementation: void SyncGetIssuersOf(const ParsedCertificate* cert, ParsedCertificateList* issuers) override; CertificateTrust GetTrust(const ParsedCertificate* cert, base::SupportsUserData* debug_data) const override; private: class TrustImpl; class TrustImplDomainCache; class TrustImplNoCache; class TrustImplLRUCache; FRIEND_TEST_ALL_PREFIXES(TrustStoreMacImplTest, MultiRootNotTrusted); // Finds certificates in the OS keychains whose Subject matches |name_data|. // The result is an array of SecCertificateRef. static base::ScopedCFTypeRef<CFArrayRef> FindMatchingCertificatesForMacNormalizedSubject(CFDataRef name_data); // Returns the OS-normalized issuer of |cert|. // macOS internally uses a normalized form of subject/issuer names for // comparing, roughly similar to RFC3280's normalization scheme. The // normalized form is used for any database lookups and comparisons. static base::ScopedCFTypeRef<CFDataRef> GetMacNormalizedIssuer( const ParsedCertificate* cert); std::unique_ptr<TrustImpl> trust_cache_; }; } // namespace net #endif // NET_CERT_INTERNAL_TRUST_STORE_MAC_H_
1,945
479
<filename>ctypes_generation/extended_structs/_EVENT_RECORD.py _OLD_EVENT_RECORD = _EVENT_RECORD class _EVENT_RECORD(_OLD_EVENT_RECORD): pass
59
8,633
from time import sleep from typing import List, Tuple import requests # dbt Cloud Trigger Job API -> https://docs.getdbt.com/dbt-cloud/api-v2#operation/triggerRun __DBT_CLOUD_TRIGGER_JOB_API_ENDPOINT_V2 = ( "https://cloud.getdbt.com/api/v2/accounts/{accountId}/jobs/{jobId}/run/" ) # dbt Cloud Get Run API -> https://docs.getdbt.com/dbt-cloud/api-v2#operation/getRunById __DBT_CLOUD_GET_RUN_API_ENDPOINT_V2 = ( "https://cloud.getdbt.com/api/v2/accounts/{accountId}/runs/{runId}/" ) # dbt Cloud List Run Artifacts API -> # https://docs.getdbt.com/dbt-cloud/api-v2#operation/listArtifactsByRunId __DBT_CLOUD_LIST_RUN_ARTIFACTS_ENDPOINT_V2 = ( "https://cloud.getdbt.com/api/v2/accounts/{accountId}/runs/{runId}/artifacts/" ) # dbt Cloud Get Run Artifact API -> # https://docs.getdbt.com/dbt-cloud/api-v2#operation/getArtifactsByRunId __DBT_CLOUD_GET_RUN_ARTIFACT_ENDPOINT_V2 = ( "https://cloud.getdbt.com/api/v2/accounts/{accountId}/runs/{runId}/artifacts/{path}" ) dbt_cloud_artifact_paths = ("manifest.json", "run_results.json", "catalog.json") class DbtCloudBaseException(Exception): """Base exception for all dbt Cloud errors""" pass class TriggerDbtCloudRunFailed(DbtCloudBaseException): """Raised when triggering a dbt job run fails""" pass class GetDbtCloudRunFailed(DbtCloudBaseException): """Raised when details for a dbt Cloud job run cannot be retrieved""" pass class DbtCloudRunFailed(DbtCloudBaseException): """Raised when a dbt Cloud run fails""" pass class DbtCloudRunCanceled(DbtCloudBaseException): """Raised when a dbt Cloud run has been canceled before completion""" pass class DbtCloudRunTimedOut(DbtCloudBaseException): """Raised when a dbt Cloud run does not complete in the provided time""" pass class DbtCloudListArtifactsFailed(DbtCloudBaseException): """Raised when dbt Cloud artifacts cannot be listed""" pass def trigger_job_run( account_id: int, job_id: int, token: str, cause: str, additional_args: dict ) -> dict: """ Trigger a dbt Cloud job run Args: - account_id (int): 
dbt Cloud account ID - job_id (int): dbt Cloud job ID - token (str): dbt Cloud token - cause (str): the reason describing why the job run is being triggered - additional_args (dict): additional information to pass to the Trigger Job Run API Returns: - The trigger run result, namely the "data" key in the API response Raises: - TriggerDbtCloudRunFailed: when the response code is != 200 """ data = additional_args if additional_args else {} data["cause"] = cause trigger_request = requests.post( url=__DBT_CLOUD_TRIGGER_JOB_API_ENDPOINT_V2.format( accountId=account_id, jobId=job_id ), headers={"Authorization": f"Bearer {token}"}, data=data, ) if trigger_request.status_code != 200: raise TriggerDbtCloudRunFailed(trigger_request.reason) return trigger_request.json()["data"] def wait_for_job_run( account_id: int, token: str, run_id: int, max_wait_time: int = None ) -> dict: """ Get a dbt Cloud job run. Please note that this function will fail if any call to dbt Cloud APIs fail. Args: - account_id (int): dbt Cloud account ID - token (str): dbt Cloud token - run_id (int): dbt Cloud job run ID - max_wait_time: the number od seconds to wait for the job to complete Returns: - The job run result, namely the "data" key in the API response Raises: - DbtCloudRunFailed: if "finished_at" is not None and the result status == 20 - DbtCloudRunCanceled: if "finished_at" is not None and the result status == 30 - DbtCloudRunTimedOut: if run does not finish before provided max_wait_time """ wait_time_between_api_calls = 10 elapsed_wait_time = 0 while not max_wait_time or elapsed_wait_time <= max_wait_time: get_run_request = requests.get( url=__DBT_CLOUD_GET_RUN_API_ENDPOINT_V2.format( accountId=account_id, runId=run_id ), headers={"Authorization": f"Bearer {token}"}, ) if get_run_request.status_code != 200: raise GetDbtCloudRunFailed(get_run_request.reason) result = get_run_request.json()["data"] if result["finished_at"]: if result["status"] == 10: return result elif result["status"] == 20: 
raise DbtCloudRunFailed(f"Job run with ID: {run_id} failed.") elif result["status"] == 30: raise DbtCloudRunCanceled(f"Job run with ID: {run_id} cancelled.") sleep(wait_time_between_api_calls) elapsed_wait_time += wait_time_between_api_calls raise DbtCloudRunTimedOut( f"Max attempts reached while checking status of job run with ID: {run_id}" ) def list_run_artifact_links( account_id: int, run_id: int, token: str ) -> List[Tuple[str, str]]: """ Lists URLs that can be used to download artifacts from a dbt run Args: - account_id (int): dbt Cloud account ID - run_id (int): dbt Cloud job run ID - token (str): dbt Cloud token Returns: - List of artifact download URLs Raises: - DbtCloudListArtifactsFailed: if API to list dbt artifacts fails """ list_run_artifact_response = requests.get( url=__DBT_CLOUD_LIST_RUN_ARTIFACTS_ENDPOINT_V2.format( accountId=account_id, runId=run_id ), headers={"Authorization": f"Bearer {token}"}, ) if list_run_artifact_response.status_code != 200: raise DbtCloudListArtifactsFailed(list_run_artifact_response.reason) artifact_paths = list_run_artifact_response.json().get("data") return [ ( __DBT_CLOUD_GET_RUN_ARTIFACT_ENDPOINT_V2.format( accountId=account_id, runId=run_id, path=artifact_path ), artifact_path, ) for artifact_path in artifact_paths ]
2,530
703
<reponame>Tekh-ops/ezEngine #include <EditorFramework/EditorFrameworkPCH.h> #include <EditorFramework/Assets/AssetBrowserDlg.moc.h> #include <EditorFramework/Dialogs/DashboardDlg.moc.h> #include <EditorFramework/EditorApp/EditorApp.moc.h> #include <EditorFramework/Settings/SettingsTab.moc.h> #include <GuiFoundation/ActionViews/MenuBarActionMapView.moc.h> #include <QDesktopServices> EZ_IMPLEMENT_SINGLETON(ezQtSettingsTab); ezString ezQtSettingsTab::GetWindowIcon() const { return ""; //:/GuiFoundation/EZ-logo.svg"; } ezString ezQtSettingsTab::GetDisplayNameShort() const { return ""; } void ezQtEditorApp::ShowSettingsDocument() { ezQtSettingsTab* pSettingsTab = ezQtSettingsTab::GetSingleton(); if (pSettingsTab == nullptr) { pSettingsTab = new ezQtSettingsTab(); } pSettingsTab->EnsureVisible(); } void ezQtEditorApp::CloseSettingsDocument() { ezQtSettingsTab* pSettingsTab = ezQtSettingsTab::GetSingleton(); if (pSettingsTab != nullptr) { pSettingsTab->CloseDocumentWindow(); } } ezQtSettingsTab::ezQtSettingsTab() : ezQtDocumentWindow("Settings") , m_SingletonRegistrar(this) { setCentralWidget(new QWidget()); EZ_ASSERT_DEV(centralWidget() != nullptr, ""); setupUi(centralWidget()); QMetaObject::connectSlotsByName(this); ezQtMenuBarActionMapView* pMenuBar = static_cast<ezQtMenuBarActionMapView*>(menuBar()); ezActionContext context; context.m_sMapping = "SettingsTabMenuBar"; context.m_pDocument = nullptr; pMenuBar->SetActionContext(context); FinishWindowCreation(); ezToolsProject::s_Events.AddEventHandler(ezMakeDelegate(&ezQtSettingsTab::ToolsProjectEventHandler, this)); } ezQtSettingsTab::~ezQtSettingsTab() { ezToolsProject::s_Events.RemoveEventHandler(ezMakeDelegate(&ezQtSettingsTab::ToolsProjectEventHandler, this)); } void ezQtSettingsTab::on_OpenScene_clicked() { ezQtAssetBrowserDlg dlg(this, ezUuid(), "Scene"); if (dlg.exec() == 0) return; ezQtEditorApp::GetSingleton()->OpenDocument(dlg.GetSelectedAssetPathAbsolute(), ezDocumentFlags::RequestWindow | 
ezDocumentFlags::AddToRecentFilesList); } void ezQtSettingsTab::on_OpenProject_clicked() { ezQtDashboardDlg dlg(nullptr, ezQtDashboardDlg::DashboardTab::Samples); dlg.exec(); } void ezQtSettingsTab::on_GettingStarted_clicked() { QDesktopServices::openUrl(QUrl("https://ezengine.net/pages/getting-started/editor-overview.html")); } bool ezQtSettingsTab::InternalCanCloseWindow() { // if this is the last window, prevent closing it return ezQtDocumentWindow::GetAllDocumentWindows().GetCount() > 1; } void ezQtSettingsTab::InternalCloseDocumentWindow() { // make sure this instance isn't used anymore UnregisterSingleton(); } void ezQtSettingsTab::ToolsProjectEventHandler(const ezToolsProjectEvent& e) { if (e.m_Type == ezToolsProjectEvent::Type::ProjectClosed || e.m_Type == ezToolsProjectEvent::Type::ProjectCreated || e.m_Type == ezToolsProjectEvent::Type::ProjectOpened) { ezStringBuilder txt = "<html><head/><body><p align=\"center\"><span style=\" font-size:18pt;\">Open Project:</span></p><p align=\"center\"><span style=\" font-size:18pt;\">None</span></p></body></html>"; if (ezToolsProject::GetSingleton()->IsProjectOpen()) { txt.ReplaceAll("None", ezToolsProject::GetSingleton()->GetProjectName()); OpenScene->setVisible(true); } else { txt = "<html><head/><body><p align=\"center\"><span style=\" font-size:18pt;\">No Project Open</span></p></body></html>"; OpenScene->setVisible(false); } ProjectLabel->setText(txt.GetData()); } }
1,307
1,042
<gh_stars>1000+ /******************************************************************************** * ReactPhysics3D physics library, http://www.reactphysics3d.com * * Copyright (c) 2010-2020 <NAME> * ********************************************************************************* * * * This software is provided 'as-is', without any express or implied warranty. * * In no event will the authors be held liable for any damages arising from the * * use of this software. * * * * Permission is granted to anyone to use this software for any purpose, * * including commercial applications, and to alter it and redistribute it * * freely, subject to the following restrictions: * * * * 1. The origin of this software must not be misrepresented; you must not claim * * that you wrote the original software. If you use this software in a * * product, an acknowledgment in the product documentation would be * * appreciated but is not required. * * * * 2. Altered source versions must be plainly marked as such, and must not be * * misrepresented as being the original software. * * * * 3. This notice may not be removed or altered from any source distribution. 
* * * ********************************************************************************/ // Libraries #include <reactphysics3d/systems/ContactSolverSystem.h> #include <reactphysics3d/engine/PhysicsWorld.h> #include <reactphysics3d/body/RigidBody.h> #include <reactphysics3d/constraint/ContactPoint.h> #include <reactphysics3d/utils/Profiler.h> #include <reactphysics3d/engine/Island.h> #include <reactphysics3d/collision/Collider.h> #include <reactphysics3d/components/CollisionBodyComponents.h> #include <reactphysics3d/components/ColliderComponents.h> #include <reactphysics3d/collision/ContactManifold.h> using namespace reactphysics3d; using namespace std; // Constants initialization const decimal ContactSolverSystem::BETA = decimal(0.2); const decimal ContactSolverSystem::BETA_SPLIT_IMPULSE = decimal(0.2); const decimal ContactSolverSystem::SLOP = decimal(0.01); // Constructor ContactSolverSystem::ContactSolverSystem(MemoryManager& memoryManager, PhysicsWorld& world, Islands& islands, CollisionBodyComponents& bodyComponents, RigidBodyComponents& rigidBodyComponents, ColliderComponents& colliderComponents, decimal& restitutionVelocityThreshold) :mMemoryManager(memoryManager), mWorld(world), mRestitutionVelocityThreshold(restitutionVelocityThreshold), mContactConstraints(nullptr), mContactPoints(nullptr), mIslands(islands), mAllContactManifolds(nullptr), mAllContactPoints(nullptr), mBodyComponents(bodyComponents), mRigidBodyComponents(rigidBodyComponents), mColliderComponents(colliderComponents), mIsSplitImpulseActive(true) { #ifdef IS_RP3D_PROFILING_ENABLED mProfiler = nullptr; #endif } // Initialize the contact constraints void ContactSolverSystem::init(List<ContactManifold>* contactManifolds, List<ContactPoint>* contactPoints, decimal timeStep) { mAllContactManifolds = contactManifolds; mAllContactPoints = contactPoints; RP3D_PROFILE("ContactSolver::init()", mProfiler); mTimeStep = timeStep; uint nbContactManifolds = mAllContactManifolds->size(); uint nbContactPoints 
= mAllContactPoints->size(); mNbContactManifolds = 0; mNbContactPoints = 0; mContactConstraints = nullptr; mContactPoints = nullptr; if (nbContactManifolds == 0 || nbContactPoints == 0) return; mContactPoints = static_cast<ContactPointSolver*>(mMemoryManager.allocate(MemoryManager::AllocationType::Frame, sizeof(ContactPointSolver) * nbContactPoints)); assert(mContactPoints != nullptr); mContactConstraints = static_cast<ContactManifoldSolver*>(mMemoryManager.allocate(MemoryManager::AllocationType::Frame, sizeof(ContactManifoldSolver) * nbContactManifolds)); assert(mContactConstraints != nullptr); // For each island of the world for (uint i = 0; i < mIslands.getNbIslands(); i++) { if (mIslands.nbContactManifolds[i] > 0) { initializeForIsland(i); } } // Warmstarting warmStart(); } // Release allocated memory void ContactSolverSystem::reset() { if (mAllContactPoints->size() > 0) mMemoryManager.release(MemoryManager::AllocationType::Frame, mContactPoints, sizeof(ContactPointSolver) * mAllContactPoints->size()); if (mAllContactManifolds->size() > 0) mMemoryManager.release(MemoryManager::AllocationType::Frame, mContactConstraints, sizeof(ContactManifoldSolver) * mAllContactManifolds->size()); } // Initialize the constraint solver for a given island void ContactSolverSystem::initializeForIsland(uint islandIndex) { RP3D_PROFILE("ContactSolver::initializeForIsland()", mProfiler); assert(mIslands.bodyEntities[islandIndex].size() > 0); assert(mIslands.nbContactManifolds[islandIndex] > 0); // For each contact manifold of the island uint contactManifoldsIndex = mIslands.contactManifoldsIndices[islandIndex]; uint nbContactManifolds = mIslands.nbContactManifolds[islandIndex]; for (uint m=contactManifoldsIndex; m < contactManifoldsIndex + nbContactManifolds; m++) { ContactManifold& externalManifold = (*mAllContactManifolds)[m]; assert(externalManifold.nbContactPoints > 0); // Get the two bodies of the contact RigidBody* body1 = 
static_cast<RigidBody*>(mBodyComponents.getBody(externalManifold.bodyEntity1)); RigidBody* body2 = static_cast<RigidBody*>(mBodyComponents.getBody(externalManifold.bodyEntity2)); assert(body1 != nullptr); assert(body2 != nullptr); assert(!mBodyComponents.getIsEntityDisabled(externalManifold.bodyEntity1)); assert(!mBodyComponents.getIsEntityDisabled(externalManifold.bodyEntity2)); const uint rigidBodyIndex1 = mRigidBodyComponents.getEntityIndex(externalManifold.bodyEntity1); const uint rigidBodyIndex2 = mRigidBodyComponents.getEntityIndex(externalManifold.bodyEntity2); Collider* collider1 = mColliderComponents.getCollider(externalManifold.colliderEntity1); Collider* collider2 = mColliderComponents.getCollider(externalManifold.colliderEntity2); // Get the position of the two bodies const Vector3& x1 = mRigidBodyComponents.mCentersOfMassWorld[rigidBodyIndex1]; const Vector3& x2 = mRigidBodyComponents.mCentersOfMassWorld[rigidBodyIndex2]; // Initialize the internal contact manifold structure using the external contact manifold new (mContactConstraints + mNbContactManifolds) ContactManifoldSolver(); mContactConstraints[mNbContactManifolds].rigidBodyComponentIndexBody1 = rigidBodyIndex1; mContactConstraints[mNbContactManifolds].rigidBodyComponentIndexBody2 = rigidBodyIndex2; mContactConstraints[mNbContactManifolds].inverseInertiaTensorBody1 = RigidBody::getWorldInertiaTensorInverse(mWorld, externalManifold.bodyEntity1); mContactConstraints[mNbContactManifolds].inverseInertiaTensorBody2 = RigidBody::getWorldInertiaTensorInverse(mWorld, externalManifold.bodyEntity2); mContactConstraints[mNbContactManifolds].massInverseBody1 = mRigidBodyComponents.mInverseMasses[rigidBodyIndex1]; mContactConstraints[mNbContactManifolds].massInverseBody2 = mRigidBodyComponents.mInverseMasses[rigidBodyIndex2]; mContactConstraints[mNbContactManifolds].nbContacts = externalManifold.nbContactPoints; mContactConstraints[mNbContactManifolds].frictionCoefficient = 
computeMixedFrictionCoefficient(collider1, collider2); mContactConstraints[mNbContactManifolds].rollingResistanceFactor = computeMixedRollingResistance(collider1, collider2); mContactConstraints[mNbContactManifolds].externalContactManifold = &externalManifold; mContactConstraints[mNbContactManifolds].normal.setToZero(); mContactConstraints[mNbContactManifolds].frictionPointBody1.setToZero(); mContactConstraints[mNbContactManifolds].frictionPointBody2.setToZero(); // Get the velocities of the bodies const Vector3& v1 = mRigidBodyComponents.mLinearVelocities[rigidBodyIndex1]; const Vector3& w1 = mRigidBodyComponents.mAngularVelocities[rigidBodyIndex1]; const Vector3& v2 = mRigidBodyComponents.mLinearVelocities[rigidBodyIndex2]; const Vector3& w2 = mRigidBodyComponents.mAngularVelocities[rigidBodyIndex2]; // For each contact point of the contact manifold assert(externalManifold.nbContactPoints > 0); uint contactPointsStartIndex = externalManifold.contactPointsIndex; uint nbContactPoints = static_cast<uint>(externalManifold.nbContactPoints); for (uint c=contactPointsStartIndex; c < contactPointsStartIndex + nbContactPoints; c++) { ContactPoint& externalContact = (*mAllContactPoints)[c]; // Get the contact point on the two bodies Vector3 p1 = mColliderComponents.getLocalToWorldTransform(externalManifold.colliderEntity1) * externalContact.getLocalPointOnShape1(); Vector3 p2 = mColliderComponents.getLocalToWorldTransform(externalManifold.colliderEntity2) * externalContact.getLocalPointOnShape2(); new (mContactPoints + mNbContactPoints) ContactPointSolver(); mContactPoints[mNbContactPoints].externalContact = &externalContact; mContactPoints[mNbContactPoints].normal = externalContact.getNormal(); mContactPoints[mNbContactPoints].r1.x = p1.x - x1.x; mContactPoints[mNbContactPoints].r1.y = p1.y - x1.y; mContactPoints[mNbContactPoints].r1.z = p1.z - x1.z; mContactPoints[mNbContactPoints].r2.x = p2.x - x2.x; mContactPoints[mNbContactPoints].r2.y = p2.y - x2.y; 
mContactPoints[mNbContactPoints].r2.z = p2.z - x2.z; mContactPoints[mNbContactPoints].penetrationDepth = externalContact.getPenetrationDepth(); mContactPoints[mNbContactPoints].isRestingContact = externalContact.getIsRestingContact(); externalContact.setIsRestingContact(true); mContactPoints[mNbContactPoints].penetrationImpulse = externalContact.getPenetrationImpulse(); mContactPoints[mNbContactPoints].penetrationSplitImpulse = 0.0; mContactConstraints[mNbContactManifolds].frictionPointBody1.x += p1.x; mContactConstraints[mNbContactManifolds].frictionPointBody1.y += p1.y; mContactConstraints[mNbContactManifolds].frictionPointBody1.z += p1.z; mContactConstraints[mNbContactManifolds].frictionPointBody2.x += p2.x; mContactConstraints[mNbContactManifolds].frictionPointBody2.y += p2.y; mContactConstraints[mNbContactManifolds].frictionPointBody2.z += p2.z; // Compute the velocity difference //deltaV = v2 + w2.cross(mContactPoints[mNbContactPoints].r2) - v1 - w1.cross(mContactPoints[mNbContactPoints].r1); Vector3 deltaV(v2.x + w2.y * mContactPoints[mNbContactPoints].r2.z - w2.z * mContactPoints[mNbContactPoints].r2.y - v1.x - w1.y * mContactPoints[mNbContactPoints].r1.z - w1.z * mContactPoints[mNbContactPoints].r1.y, v2.y + w2.z * mContactPoints[mNbContactPoints].r2.x - w2.x * mContactPoints[mNbContactPoints].r2.z - v1.y - w1.z * mContactPoints[mNbContactPoints].r1.x - w1.x * mContactPoints[mNbContactPoints].r1.z, v2.z + w2.x * mContactPoints[mNbContactPoints].r2.y - w2.y * mContactPoints[mNbContactPoints].r2.x - v1.z - w1.x * mContactPoints[mNbContactPoints].r1.y - w1.y * mContactPoints[mNbContactPoints].r1.x); // r1CrossN = mContactPoints[mNbContactPoints].r1.cross(mContactPoints[mNbContactPoints].normal); Vector3 r1CrossN(mContactPoints[mNbContactPoints].r1.y * mContactPoints[mNbContactPoints].normal.z - mContactPoints[mNbContactPoints].r1.z * mContactPoints[mNbContactPoints].normal.y, mContactPoints[mNbContactPoints].r1.z * mContactPoints[mNbContactPoints].normal.x - 
mContactPoints[mNbContactPoints].r1.x * mContactPoints[mNbContactPoints].normal.z, mContactPoints[mNbContactPoints].r1.x * mContactPoints[mNbContactPoints].normal.y - mContactPoints[mNbContactPoints].r1.y * mContactPoints[mNbContactPoints].normal.x); // r2CrossN = mContactPoints[mNbContactPoints].r2.cross(mContactPoints[mNbContactPoints].normal); Vector3 r2CrossN(mContactPoints[mNbContactPoints].r2.y * mContactPoints[mNbContactPoints].normal.z - mContactPoints[mNbContactPoints].r2.z * mContactPoints[mNbContactPoints].normal.y, mContactPoints[mNbContactPoints].r2.z * mContactPoints[mNbContactPoints].normal.x - mContactPoints[mNbContactPoints].r2.x * mContactPoints[mNbContactPoints].normal.z, mContactPoints[mNbContactPoints].r2.x * mContactPoints[mNbContactPoints].normal.y - mContactPoints[mNbContactPoints].r2.y * mContactPoints[mNbContactPoints].normal.x); mContactPoints[mNbContactPoints].i1TimesR1CrossN = mContactConstraints[mNbContactManifolds].inverseInertiaTensorBody1 * r1CrossN; mContactPoints[mNbContactPoints].i2TimesR2CrossN = mContactConstraints[mNbContactManifolds].inverseInertiaTensorBody2 * r2CrossN; // Compute the inverse mass matrix K for the penetration constraint decimal massPenetration = mContactConstraints[mNbContactManifolds].massInverseBody1 + mContactConstraints[mNbContactManifolds].massInverseBody2 + ((mContactPoints[mNbContactPoints].i1TimesR1CrossN).cross(mContactPoints[mNbContactPoints].r1)).dot(mContactPoints[mNbContactPoints].normal) + ((mContactPoints[mNbContactPoints].i2TimesR2CrossN).cross(mContactPoints[mNbContactPoints].r2)).dot(mContactPoints[mNbContactPoints].normal); mContactPoints[mNbContactPoints].inversePenetrationMass = massPenetration > decimal(0.0) ? decimal(1.0) / massPenetration : decimal(0.0); // Compute the restitution velocity bias "b". We compute this here instead // of inside the solve() method because we need to use the velocity difference // at the beginning of the contact. 
Note that if it is a resting contact (normal // velocity bellow a given threshold), we do not add a restitution velocity bias mContactPoints[mNbContactPoints].restitutionBias = 0.0; // deltaVDotN = deltaV.dot(mContactPoints[mNbContactPoints].normal); decimal deltaVDotN = deltaV.x * mContactPoints[mNbContactPoints].normal.x + deltaV.y * mContactPoints[mNbContactPoints].normal.y + deltaV.z * mContactPoints[mNbContactPoints].normal.z; const decimal restitutionFactor = computeMixedRestitutionFactor(collider1, collider2); if (deltaVDotN < -mRestitutionVelocityThreshold) { mContactPoints[mNbContactPoints].restitutionBias = restitutionFactor * deltaVDotN; } mContactConstraints[mNbContactManifolds].normal.x += mContactPoints[mNbContactPoints].normal.x; mContactConstraints[mNbContactManifolds].normal.y += mContactPoints[mNbContactPoints].normal.y; mContactConstraints[mNbContactManifolds].normal.z += mContactPoints[mNbContactPoints].normal.z; mNbContactPoints++; } mContactConstraints[mNbContactManifolds].frictionPointBody1 /=static_cast<decimal>(mContactConstraints[mNbContactManifolds].nbContacts); mContactConstraints[mNbContactManifolds].frictionPointBody2 /=static_cast<decimal>(mContactConstraints[mNbContactManifolds].nbContacts); mContactConstraints[mNbContactManifolds].r1Friction.x = mContactConstraints[mNbContactManifolds].frictionPointBody1.x - x1.x; mContactConstraints[mNbContactManifolds].r1Friction.y = mContactConstraints[mNbContactManifolds].frictionPointBody1.y - x1.y; mContactConstraints[mNbContactManifolds].r1Friction.z = mContactConstraints[mNbContactManifolds].frictionPointBody1.z - x1.z; mContactConstraints[mNbContactManifolds].r2Friction.x = mContactConstraints[mNbContactManifolds].frictionPointBody2.x - x2.x; mContactConstraints[mNbContactManifolds].r2Friction.y = mContactConstraints[mNbContactManifolds].frictionPointBody2.y - x2.y; mContactConstraints[mNbContactManifolds].r2Friction.z = mContactConstraints[mNbContactManifolds].frictionPointBody2.z - x2.z; 
mContactConstraints[mNbContactManifolds].oldFrictionVector1 = externalManifold.frictionVector1; mContactConstraints[mNbContactManifolds].oldFrictionVector2 = externalManifold.frictionVector2; // Initialize the accumulated impulses with the previous step accumulated impulses mContactConstraints[mNbContactManifolds].friction1Impulse = externalManifold.frictionImpulse1; mContactConstraints[mNbContactManifolds].friction2Impulse = externalManifold.frictionImpulse2; mContactConstraints[mNbContactManifolds].frictionTwistImpulse = externalManifold.frictionTwistImpulse; // Compute the inverse K matrix for the rolling resistance constraint bool isBody1DynamicType = body1->getType() == BodyType::DYNAMIC; bool isBody2DynamicType = body2->getType() == BodyType::DYNAMIC; mContactConstraints[mNbContactManifolds].inverseRollingResistance.setToZero(); if (mContactConstraints[mNbContactManifolds].rollingResistanceFactor > 0 && (isBody1DynamicType || isBody2DynamicType)) { mContactConstraints[mNbContactManifolds].inverseRollingResistance = mContactConstraints[mNbContactManifolds].inverseInertiaTensorBody1 + mContactConstraints[mNbContactManifolds].inverseInertiaTensorBody2; decimal det = mContactConstraints[mNbContactManifolds].inverseRollingResistance.getDeterminant(); // If the matrix is not inversible if (approxEqual(det, decimal(0.0))) { mContactConstraints[mNbContactManifolds].inverseRollingResistance.setToZero(); } else { mContactConstraints[mNbContactManifolds].inverseRollingResistance = mContactConstraints[mNbContactManifolds].inverseRollingResistance.getInverse(); } } mContactConstraints[mNbContactManifolds].normal.normalize(); // deltaVFrictionPoint = v2 + w2.cross(mContactConstraints[mNbContactManifolds].r2Friction) - // v1 - w1.cross(mContactConstraints[mNbContactManifolds].r1Friction); Vector3 deltaVFrictionPoint(v2.x + w2.y * mContactConstraints[mNbContactManifolds].r2Friction.z - w2.z * mContactConstraints[mNbContactManifolds].r2Friction.y - v1.x - w1.y * 
mContactConstraints[mNbContactManifolds].r1Friction.z - w1.z * mContactConstraints[mNbContactManifolds].r1Friction.y, v2.y + w2.z * mContactConstraints[mNbContactManifolds].r2Friction.x - w2.x * mContactConstraints[mNbContactManifolds].r2Friction.z - v1.y - w1.z * mContactConstraints[mNbContactManifolds].r1Friction.x - w1.x * mContactConstraints[mNbContactManifolds].r1Friction.z, v2.z + w2.x * mContactConstraints[mNbContactManifolds].r2Friction.y - w2.y * mContactConstraints[mNbContactManifolds].r2Friction.x - v1.z - w1.x * mContactConstraints[mNbContactManifolds].r1Friction.y - w1.y * mContactConstraints[mNbContactManifolds].r1Friction.x); // Compute the friction vectors computeFrictionVectors(deltaVFrictionPoint, mContactConstraints[mNbContactManifolds]); // Compute the inverse mass matrix K for the friction constraints at the center of // the contact manifold mContactConstraints[mNbContactManifolds].r1CrossT1 = mContactConstraints[mNbContactManifolds].r1Friction.cross(mContactConstraints[mNbContactManifolds].frictionVector1); mContactConstraints[mNbContactManifolds].r1CrossT2 = mContactConstraints[mNbContactManifolds].r1Friction.cross(mContactConstraints[mNbContactManifolds].frictionVector2); mContactConstraints[mNbContactManifolds].r2CrossT1 = mContactConstraints[mNbContactManifolds].r2Friction.cross(mContactConstraints[mNbContactManifolds].frictionVector1); mContactConstraints[mNbContactManifolds].r2CrossT2 = mContactConstraints[mNbContactManifolds].r2Friction.cross(mContactConstraints[mNbContactManifolds].frictionVector2); decimal friction1Mass = mContactConstraints[mNbContactManifolds].massInverseBody1 + mContactConstraints[mNbContactManifolds].massInverseBody2 + ((mContactConstraints[mNbContactManifolds].inverseInertiaTensorBody1 * mContactConstraints[mNbContactManifolds].r1CrossT1).cross(mContactConstraints[mNbContactManifolds].r1Friction)).dot( mContactConstraints[mNbContactManifolds].frictionVector1) + 
((mContactConstraints[mNbContactManifolds].inverseInertiaTensorBody2 * mContactConstraints[mNbContactManifolds].r2CrossT1).cross(mContactConstraints[mNbContactManifolds].r2Friction)).dot( mContactConstraints[mNbContactManifolds].frictionVector1); decimal friction2Mass = mContactConstraints[mNbContactManifolds].massInverseBody1 + mContactConstraints[mNbContactManifolds].massInverseBody2 + ((mContactConstraints[mNbContactManifolds].inverseInertiaTensorBody1 * mContactConstraints[mNbContactManifolds].r1CrossT2).cross(mContactConstraints[mNbContactManifolds].r1Friction)).dot( mContactConstraints[mNbContactManifolds].frictionVector2) + ((mContactConstraints[mNbContactManifolds].inverseInertiaTensorBody2 * mContactConstraints[mNbContactManifolds].r2CrossT2).cross(mContactConstraints[mNbContactManifolds].r2Friction)).dot( mContactConstraints[mNbContactManifolds].frictionVector2); decimal frictionTwistMass = mContactConstraints[mNbContactManifolds].normal.dot(mContactConstraints[mNbContactManifolds].inverseInertiaTensorBody1 * mContactConstraints[mNbContactManifolds].normal) + mContactConstraints[mNbContactManifolds].normal.dot(mContactConstraints[mNbContactManifolds].inverseInertiaTensorBody2 * mContactConstraints[mNbContactManifolds].normal); mContactConstraints[mNbContactManifolds].inverseFriction1Mass = friction1Mass > decimal(0.0) ? decimal(1.0) / friction1Mass : decimal(0.0); mContactConstraints[mNbContactManifolds].inverseFriction2Mass = friction2Mass > decimal(0.0) ? decimal(1.0) / friction2Mass : decimal(0.0); mContactConstraints[mNbContactManifolds].inverseTwistFrictionMass = frictionTwistMass > decimal(0.0) ? decimal(1.0) / frictionTwistMass : decimal(0.0); mNbContactManifolds++; } } // Warm start the solver. /// For each constraint, we apply the previous impulse (from the previous step) /// at the beginning. 
With this technique, we will converge faster towards /// the solution of the linear system void ContactSolverSystem::warmStart() { RP3D_PROFILE("ContactSolver::warmStart()", mProfiler); uint contactPointIndex = 0; // For each constraint for (uint c=0; c<mNbContactManifolds; c++) { bool atLeastOneRestingContactPoint = false; for (short int i=0; i<mContactConstraints[c].nbContacts; i++) { // If it is not a new contact (this contact was already existing at last time step) if (mContactPoints[contactPointIndex].isRestingContact) { atLeastOneRestingContactPoint = true; // --------- Penetration --------- // // Update the velocities of the body 1 by applying the impulse P Vector3 impulsePenetration(mContactPoints[contactPointIndex].normal.x * mContactPoints[contactPointIndex].penetrationImpulse, mContactPoints[contactPointIndex].normal.y * mContactPoints[contactPointIndex].penetrationImpulse, mContactPoints[contactPointIndex].normal.z * mContactPoints[contactPointIndex].penetrationImpulse); mRigidBodyComponents.mConstrainedLinearVelocities[mContactConstraints[c].rigidBodyComponentIndexBody1].x -= mContactConstraints[c].massInverseBody1 * impulsePenetration.x; mRigidBodyComponents.mConstrainedLinearVelocities[mContactConstraints[c].rigidBodyComponentIndexBody1].y -= mContactConstraints[c].massInverseBody1 * impulsePenetration.y; mRigidBodyComponents.mConstrainedLinearVelocities[mContactConstraints[c].rigidBodyComponentIndexBody1].z -= mContactConstraints[c].massInverseBody1 * impulsePenetration.z; mRigidBodyComponents.mConstrainedAngularVelocities[mContactConstraints[c].rigidBodyComponentIndexBody1].x -= mContactPoints[contactPointIndex].i1TimesR1CrossN.x * mContactPoints[contactPointIndex].penetrationImpulse; mRigidBodyComponents.mConstrainedAngularVelocities[mContactConstraints[c].rigidBodyComponentIndexBody1].y -= mContactPoints[contactPointIndex].i1TimesR1CrossN.y * mContactPoints[contactPointIndex].penetrationImpulse; 
mRigidBodyComponents.mConstrainedAngularVelocities[mContactConstraints[c].rigidBodyComponentIndexBody1].z -= mContactPoints[contactPointIndex].i1TimesR1CrossN.z * mContactPoints[contactPointIndex].penetrationImpulse; // Update the velocities of the body 2 by applying the impulse P mRigidBodyComponents.mConstrainedLinearVelocities[mContactConstraints[c].rigidBodyComponentIndexBody2].x += mContactConstraints[c].massInverseBody2 * impulsePenetration.x; mRigidBodyComponents.mConstrainedLinearVelocities[mContactConstraints[c].rigidBodyComponentIndexBody2].y += mContactConstraints[c].massInverseBody2 * impulsePenetration.y; mRigidBodyComponents.mConstrainedLinearVelocities[mContactConstraints[c].rigidBodyComponentIndexBody2].z += mContactConstraints[c].massInverseBody2 * impulsePenetration.z; mRigidBodyComponents.mConstrainedAngularVelocities[mContactConstraints[c].rigidBodyComponentIndexBody2].x += mContactPoints[contactPointIndex].i2TimesR2CrossN.x * mContactPoints[contactPointIndex].penetrationImpulse; mRigidBodyComponents.mConstrainedAngularVelocities[mContactConstraints[c].rigidBodyComponentIndexBody2].y += mContactPoints[contactPointIndex].i2TimesR2CrossN.y * mContactPoints[contactPointIndex].penetrationImpulse; mRigidBodyComponents.mConstrainedAngularVelocities[mContactConstraints[c].rigidBodyComponentIndexBody2].z += mContactPoints[contactPointIndex].i2TimesR2CrossN.z * mContactPoints[contactPointIndex].penetrationImpulse; } else { // If it is a new contact point // Initialize the accumulated impulses to zero mContactPoints[contactPointIndex].penetrationImpulse = 0.0; } contactPointIndex++; } // If we solve the friction constraints at the center of the contact manifold and there is // at least one resting contact point in the contact manifold if (atLeastOneRestingContactPoint) { // Project the old friction impulses (with old friction vectors) into the new friction // vectors to get the new friction impulses Vector3 
oldFrictionImpulse(mContactConstraints[c].friction1Impulse * mContactConstraints[c].oldFrictionVector1.x + mContactConstraints[c].friction2Impulse * mContactConstraints[c].oldFrictionVector2.x, mContactConstraints[c].friction1Impulse * mContactConstraints[c].oldFrictionVector1.y + mContactConstraints[c].friction2Impulse * mContactConstraints[c].oldFrictionVector2.y, mContactConstraints[c].friction1Impulse * mContactConstraints[c].oldFrictionVector1.z + mContactConstraints[c].friction2Impulse * mContactConstraints[c].oldFrictionVector2.z); mContactConstraints[c].friction1Impulse = oldFrictionImpulse.dot(mContactConstraints[c].frictionVector1); mContactConstraints[c].friction2Impulse = oldFrictionImpulse.dot(mContactConstraints[c].frictionVector2); // ------ First friction constraint at the center of the contact manifold ------ // // Compute the impulse P = J^T * lambda Vector3 angularImpulseBody1(-mContactConstraints[c].r1CrossT1.x * mContactConstraints[c].friction1Impulse, -mContactConstraints[c].r1CrossT1.y * mContactConstraints[c].friction1Impulse, -mContactConstraints[c].r1CrossT1.z * mContactConstraints[c].friction1Impulse); Vector3 linearImpulseBody2(mContactConstraints[c].frictionVector1.x * mContactConstraints[c].friction1Impulse, mContactConstraints[c].frictionVector1.y * mContactConstraints[c].friction1Impulse, mContactConstraints[c].frictionVector1.z * mContactConstraints[c].friction1Impulse); Vector3 angularImpulseBody2(mContactConstraints[c].r2CrossT1.x * mContactConstraints[c].friction1Impulse, mContactConstraints[c].r2CrossT1.y * mContactConstraints[c].friction1Impulse, mContactConstraints[c].r2CrossT1.z * mContactConstraints[c].friction1Impulse); // Update the velocities of the body 1 by applying the impulse P mRigidBodyComponents.mConstrainedLinearVelocities[mContactConstraints[c].rigidBodyComponentIndexBody1] -= mContactConstraints[c].massInverseBody1 * linearImpulseBody2; 
mRigidBodyComponents.mConstrainedAngularVelocities[mContactConstraints[c].rigidBodyComponentIndexBody1] += mContactConstraints[c].inverseInertiaTensorBody1 * angularImpulseBody1; // Update the velocities of the body 1 by applying the impulse P mRigidBodyComponents.mConstrainedLinearVelocities[mContactConstraints[c].rigidBodyComponentIndexBody2] += mContactConstraints[c].massInverseBody2 * linearImpulseBody2; mRigidBodyComponents.mConstrainedAngularVelocities[mContactConstraints[c].rigidBodyComponentIndexBody2] += mContactConstraints[c].inverseInertiaTensorBody2 * angularImpulseBody2; // ------ Second friction constraint at the center of the contact manifold ----- // // Compute the impulse P = J^T * lambda angularImpulseBody1.x = -mContactConstraints[c].r1CrossT2.x * mContactConstraints[c].friction2Impulse; angularImpulseBody1.y = -mContactConstraints[c].r1CrossT2.y * mContactConstraints[c].friction2Impulse; angularImpulseBody1.z = -mContactConstraints[c].r1CrossT2.z * mContactConstraints[c].friction2Impulse; linearImpulseBody2.x = mContactConstraints[c].frictionVector2.x * mContactConstraints[c].friction2Impulse; linearImpulseBody2.y = mContactConstraints[c].frictionVector2.y * mContactConstraints[c].friction2Impulse; linearImpulseBody2.z = mContactConstraints[c].frictionVector2.z * mContactConstraints[c].friction2Impulse; angularImpulseBody2.x = mContactConstraints[c].r2CrossT2.x * mContactConstraints[c].friction2Impulse; angularImpulseBody2.y = mContactConstraints[c].r2CrossT2.y * mContactConstraints[c].friction2Impulse; angularImpulseBody2.z = mContactConstraints[c].r2CrossT2.z * mContactConstraints[c].friction2Impulse; // Update the velocities of the body 1 by applying the impulse P mRigidBodyComponents.mConstrainedLinearVelocities[mContactConstraints[c].rigidBodyComponentIndexBody1].x -= mContactConstraints[c].massInverseBody1 * linearImpulseBody2.x; mRigidBodyComponents.mConstrainedLinearVelocities[mContactConstraints[c].rigidBodyComponentIndexBody1].y -= 
mContactConstraints[c].massInverseBody1 * linearImpulseBody2.y; mRigidBodyComponents.mConstrainedLinearVelocities[mContactConstraints[c].rigidBodyComponentIndexBody1].z -= mContactConstraints[c].massInverseBody1 * linearImpulseBody2.z; mRigidBodyComponents.mConstrainedAngularVelocities[mContactConstraints[c].rigidBodyComponentIndexBody1] += mContactConstraints[c].inverseInertiaTensorBody1 * angularImpulseBody1; // Update the velocities of the body 2 by applying the impulse P mRigidBodyComponents.mConstrainedLinearVelocities[mContactConstraints[c].rigidBodyComponentIndexBody2].x += mContactConstraints[c].massInverseBody2 * linearImpulseBody2.x; mRigidBodyComponents.mConstrainedLinearVelocities[mContactConstraints[c].rigidBodyComponentIndexBody2].y += mContactConstraints[c].massInverseBody2 * linearImpulseBody2.y; mRigidBodyComponents.mConstrainedLinearVelocities[mContactConstraints[c].rigidBodyComponentIndexBody2].z += mContactConstraints[c].massInverseBody2 * linearImpulseBody2.z; mRigidBodyComponents.mConstrainedAngularVelocities[mContactConstraints[c].rigidBodyComponentIndexBody2] += mContactConstraints[c].inverseInertiaTensorBody2 * angularImpulseBody2; // ------ Twist friction constraint at the center of the contact manifold ------ // // Compute the impulse P = J^T * lambda angularImpulseBody1.x = -mContactConstraints[c].normal.x * mContactConstraints[c].frictionTwistImpulse; angularImpulseBody1.y = -mContactConstraints[c].normal.y * mContactConstraints[c].frictionTwistImpulse; angularImpulseBody1.z = -mContactConstraints[c].normal.z * mContactConstraints[c].frictionTwistImpulse; angularImpulseBody2.x = mContactConstraints[c].normal.x * mContactConstraints[c].frictionTwistImpulse; angularImpulseBody2.y = mContactConstraints[c].normal.y * mContactConstraints[c].frictionTwistImpulse; angularImpulseBody2.z = mContactConstraints[c].normal.z * mContactConstraints[c].frictionTwistImpulse; // Update the velocities of the body 1 by applying the impulse P 
mRigidBodyComponents.mConstrainedAngularVelocities[mContactConstraints[c].rigidBodyComponentIndexBody1] += mContactConstraints[c].inverseInertiaTensorBody1 * angularImpulseBody1; // Update the velocities of the body 2 by applying the impulse P mRigidBodyComponents.mConstrainedAngularVelocities[mContactConstraints[c].rigidBodyComponentIndexBody2] += mContactConstraints[c].inverseInertiaTensorBody2 * angularImpulseBody2; // ------ Rolling resistance at the center of the contact manifold ------ // // Compute the impulse P = J^T * lambda angularImpulseBody2 = mContactConstraints[c].rollingResistanceImpulse; // Update the velocities of the body 1 by applying the impulse P mRigidBodyComponents.mConstrainedAngularVelocities[mContactConstraints[c].rigidBodyComponentIndexBody1] -= mContactConstraints[c].inverseInertiaTensorBody1 * angularImpulseBody2; // Update the velocities of the body 1 by applying the impulse P mRigidBodyComponents.mConstrainedAngularVelocities[mContactConstraints[c].rigidBodyComponentIndexBody2] += mContactConstraints[c].inverseInertiaTensorBody2 * angularImpulseBody2; } else { // If it is a new contact manifold // Initialize the accumulated impulses to zero mContactConstraints[c].friction1Impulse = 0.0; mContactConstraints[c].friction2Impulse = 0.0; mContactConstraints[c].frictionTwistImpulse = 0.0; mContactConstraints[c].rollingResistanceImpulse.setToZero(); } } } // Solve the contacts void ContactSolverSystem::solve() { RP3D_PROFILE("ContactSolverSystem::solve()", mProfiler); decimal deltaLambda; decimal lambdaTemp; uint contactPointIndex = 0; const decimal beta = mIsSplitImpulseActive ? 
BETA_SPLIT_IMPULSE : BETA; // For each contact manifold for (uint c=0; c<mNbContactManifolds; c++) { decimal sumPenetrationImpulse = 0.0; // Get the constrained velocities const Vector3& v1 = mRigidBodyComponents.mConstrainedLinearVelocities[mContactConstraints[c].rigidBodyComponentIndexBody1]; const Vector3& w1 = mRigidBodyComponents.mConstrainedAngularVelocities[mContactConstraints[c].rigidBodyComponentIndexBody1]; const Vector3& v2 = mRigidBodyComponents.mConstrainedLinearVelocities[mContactConstraints[c].rigidBodyComponentIndexBody2]; const Vector3& w2 = mRigidBodyComponents.mConstrainedAngularVelocities[mContactConstraints[c].rigidBodyComponentIndexBody2]; for (short int i=0; i<mContactConstraints[c].nbContacts; i++) { // --------- Penetration --------- // // Compute J*v //Vector3 deltaV = v2 + w2.cross(mContactPoints[contactPointIndex].r2) - v1 - w1.cross(mContactPoints[contactPointIndex].r1); Vector3 deltaV(v2.x + w2.y * mContactPoints[contactPointIndex].r2.z - w2.z * mContactPoints[contactPointIndex].r2.y - v1.x - w1.y * mContactPoints[contactPointIndex].r1.z + w1.z * mContactPoints[contactPointIndex].r1.y, v2.y + w2.z * mContactPoints[contactPointIndex].r2.x - w2.x * mContactPoints[contactPointIndex].r2.z - v1.y - w1.z * mContactPoints[contactPointIndex].r1.x + w1.x * mContactPoints[contactPointIndex].r1.z, v2.z + w2.x * mContactPoints[contactPointIndex].r2.y - w2.y * mContactPoints[contactPointIndex].r2.x - v1.z - w1.x * mContactPoints[contactPointIndex].r1.y + w1.y * mContactPoints[contactPointIndex].r1.x); decimal deltaVDotN = deltaV.x * mContactPoints[contactPointIndex].normal.x + deltaV.y * mContactPoints[contactPointIndex].normal.y + deltaV.z * mContactPoints[contactPointIndex].normal.z; decimal Jv = deltaVDotN; // Compute the bias "b" of the constraint decimal biasPenetrationDepth = 0.0; if (mContactPoints[contactPointIndex].penetrationDepth > SLOP) { biasPenetrationDepth = -(beta/mTimeStep) * max(0.0f, 
float(mContactPoints[contactPointIndex].penetrationDepth - SLOP)); } decimal b = biasPenetrationDepth + mContactPoints[contactPointIndex].restitutionBias; // Compute the Lagrange multiplier lambda if (mIsSplitImpulseActive) { deltaLambda = - (Jv + mContactPoints[contactPointIndex].restitutionBias) * mContactPoints[contactPointIndex].inversePenetrationMass; } else { deltaLambda = - (Jv + b) * mContactPoints[contactPointIndex].inversePenetrationMass; } lambdaTemp = mContactPoints[contactPointIndex].penetrationImpulse; mContactPoints[contactPointIndex].penetrationImpulse = std::max(mContactPoints[contactPointIndex].penetrationImpulse + deltaLambda, decimal(0.0)); deltaLambda = mContactPoints[contactPointIndex].penetrationImpulse - lambdaTemp; Vector3 linearImpulse(mContactPoints[contactPointIndex].normal.x * deltaLambda, mContactPoints[contactPointIndex].normal.y * deltaLambda, mContactPoints[contactPointIndex].normal.z * deltaLambda); // Update the velocities of the body 1 by applying the impulse P mRigidBodyComponents.mConstrainedLinearVelocities[mContactConstraints[c].rigidBodyComponentIndexBody1].x -= mContactConstraints[c].massInverseBody1 * linearImpulse.x; mRigidBodyComponents.mConstrainedLinearVelocities[mContactConstraints[c].rigidBodyComponentIndexBody1].y -= mContactConstraints[c].massInverseBody1 * linearImpulse.y; mRigidBodyComponents.mConstrainedLinearVelocities[mContactConstraints[c].rigidBodyComponentIndexBody1].z -= mContactConstraints[c].massInverseBody1 * linearImpulse.z; mRigidBodyComponents.mConstrainedAngularVelocities[mContactConstraints[c].rigidBodyComponentIndexBody1].x -= mContactPoints[contactPointIndex].i1TimesR1CrossN.x * deltaLambda; mRigidBodyComponents.mConstrainedAngularVelocities[mContactConstraints[c].rigidBodyComponentIndexBody1].y -= mContactPoints[contactPointIndex].i1TimesR1CrossN.y * deltaLambda; mRigidBodyComponents.mConstrainedAngularVelocities[mContactConstraints[c].rigidBodyComponentIndexBody1].z -= 
mContactPoints[contactPointIndex].i1TimesR1CrossN.z * deltaLambda; // Update the velocities of the body 2 by applying the impulse P mRigidBodyComponents.mConstrainedLinearVelocities[mContactConstraints[c].rigidBodyComponentIndexBody2].x += mContactConstraints[c].massInverseBody2 * linearImpulse.x; mRigidBodyComponents.mConstrainedLinearVelocities[mContactConstraints[c].rigidBodyComponentIndexBody2].y += mContactConstraints[c].massInverseBody2 * linearImpulse.y; mRigidBodyComponents.mConstrainedLinearVelocities[mContactConstraints[c].rigidBodyComponentIndexBody2].z += mContactConstraints[c].massInverseBody2 * linearImpulse.z; mRigidBodyComponents.mConstrainedAngularVelocities[mContactConstraints[c].rigidBodyComponentIndexBody2].x += mContactPoints[contactPointIndex].i2TimesR2CrossN.x * deltaLambda; mRigidBodyComponents.mConstrainedAngularVelocities[mContactConstraints[c].rigidBodyComponentIndexBody2].y += mContactPoints[contactPointIndex].i2TimesR2CrossN.y * deltaLambda; mRigidBodyComponents.mConstrainedAngularVelocities[mContactConstraints[c].rigidBodyComponentIndexBody2].z += mContactPoints[contactPointIndex].i2TimesR2CrossN.z * deltaLambda; sumPenetrationImpulse += mContactPoints[contactPointIndex].penetrationImpulse; // If the split impulse position correction is active if (mIsSplitImpulseActive) { // Split impulse (position correction) const Vector3& v1Split = mRigidBodyComponents.mSplitLinearVelocities[mContactConstraints[c].rigidBodyComponentIndexBody1]; const Vector3& w1Split = mRigidBodyComponents.mSplitAngularVelocities[mContactConstraints[c].rigidBodyComponentIndexBody1]; const Vector3& v2Split = mRigidBodyComponents.mSplitLinearVelocities[mContactConstraints[c].rigidBodyComponentIndexBody2]; const Vector3& w2Split = mRigidBodyComponents.mSplitAngularVelocities[mContactConstraints[c].rigidBodyComponentIndexBody2]; //Vector3 deltaVSplit = v2Split + w2Split.cross(mContactPoints[contactPointIndex].r2) - v1Split - 
w1Split.cross(mContactPoints[contactPointIndex].r1); Vector3 deltaVSplit(v2Split.x + w2Split.y * mContactPoints[contactPointIndex].r2.z - w2Split.z * mContactPoints[contactPointIndex].r2.y - v1Split.x - w1Split.y * mContactPoints[contactPointIndex].r1.z + w1Split.z * mContactPoints[contactPointIndex].r1.y, v2Split.y + w2Split.z * mContactPoints[contactPointIndex].r2.x - w2Split.x * mContactPoints[contactPointIndex].r2.z - v1Split.y - w1Split.z * mContactPoints[contactPointIndex].r1.x + w1Split.x * mContactPoints[contactPointIndex].r1.z, v2Split.z + w2Split.x * mContactPoints[contactPointIndex].r2.y - w2Split.y * mContactPoints[contactPointIndex].r2.x - v1Split.z - w1Split.x * mContactPoints[contactPointIndex].r1.y + w1Split.y * mContactPoints[contactPointIndex].r1.x); decimal JvSplit = deltaVSplit.x * mContactPoints[contactPointIndex].normal.x + deltaVSplit.y * mContactPoints[contactPointIndex].normal.y + deltaVSplit.z * mContactPoints[contactPointIndex].normal.z; decimal deltaLambdaSplit = - (JvSplit + biasPenetrationDepth) * mContactPoints[contactPointIndex].inversePenetrationMass; decimal lambdaTempSplit = mContactPoints[contactPointIndex].penetrationSplitImpulse; mContactPoints[contactPointIndex].penetrationSplitImpulse = std::max( mContactPoints[contactPointIndex].penetrationSplitImpulse + deltaLambdaSplit, decimal(0.0)); deltaLambdaSplit = mContactPoints[contactPointIndex].penetrationSplitImpulse - lambdaTempSplit; Vector3 linearImpulse(mContactPoints[contactPointIndex].normal.x * deltaLambdaSplit, mContactPoints[contactPointIndex].normal.y * deltaLambdaSplit, mContactPoints[contactPointIndex].normal.z * deltaLambdaSplit); // Update the velocities of the body 1 by applying the impulse P mRigidBodyComponents.mSplitLinearVelocities[mContactConstraints[c].rigidBodyComponentIndexBody1].x -= mContactConstraints[c].massInverseBody1 * linearImpulse.x; mRigidBodyComponents.mSplitLinearVelocities[mContactConstraints[c].rigidBodyComponentIndexBody1].y -= 
mContactConstraints[c].massInverseBody1 * linearImpulse.y; mRigidBodyComponents.mSplitLinearVelocities[mContactConstraints[c].rigidBodyComponentIndexBody1].z -= mContactConstraints[c].massInverseBody1 * linearImpulse.z; mRigidBodyComponents.mSplitAngularVelocities[mContactConstraints[c].rigidBodyComponentIndexBody1].x -= mContactPoints[contactPointIndex].i1TimesR1CrossN.x * deltaLambdaSplit; mRigidBodyComponents.mSplitAngularVelocities[mContactConstraints[c].rigidBodyComponentIndexBody1].y -= mContactPoints[contactPointIndex].i1TimesR1CrossN.y * deltaLambdaSplit; mRigidBodyComponents.mSplitAngularVelocities[mContactConstraints[c].rigidBodyComponentIndexBody1].z -= mContactPoints[contactPointIndex].i1TimesR1CrossN.z * deltaLambdaSplit; // Update the velocities of the body 1 by applying the impulse P mRigidBodyComponents.mSplitLinearVelocities[mContactConstraints[c].rigidBodyComponentIndexBody2].x += mContactConstraints[c].massInverseBody2 * linearImpulse.x; mRigidBodyComponents.mSplitLinearVelocities[mContactConstraints[c].rigidBodyComponentIndexBody2].y += mContactConstraints[c].massInverseBody2 * linearImpulse.y; mRigidBodyComponents.mSplitLinearVelocities[mContactConstraints[c].rigidBodyComponentIndexBody2].z += mContactConstraints[c].massInverseBody2 * linearImpulse.z; mRigidBodyComponents.mSplitAngularVelocities[mContactConstraints[c].rigidBodyComponentIndexBody2].x += mContactPoints[contactPointIndex].i2TimesR2CrossN.x * deltaLambdaSplit; mRigidBodyComponents.mSplitAngularVelocities[mContactConstraints[c].rigidBodyComponentIndexBody2].y += mContactPoints[contactPointIndex].i2TimesR2CrossN.y * deltaLambdaSplit; mRigidBodyComponents.mSplitAngularVelocities[mContactConstraints[c].rigidBodyComponentIndexBody2].z += mContactPoints[contactPointIndex].i2TimesR2CrossN.z * deltaLambdaSplit; } contactPointIndex++; } // ------ First friction constraint at the center of the contact manifold ------ // // Compute J*v // deltaV = v2 + 
w2.cross(mContactConstraints[c].r2Friction) - v1 - w1.cross(mContactConstraints[c].r1Friction); Vector3 deltaV(v2.x + w2.y * mContactConstraints[c].r2Friction.z - w2.z * mContactConstraints[c].r2Friction.y - v1.x - w1.y * mContactConstraints[c].r1Friction.z + w1.z * mContactConstraints[c].r1Friction.y, v2.y + w2.z * mContactConstraints[c].r2Friction.x - w2.x * mContactConstraints[c].r2Friction.z - v1.y - w1.z * mContactConstraints[c].r1Friction.x + w1.x * mContactConstraints[c].r1Friction.z, v2.z + w2.x * mContactConstraints[c].r2Friction.y - w2.y * mContactConstraints[c].r2Friction.x - v1.z - w1.x * mContactConstraints[c].r1Friction.y + w1.y * mContactConstraints[c].r1Friction.x); decimal Jv = deltaV.x * mContactConstraints[c].frictionVector1.x + deltaV.y * mContactConstraints[c].frictionVector1.y + deltaV.z * mContactConstraints[c].frictionVector1.z; // Compute the Lagrange multiplier lambda decimal deltaLambda = -Jv * mContactConstraints[c].inverseFriction1Mass; decimal frictionLimit = mContactConstraints[c].frictionCoefficient * sumPenetrationImpulse; lambdaTemp = mContactConstraints[c].friction1Impulse; mContactConstraints[c].friction1Impulse = std::max(-frictionLimit, std::min(mContactConstraints[c].friction1Impulse + deltaLambda, frictionLimit)); deltaLambda = mContactConstraints[c].friction1Impulse - lambdaTemp; // Compute the impulse P=J^T * lambda Vector3 angularImpulseBody1(-mContactConstraints[c].r1CrossT1.x * deltaLambda, -mContactConstraints[c].r1CrossT1.y * deltaLambda, -mContactConstraints[c].r1CrossT1.z * deltaLambda); Vector3 linearImpulseBody2(mContactConstraints[c].frictionVector1.x * deltaLambda, mContactConstraints[c].frictionVector1.y * deltaLambda, mContactConstraints[c].frictionVector1.z * deltaLambda); Vector3 angularImpulseBody2(mContactConstraints[c].r2CrossT1.x * deltaLambda, mContactConstraints[c].r2CrossT1.y * deltaLambda, mContactConstraints[c].r2CrossT1.z * deltaLambda); // Update the velocities of the body 1 by applying the impulse 
P mRigidBodyComponents.mConstrainedLinearVelocities[mContactConstraints[c].rigidBodyComponentIndexBody1].x -= mContactConstraints[c].massInverseBody1 * linearImpulseBody2.x; mRigidBodyComponents.mConstrainedLinearVelocities[mContactConstraints[c].rigidBodyComponentIndexBody1].y -= mContactConstraints[c].massInverseBody1 * linearImpulseBody2.y; mRigidBodyComponents.mConstrainedLinearVelocities[mContactConstraints[c].rigidBodyComponentIndexBody1].z -= mContactConstraints[c].massInverseBody1 * linearImpulseBody2.z; mRigidBodyComponents.mConstrainedAngularVelocities[mContactConstraints[c].rigidBodyComponentIndexBody1] += mContactConstraints[c].inverseInertiaTensorBody1 * angularImpulseBody1; // Update the velocities of the body 2 by applying the impulse P mRigidBodyComponents.mConstrainedLinearVelocities[mContactConstraints[c].rigidBodyComponentIndexBody2].x += mContactConstraints[c].massInverseBody2 * linearImpulseBody2.x; mRigidBodyComponents.mConstrainedLinearVelocities[mContactConstraints[c].rigidBodyComponentIndexBody2].y += mContactConstraints[c].massInverseBody2 * linearImpulseBody2.y; mRigidBodyComponents.mConstrainedLinearVelocities[mContactConstraints[c].rigidBodyComponentIndexBody2].z += mContactConstraints[c].massInverseBody2 * linearImpulseBody2.z; mRigidBodyComponents.mConstrainedAngularVelocities[mContactConstraints[c].rigidBodyComponentIndexBody2] += mContactConstraints[c].inverseInertiaTensorBody2 * angularImpulseBody2; // ------ Second friction constraint at the center of the contact manifold ----- // // Compute J*v //deltaV = v2 + w2.cross(mContactConstraints[c].r2Friction) - v1 - w1.cross(mContactConstraints[c].r1Friction); deltaV.x = v2.x + w2.y * mContactConstraints[c].r2Friction.z - w2.z * mContactConstraints[c].r2Friction.y - v1.x - w1.y * mContactConstraints[c].r1Friction.z + w1.z * mContactConstraints[c].r1Friction.y; deltaV.y = v2.y + w2.z * mContactConstraints[c].r2Friction.x - w2.x * mContactConstraints[c].r2Friction.z - v1.y - w1.z * 
mContactConstraints[c].r1Friction.x + w1.x * mContactConstraints[c].r1Friction.z; deltaV.z = v2.z + w2.x * mContactConstraints[c].r2Friction.y - w2.y * mContactConstraints[c].r2Friction.x - v1.z - w1.x * mContactConstraints[c].r1Friction.y + w1.y * mContactConstraints[c].r1Friction.x; Jv = deltaV.x * mContactConstraints[c].frictionVector2.x + deltaV.y * mContactConstraints[c].frictionVector2.y + deltaV.z * mContactConstraints[c].frictionVector2.z; // Compute the Lagrange multiplier lambda deltaLambda = -Jv * mContactConstraints[c].inverseFriction2Mass; frictionLimit = mContactConstraints[c].frictionCoefficient * sumPenetrationImpulse; lambdaTemp = mContactConstraints[c].friction2Impulse; mContactConstraints[c].friction2Impulse = std::max(-frictionLimit, std::min(mContactConstraints[c].friction2Impulse + deltaLambda, frictionLimit)); deltaLambda = mContactConstraints[c].friction2Impulse - lambdaTemp; // Compute the impulse P=J^T * lambda angularImpulseBody1.x = -mContactConstraints[c].r1CrossT2.x * deltaLambda; angularImpulseBody1.y = -mContactConstraints[c].r1CrossT2.y * deltaLambda; angularImpulseBody1.z = -mContactConstraints[c].r1CrossT2.z * deltaLambda; linearImpulseBody2.x = mContactConstraints[c].frictionVector2.x * deltaLambda; linearImpulseBody2.y = mContactConstraints[c].frictionVector2.y * deltaLambda; linearImpulseBody2.z = mContactConstraints[c].frictionVector2.z * deltaLambda; angularImpulseBody2.x = mContactConstraints[c].r2CrossT2.x * deltaLambda; angularImpulseBody2.y = mContactConstraints[c].r2CrossT2.y * deltaLambda; angularImpulseBody2.z = mContactConstraints[c].r2CrossT2.z * deltaLambda; // Update the velocities of the body 1 by applying the impulse P mRigidBodyComponents.mConstrainedLinearVelocities[mContactConstraints[c].rigidBodyComponentIndexBody1].x -= mContactConstraints[c].massInverseBody1 * linearImpulseBody2.x; mRigidBodyComponents.mConstrainedLinearVelocities[mContactConstraints[c].rigidBodyComponentIndexBody1].y -= 
mContactConstraints[c].massInverseBody1 * linearImpulseBody2.y; mRigidBodyComponents.mConstrainedLinearVelocities[mContactConstraints[c].rigidBodyComponentIndexBody1].z -= mContactConstraints[c].massInverseBody1 * linearImpulseBody2.z; mRigidBodyComponents.mConstrainedAngularVelocities[mContactConstraints[c].rigidBodyComponentIndexBody1] += mContactConstraints[c].inverseInertiaTensorBody1 * angularImpulseBody1; // Update the velocities of the body 2 by applying the impulse P mRigidBodyComponents.mConstrainedLinearVelocities[mContactConstraints[c].rigidBodyComponentIndexBody2].x += mContactConstraints[c].massInverseBody2 * linearImpulseBody2.x; mRigidBodyComponents.mConstrainedLinearVelocities[mContactConstraints[c].rigidBodyComponentIndexBody2].y += mContactConstraints[c].massInverseBody2 * linearImpulseBody2.y; mRigidBodyComponents.mConstrainedLinearVelocities[mContactConstraints[c].rigidBodyComponentIndexBody2].z += mContactConstraints[c].massInverseBody2 * linearImpulseBody2.z; mRigidBodyComponents.mConstrainedAngularVelocities[mContactConstraints[c].rigidBodyComponentIndexBody2] += mContactConstraints[c].inverseInertiaTensorBody2 * angularImpulseBody2; // ------ Twist friction constraint at the center of the contact manifol ------ // // Compute J*v deltaV = w2 - w1; Jv = deltaV.x * mContactConstraints[c].normal.x + deltaV.y * mContactConstraints[c].normal.y + deltaV.z * mContactConstraints[c].normal.z; deltaLambda = -Jv * (mContactConstraints[c].inverseTwistFrictionMass); frictionLimit = mContactConstraints[c].frictionCoefficient * sumPenetrationImpulse; lambdaTemp = mContactConstraints[c].frictionTwistImpulse; mContactConstraints[c].frictionTwistImpulse = std::max(-frictionLimit, std::min(mContactConstraints[c].frictionTwistImpulse + deltaLambda, frictionLimit)); deltaLambda = mContactConstraints[c].frictionTwistImpulse - lambdaTemp; // Compute the impulse P=J^T * lambda angularImpulseBody2.x = mContactConstraints[c].normal.x * deltaLambda; 
angularImpulseBody2.y = mContactConstraints[c].normal.y * deltaLambda; angularImpulseBody2.z = mContactConstraints[c].normal.z * deltaLambda; // Update the velocities of the body 1 by applying the impulse P mRigidBodyComponents.mConstrainedAngularVelocities[mContactConstraints[c].rigidBodyComponentIndexBody1] -= mContactConstraints[c].inverseInertiaTensorBody1 * angularImpulseBody2; // Update the velocities of the body 1 by applying the impulse P mRigidBodyComponents.mConstrainedAngularVelocities[mContactConstraints[c].rigidBodyComponentIndexBody2] += mContactConstraints[c].inverseInertiaTensorBody2 * angularImpulseBody2; // --------- Rolling resistance constraint at the center of the contact manifold --------- // if (mContactConstraints[c].rollingResistanceFactor > 0) { // Compute J*v const Vector3 JvRolling = w2 - w1; // Compute the Lagrange multiplier lambda Vector3 deltaLambdaRolling = mContactConstraints[c].inverseRollingResistance * (-JvRolling); decimal rollingLimit = mContactConstraints[c].rollingResistanceFactor * sumPenetrationImpulse; Vector3 lambdaTempRolling = mContactConstraints[c].rollingResistanceImpulse; mContactConstraints[c].rollingResistanceImpulse = clamp(mContactConstraints[c].rollingResistanceImpulse + deltaLambdaRolling, rollingLimit); deltaLambdaRolling = mContactConstraints[c].rollingResistanceImpulse - lambdaTempRolling; // Update the velocities of the body 1 by applying the impulse P mRigidBodyComponents.mConstrainedAngularVelocities[mContactConstraints[c].rigidBodyComponentIndexBody1] -= mContactConstraints[c].inverseInertiaTensorBody1 * deltaLambdaRolling; // Update the velocities of the body 2 by applying the impulse P mRigidBodyComponents.mConstrainedAngularVelocities[mContactConstraints[c].rigidBodyComponentIndexBody2] += mContactConstraints[c].inverseInertiaTensorBody2 * deltaLambdaRolling; } } } // Compute the collision restitution factor from the restitution factor of each collider decimal 
ContactSolverSystem::computeMixedRestitutionFactor(Collider* collider1, Collider* collider2) const {

    decimal restitution1 = collider1->getMaterial().getBounciness();
    decimal restitution2 = collider2->getMaterial().getBounciness();

    // Return the largest restitution factor of the two colliders
    return (restitution1 > restitution2) ? restitution1 : restitution2;
}

// Compute the mixed friction coefficient from the friction coefficient of each collider
decimal ContactSolverSystem::computeMixedFrictionCoefficient(Collider* collider1, Collider* collider2) const {

    // Use the geometric mean to compute the mixed friction coefficient
    return std::sqrt(collider1->getMaterial().getFrictionCoefficient() *
                     collider2->getMaterial().getFrictionCoefficient());
}

// Compute the mixed rolling resistance factor between two colliders
// (arithmetic mean of the two colliders' rolling resistance values)
inline decimal ContactSolverSystem::computeMixedRollingResistance(Collider* collider1, Collider* collider2) const {
    return decimal(0.5f) * (collider1->getMaterial().getRollingResistance() + collider2->getMaterial().getRollingResistance());
}

// Store the computed impulses to use them to
// warm start the solver at the next iteration
void ContactSolverSystem::storeImpulses() {

    RP3D_PROFILE("ContactSolver::storeImpulses()", mProfiler);

    uint contactPointIndex = 0;

    // For each contact manifold
    for (uint c=0; c<mNbContactManifolds; c++) {

        // Save the accumulated penetration impulse of every contact point of the
        // manifold back onto the external contact object
        for (short int i=0; i<mContactConstraints[c].nbContacts; i++) {

            mContactPoints[contactPointIndex].externalContact->setPenetrationImpulse(mContactPoints[contactPointIndex].penetrationImpulse);

            contactPointIndex++;
        }

        // Save the accumulated friction / twist / rolling impulses and the friction
        // vectors on the external manifold so the next frame can warm start from them
        mContactConstraints[c].externalContactManifold->frictionImpulse1 = mContactConstraints[c].friction1Impulse;
        mContactConstraints[c].externalContactManifold->frictionImpulse2 = mContactConstraints[c].friction2Impulse;
        mContactConstraints[c].externalContactManifold->frictionTwistImpulse = mContactConstraints[c].frictionTwistImpulse;
        mContactConstraints[c].externalContactManifold->rollingResistanceImpulse = mContactConstraints[c].rollingResistanceImpulse;
        mContactConstraints[c].externalContactManifold->frictionVector1 = mContactConstraints[c].frictionVector1;
        mContactConstraints[c].externalContactManifold->frictionVector2 = mContactConstraints[c].frictionVector2;
    }
}

// Compute the two unit orthogonal vectors "t1" and "t2" that span the tangential friction plane
// for a contact manifold. The two vectors have to be such that : t1 x t2 = contactNormal.
void ContactSolverSystem::computeFrictionVectors(const Vector3& deltaVelocity, ContactManifoldSolver& contact) const {

    RP3D_PROFILE("ContactSolver::computeFrictionVectors()", mProfiler);

    assert(contact.normal.length() > decimal(0.0));

    // Compute the velocity difference vector in the tangential plane
    // (i.e. deltaVelocity with its component along the contact normal removed)
    decimal deltaVDotNormal = deltaVelocity.dot(contact.normal);
    Vector3 normalVelocity = deltaVDotNormal * contact.normal;
    Vector3 tangentVelocity(deltaVelocity.x - normalVelocity.x, deltaVelocity.y - normalVelocity.y,
                            deltaVelocity.z - normalVelocity.z);

    // If the velocity difference in the tangential plane is not zero
    decimal lengthTangentVelocity = tangentVelocity.length();
    if (lengthTangentVelocity > MACHINE_EPSILON) {

        // Compute the first friction vector in the direction of the tangent
        // velocity difference
        contact.frictionVector1 = tangentVelocity / lengthTangentVelocity;
    }
    else {

        // Get any orthogonal vector to the normal as the first friction vector
        contact.frictionVector1 = contact.normal.getOneUnitOrthogonalVector();
    }

    // The second friction vector is computed by the cross product of the first
    // friction vector and the contact normal
    contact.frictionVector2 = contact.normal.cross(contact.frictionVector1).getUnit();
}
27,239
1,467
/*
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 *  http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */

package software.amazon.awssdk.awscore.presigner;

import java.time.Duration;
import software.amazon.awssdk.annotations.SdkProtectedApi;
import software.amazon.awssdk.annotations.SdkPublicApi;
import software.amazon.awssdk.utils.Validate;

/**
 * The base class for all presign requests.
 * <p>
 * Immutable: the only state is the mandatory {@code signatureDuration},
 * validated as non-null at construction time.
 */
@SdkPublicApi
public abstract class PresignRequest {
    private final Duration signatureDuration;

    protected PresignRequest(DefaultBuilder<?> builder) {
        // Fails fast with an explicit message if the caller never set a duration.
        this.signatureDuration = Validate.paramNotNull(builder.signatureDuration, "signatureDuration");
    }

    /**
     * Retrieves the duration for which this presigned request should be valid. After this time has
     * expired, attempting to use the presigned request will fail.
     */
    public Duration signatureDuration() {
        return this.signatureDuration;
    }

    /**
     * The base interface for all presign request builders.
     */
    @SdkPublicApi
    public interface Builder {
        /**
         * Specifies the duration for which this presigned request should be valid. After this time has
         * expired, attempting to use the presigned request will fail.
         */
        Builder signatureDuration(Duration signatureDuration);

        /**
         * Build the presigned request, based on the configuration on this builder.
         */
        PresignRequest build();
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }

        PresignRequest that = (PresignRequest) o;

        // signatureDuration is never null (enforced by the constructor).
        return signatureDuration.equals(that.signatureDuration);
    }

    @Override
    public int hashCode() {
        return signatureDuration.hashCode();
    }

    /**
     * Shared builder implementation; uses the curiously-recurring generic parameter
     * {@code B} so subclasses' setters return the subclass builder type.
     */
    @SdkProtectedApi
    protected abstract static class DefaultBuilder<B extends DefaultBuilder<B>> implements Builder {
        private Duration signatureDuration;

        protected DefaultBuilder() {
        }

        // Copy-constructor used by toBuilder()-style conversions.
        protected DefaultBuilder(PresignRequest request) {
            this.signatureDuration = request.signatureDuration;
        }

        @Override
        public B signatureDuration(Duration signatureDuration) {
            this.signatureDuration = signatureDuration;
            return thisBuilder();
        }

        @SuppressWarnings("unchecked")
        private B thisBuilder() {
            return (B) this;
        }
    }
}
1,065
14,668
// Copyright 2021 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "remoting/host/ipc_url_forwarder_configurator.h" #include "remoting/host/desktop_session_proxy.h" namespace remoting { IpcUrlForwarderConfigurator::IpcUrlForwarderConfigurator( scoped_refptr<DesktopSessionProxy> desktop_session_proxy) : desktop_session_proxy_(desktop_session_proxy) {} IpcUrlForwarderConfigurator::~IpcUrlForwarderConfigurator() = default; void IpcUrlForwarderConfigurator::IsUrlForwarderSetUp( IsUrlForwarderSetUpCallback callback) { desktop_session_proxy_->IsUrlForwarderSetUp(std::move(callback)); } void IpcUrlForwarderConfigurator::SetUpUrlForwarder( const SetUpUrlForwarderCallback& callback) { desktop_session_proxy_->SetUpUrlForwarder(callback); } } // namespace remoting
289
670
package com.hw.photomovie.segment;

import android.graphics.Bitmap;
import com.hw.photomovie.model.PhotoData;
import com.hw.photomovie.opengl.BitmapTexture;
import com.hw.photomovie.opengl.GLESCanvas;
import com.hw.photomovie.segment.animation.SrcAnimation;
import com.hw.photomovie.segment.animation.SrcScaleAnimation;
import com.hw.photomovie.util.Utils;

/**
 * Created by huangwei on 2015/5/30.
 *
 * A single-photo movie segment that animates the photo's scale from
 * {@code mFrom} to {@code mTo} over the segment's duration.
 */
public class ScaleSegment extends SingleBitmapSegment {

    private SrcAnimation mSrcAnimation; // updates mBitmapInfo.srcShowRect each frame
    private float mFrom = 1f; // start scale factor
    private float mTo = 1f;   // end scale factor

    public ScaleSegment(int duration, float from, float to) {
        this.mDuration = duration;
        mFrom = from;
        mTo = to;
    }

    public void onPrepare() {
        PhotoData photoData = this.getPhoto(0);
        if (photoData != null) {
            // NOTE(review): 4 is a PhotoData data-type constant — confirm its meaning
            // against PhotoData's constants; consider replacing the magic number.
            photoData.prepareData(4, new PluginListener(this));
        } else {
            throw new NullPointerException("PhotoData is null");
        }
    }

    // Called once the bitmap is loaded: fits the photo to the viewport and
    // builds the scale animation between the configured factors.
    protected void onDataPrepared() {
        mBitmapInfo.applyScaleType(mViewportRect);
        mSrcAnimation = new SrcScaleAnimation(mBitmapInfo.srcRect, mBitmapInfo.srcShowRect, mViewportRect, mFrom, mTo);
        mDataPrepared = true;
    }

    // Intentionally empty? — drawing happens in the GLESCanvas overload below;
    // confirm nothing relies on this overload doing work.
    public void drawFrame(float segmentProgress) {

    }

    public void drawFrame(GLESCanvas canvas, float segmentRate) {
        if (!mDataPrepared) {
            return;
        }
        // Advance the scale animation before drawing (mutates srcShowRect).
        mSrcAnimation.update(segmentRate);
        if (this.mBitmapInfo != null && mBitmapInfo.bitmapTexture != null) {
            canvas.drawTexture(this.mBitmapInfo.bitmapTexture, this.mBitmapInfo.srcShowRect, this.mViewportRect);
        }
    }

    public int getRequiredPhotoNum() {
        return 1;
    }

    /**
     * Receives the async bitmap-load result and finishes segment preparation,
     * then notifies the segment-prepare listener with the outcome.
     */
    private class PluginListener extends PhotoData.SimpleOnDataLoadListener {
        private ScaleSegment segment;

        public PluginListener(ScaleSegment segment) {
            this.segment = segment;
        }

        @Override
        public void onDataLoaded(PhotoData photoData, Bitmap bitmap) {
            boolean success = false;
            if (Utils.isBitmapAvailable(bitmap)) {
                segment.mBitmapInfo = new BitmapInfo();
                segment.mBitmapInfo.bitmapTexture = new BitmapTexture(bitmap);
                segment.mBitmapInfo.srcRect.set(0, 0, bitmap.getWidth(), bitmap.getHeight());
                segment.mBitmapInfo.srcShowRect.set(0, 0, bitmap.getWidth(), bitmap.getHeight());
                segment.onDataPrepared();
                success = true;
            }
            if (segment.mOnSegmentPrepareListener != null) {
                segment.mOnSegmentPrepareListener.onSegmentPrepared(success);
            }
        }
    }
}
1,199
4,826
<filename>LayoutManagerGroup/src/main/java/com/dingmouren/layoutmanagergroup/echelon/EchelonLayoutManager.java<gh_stars>1000+ package com.dingmouren.layoutmanagergroup.echelon; import android.content.Context; import android.support.v7.widget.RecyclerView; import android.view.View; import java.util.ArrayList; /** * Created by 钉某人 * github: https://github.com/DingMouRen * email: <EMAIL> */ public class EchelonLayoutManager extends RecyclerView.LayoutManager { private static final String TAG = "EchelonLayoutManager"; private Context mContext; private int mItemViewWidth; private int mItemViewHeight; private int mItemCount; private int mScrollOffset = Integer.MAX_VALUE; private float mScale = 0.9f; public EchelonLayoutManager(Context context) { this.mContext = context; mItemViewWidth = (int) (getHorizontalSpace() * 0.87f);//item的宽 mItemViewHeight = (int) (mItemViewWidth * 1.46f);//item的高 } @Override public RecyclerView.LayoutParams generateDefaultLayoutParams() { return new RecyclerView.LayoutParams(RecyclerView.LayoutParams.WRAP_CONTENT, RecyclerView.LayoutParams.WRAP_CONTENT); } @Override public void onLayoutChildren(RecyclerView.Recycler recycler, RecyclerView.State state) { if (state.getItemCount() == 0 || state.isPreLayout()) return; removeAndRecycleAllViews(recycler); mItemViewWidth = (int) (getHorizontalSpace() * 0.87f); mItemViewHeight = (int) (mItemViewWidth * 1.46f); mItemCount = getItemCount(); mScrollOffset = Math.min(Math.max(mItemViewHeight, mScrollOffset), mItemCount * mItemViewHeight); layoutChild(recycler); } @Override public int scrollVerticallyBy(int dy, RecyclerView.Recycler recycler, RecyclerView.State state) { int pendingScrollOffset = mScrollOffset + dy; mScrollOffset = Math.min(Math.max(mItemViewHeight, mScrollOffset + dy), mItemCount * mItemViewHeight); layoutChild(recycler); return mScrollOffset - pendingScrollOffset + dy; } @Override public boolean canScrollVertically() { return true; } private void layoutChild(RecyclerView.Recycler recycler) 
{ if (getItemCount() == 0 ) return; int bottomItemPosition = (int) Math.floor(mScrollOffset / mItemViewHeight); int remainSpace = getVerticalSpace() - mItemViewHeight; int bottomItemVisibleHeight = mScrollOffset % mItemViewHeight; final float offsetPercentRelativeToItemView = bottomItemVisibleHeight * 1.0f / mItemViewHeight; ArrayList<ItemViewInfo> layoutInfos = new ArrayList<>(); for (int i = bottomItemPosition - 1, j = 1; i >= 0; i--, j++) { double maxOffset = (getVerticalSpace() - mItemViewHeight) / 2 * Math.pow(0.8, j); int start = (int) (remainSpace - offsetPercentRelativeToItemView * maxOffset); float scaleXY = (float) (Math.pow(mScale, j - 1) * (1 - offsetPercentRelativeToItemView * (1 - mScale))); float positonOffset = offsetPercentRelativeToItemView; float layoutPercent = start * 1.0f / getVerticalSpace(); ItemViewInfo info = new ItemViewInfo(start, scaleXY, positonOffset, layoutPercent); layoutInfos.add(0, info); remainSpace = (int) (remainSpace - maxOffset); if (remainSpace <= 0) { info.setTop((int) (remainSpace + maxOffset)); info.setPositionOffset(0); info.setLayoutPercent(info.getTop() / getVerticalSpace()); info.setScaleXY((float) Math.pow(mScale, j - 1)); ; break; } } if (bottomItemPosition < mItemCount) { final int start = getVerticalSpace() - bottomItemVisibleHeight; layoutInfos.add(new ItemViewInfo(start, 1.0f, bottomItemVisibleHeight * 1.0f / mItemViewHeight, start * 1.0f / getVerticalSpace()) .setIsBottom()); } else { bottomItemPosition = bottomItemPosition - 1;//99 } int layoutCount = layoutInfos.size(); final int startPos = bottomItemPosition - (layoutCount - 1); final int endPos = bottomItemPosition; final int childCount = getChildCount(); for (int i = childCount - 1; i >= 0; i--) { View childView = getChildAt(i); int pos = getPosition(childView); if (pos > endPos || pos < startPos) { removeAndRecycleView(childView, recycler); } } detachAndScrapAttachedViews(recycler); for (int i = 0; i < layoutCount; i++) { View view = 
recycler.getViewForPosition(startPos + i); ItemViewInfo layoutInfo = layoutInfos.get(i); addView(view); measureChildWithExactlySize(view); int left = (getHorizontalSpace() - mItemViewWidth) / 2; layoutDecoratedWithMargins(view, left, layoutInfo.getTop(), left + mItemViewWidth, layoutInfo.getTop() + mItemViewHeight); view.setPivotX(view.getWidth() / 2); view.setPivotY(0); view.setScaleX(layoutInfo.getScaleXY()); view.setScaleY(layoutInfo.getScaleXY()); } } /** * 测量itemview的确切大小 */ private void measureChildWithExactlySize(View child ) { final int widthSpec = View.MeasureSpec.makeMeasureSpec(mItemViewWidth, View.MeasureSpec.EXACTLY); final int heightSpec = View.MeasureSpec.makeMeasureSpec(mItemViewHeight, View.MeasureSpec.EXACTLY); child.measure(widthSpec, heightSpec); } /** * 获取RecyclerView的显示高度 */ public int getVerticalSpace() { return getHeight() - getPaddingTop() - getPaddingBottom(); } /** * 获取RecyclerView的显示宽度 */ public int getHorizontalSpace() { return getWidth() - getPaddingLeft() - getPaddingRight(); } }
2,507
306
// Copyright (C) 2011 - <NAME>. All rights reserved. package com.lambdaworks.crypto.test; import com.lambdaworks.codec.Base64; import com.lambdaworks.crypto.SCryptUtil; import org.junit.Assert; import org.junit.Test; import static org.junit.Assert.*; public class SCryptUtilTest { String passwd = "secret"; @Test public void scrypt() { int N = 16384; int r = 8; int p = 1; String hashed = SCryptUtil.scrypt(passwd, N, r, p); String[] parts = hashed.split("\\$"); assertEquals(5, parts.length); assertEquals("s0", parts[1]); Assert.assertEquals(16, Base64.decode(parts[3].toCharArray()).length); assertEquals(32, Base64.decode(parts[4].toCharArray()).length); int params = Integer.valueOf(parts[2], 16); assertEquals(N, (int) Math.pow(2, params >> 16 & 0xffff)); assertEquals(r, params >> 8 & 0xff); assertEquals(p, params >> 0 & 0xff); } @Test public void check() { String hashed = SCryptUtil.scrypt(passwd, 16384, 8, 1); assertTrue(SCryptUtil.check(passwd, hashed)); assertFalse(SCryptUtil.check("s3cr3t", hashed)); } @Test public void format_0_rp_max() throws Exception { int N = 2; int r = 255; int p = 255; String hashed = SCryptUtil.scrypt(passwd, N, r, p); assertTrue(SCryptUtil.check(passwd, hashed)); String[] parts = hashed.split("\\$"); int params = Integer.valueOf(parts[2], 16); assertEquals(N, (int) Math.pow(2, params >>> 16 & 0xffff)); assertEquals(r, params >> 8 & 0xff); assertEquals(p, params >> 0 & 0xff); } }
764
348
{"nom":"Beaufort","circ":"2ème circonscription","dpt":"Savoie","inscrits":1726,"abs":872,"votants":854,"blancs":19,"nuls":3,"exp":832,"res":[{"nuance":"LR","nom":"<NAME>","voix":314},{"nuance":"DIV","nom":"<NAME>","voix":209},{"nuance":"FI","nom":"Mme <NAME>","voix":102},{"nuance":"FN","nom":"M. <NAME>","voix":68},{"nuance":"ECO","nom":"M. <NAME>","voix":65},{"nuance":"COM","nom":"Mme <NAME>","voix":17},{"nuance":"DIV","nom":"Mme <NAME>","voix":17},{"nuance":"ECO","nom":"Mme <NAME>","voix":13},{"nuance":"DLF","nom":"Mme <NAME>","voix":11},{"nuance":"DIV","nom":"Mme <NAME>","voix":8},{"nuance":"DIV","nom":"Mme <NAME>","voix":7},{"nuance":"EXG","nom":"M. <NAME>","voix":1},{"nuance":"DIV","nom":"<NAME>","voix":0}]}
293
4,879
#include "search/highlighting.hpp" using namespace std; namespace { // Makes continuous range for tokens and prefix. template <class Iter, class Value> class CombinedIterator { Iter m_cur; Iter m_end; Value const * m_val; public: CombinedIterator(Iter cur, Iter end, Value const * val) : m_cur(cur), m_end(end), m_val(val) {} Value const & operator*() const { ASSERT(m_val != nullptr || m_cur != m_end, ("dereferencing of an empty iterator")); if (m_cur != m_end) return *m_cur; return *m_val; } CombinedIterator & operator++() { if (m_cur != m_end) ++m_cur; else m_val = nullptr; return *this; } bool operator==(CombinedIterator const & other) const { return m_val == other.m_val && m_cur == other.m_cur; } bool operator!=(CombinedIterator const & other) const { return !(*this == other); } }; } // namespace namespace search { void HighlightResult(QueryTokens const & tokens, strings::UniString const & prefix, Result & res) { using Iter = QueryTokens::const_iterator; using CombinedIter = CombinedIterator<Iter, strings::UniString>; CombinedIter beg(tokens.begin(), tokens.end(), prefix.empty() ? nullptr : &prefix); CombinedIter end(tokens.end() /* cur */, tokens.end() /* end */, nullptr); auto assignHighlightRange = [&](pair<uint16_t, uint16_t> const & range) { res.AddHighlightRange(range); }; SearchStringTokensIntersectionRanges(res.GetString(), beg, end, assignHighlightRange); } } // namespace search
530
1,546
<gh_stars>1000+ package org.libpag; import android.graphics.Bitmap; import android.util.Log; import java.nio.ByteBuffer; public class TraceImage { public static void Trace(String tag, ByteBuffer byteBuffer, int width, int height) { Bitmap bitmap = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888); bitmap.copyPixelsFromBuffer(byteBuffer); Log.i(tag, "Image(width = " + bitmap.getWidth() + ", height = " + bitmap.getHeight() + ")"); } }
182
459
package com.knight.arch.ui.fragment;

import android.app.Activity;
import android.app.AlertDialog;
import android.app.ProgressDialog;
import android.content.DialogInterface;
import android.content.Intent;
import android.net.Uri;
import android.os.Bundle;
import android.preference.Preference;
import android.preference.PreferenceFragment;
import android.widget.Toast;

import com.knight.arch.BuildConfig;
import com.knight.arch.R;
import com.knight.arch.api.FirService;
import com.knight.arch.model.FirVersion;
import com.knight.arch.module.Injector;
import com.umeng.analytics.MobclickAgent;

import javax.inject.Inject;

import de.psdev.licensesdialog.LicensesDialog;
import rx.Subscriber;
import rx.android.app.AppObservable;

/**
 * Settings screen: shows build info, an open-source licenses dialog, and a
 * manual "check for update" entry backed by the fir.im API.
 *
 * @author andyiac
 * @date 15-9-16
 * @web http://blog.andyiac.com
 * @github https://github.com/andyiac
 */
public class SettingsFragment extends PreferenceFragment {

    // Guards against double field-injection when the fragment is re-attached.
    private boolean injected = false;

    @Inject
    FirService firService;

    @Override
    public void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        // NOTE(review): return value unused — looks like a leftover call; confirm
        // before removing.
        getActivity();
        addPreferencesFromResource(R.xml.preference);

        // Show the build timestamp in the "build time" preference.
        findPreference(getString(R.string.pref_build_time))
                .setSummary(BuildConfig.BUILD_TIME);

        // Open-source licenses dialog.
        findPreference(getString(R.string.open_source_licence))
                .setOnPreferenceClickListener(new Preference.OnPreferenceClickListener() {
                    @Override
                    public boolean onPreferenceClick(Preference preference) {
                        new LicensesDialog.Builder(getActivity())
                                .setNotices(R.raw.notices)
                                .setIncludeOwnLicense(true)
                                .setThemeResourceId(R.style.custom_theme)
                                .build()
                                .show();
                        return false;
                    }
                });

        // Manual update check against fir.im.
        Preference checkVersionPref = findPreference(getString(R.string.pref_check_version));
        checkVersionPref.setOnPreferenceClickListener(new Preference.OnPreferenceClickListener() {
            @Override
            public boolean onPreferenceClick(Preference preference) {
                checkVersion();
                return true;
            }
        });
        checkVersionPref.setSummary(getString(R.string.s_check_version, BuildConfig.VERSION_NAME));
    }

    public void onResume() {
        super.onResume();
        MobclickAgent.onPageStart("SettingsFragment"); // Umeng page analytics (was: 统计页面)
    }

    public void onPause() {
        super.onPause();
        MobclickAgent.onPageEnd("SettingsFragment");
    }

    /**
     * Queries fir.im for the latest published version; on success either offers
     * an update dialog (remote build number greater than ours) or shows an
     * "up to date" toast. A progress dialog is shown for the duration.
     */
    private void checkVersion() {
        final ProgressDialog dialog = new ProgressDialog(getActivity());
        dialog.setMessage(getString(R.string.msg_checking_version));
        dialog.show();
        AppObservable.bindFragment(this,
                firService.checkVersion(BuildConfig.FIR_APPLICATION_ID, BuildConfig.FIR_API_TOKEN))
                .subscribe(new Subscriber<FirVersion>() {
                    @Override
                    public void onCompleted() {
                    }

                    @Override
                    public void onError(Throwable e) {
                        Toast.makeText(getActivity(), e.getMessage(), Toast.LENGTH_SHORT).show();
                        dialog.dismiss();
                    }

                    @Override
                    public void onNext(FirVersion firVersion) {
                        if (firVersion.getVersion() > BuildConfig.VERSION_CODE) {
                            dialog.dismiss();
                            showNewVersionFoundDialog(firVersion);
                        } else {
                            Toast.makeText(getActivity(), R.string.msg_this_is_latest_version,
                                    Toast.LENGTH_SHORT).show();
                            dialog.dismiss();
                        }
                    }
                });
    }

    /**
     * Offers to open the fir.im download page for {@code newFirVersion} in an
     * external browser.
     */
    private void showNewVersionFoundDialog(final FirVersion newFirVersion) {
        new AlertDialog.Builder(getActivity())
                .setTitle(R.string.title_new_version_found)
                .setMessage(getString(R.string.msg_new_version_found,
                        newFirVersion.getVersionShort(), newFirVersion.getVersion(),
                        newFirVersion.getChangeLog()))
                .setPositiveButton(R.string.btn_dialog_update, new DialogInterface.OnClickListener() {
                    @Override
                    public void onClick(DialogInterface dialog, int which) {
                        Intent downloadPageIntent = new Intent(Intent.ACTION_VIEW);
                        downloadPageIntent.setData(Uri.parse(newFirVersion.getUpdateUrl()));
                        getActivity().startActivity(downloadPageIntent);
                    }
                })
                .setNegativeButton(android.R.string.cancel, null)
                .create()
                .show();
    }

    @Override
    public void onAttach(Activity activity) {
        super.onAttach(activity);
        // The hosting activity provides the DI graph.
        if (!injected) {
            injected = true;
            Injector injector = (Injector) getActivity();
            injector.inject(this);
        }
    }
}
412
<reponame>mauguignard/cbmc<filename>regression/goto-instrument/generate-function-body-havoc-params-simple-null/main.c #include <assert.h> #include <stdlib.h> void func(int *p); void main() { int *p; p = NULL; func(p); }
100
336
<filename>system/apps/109_radsense/source/radsens/radSens1v2.h #ifndef _RADSENS1V2_H_ #define _RADSENS1V2_H_ #include <stdint.h> #if defined(ARDUINO) #include <Arduino.h> #include <Wire.h> #elif defined(__arm__) #include <wiringPiI2C.h> #include <stdio.h> #endif #include "defines.h" class ClimateGuard_RadSens1v2 { private: #if defined(__arm__) int _fd = 0; #endif uint8_t _sensor_address; uint8_t _data[RS_TOTAL_RG] = { 0 }; uint32_t _pulse_cnt; bool updateData(); bool hasData(); public: ClimateGuard_RadSens1v2(uint8_t sensorAddress); ~ClimateGuard_RadSens1v2(); //Methods for get or set data bool radSens_init(); bool getData(); uint8_t getChipId(); uint8_t getFirmwareVersion(); float getRadIntensyDyanmic(); float getRadIntensyStatic(); uint32_t getNumberOfPulses(); uint8_t getSensorAddress(); bool getHVGeneratorState(); uint8_t getSensitivity(); bool setHVGeneratorState(bool state); bool setSensitivity(uint8_t sens); }; #endif // _RADSENS1V2_H_
463
852
#include "CondFormats/OptAlignObjects/interface/PXsensors.h" PXsensors::PXsensors() {} PXsensors::~PXsensors() {}
54
575
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef COMPONENTS_PASSWORD_MANAGER_CORE_BROWSER_BROWSER_SAVE_PASSWORD_PROGRESS_LOGGER_H_
#define COMPONENTS_PASSWORD_MANAGER_CORE_BROWSER_BROWSER_SAVE_PASSWORD_PROGRESS_LOGGER_H_

#include <string>

#include "base/macros.h"
#include "components/autofill/core/common/mojom/autofill_types.mojom.h"
#include "components/autofill/core/common/save_password_progress_logger.h"
#include "url/gurl.h"

namespace autofill {
class FormStructure;
class LogManager;
}

namespace password_manager {

struct PasswordForm;

// This is the SavePasswordProgressLogger specialization for the browser code,
// where the LogManager can be directly called.
class BrowserSavePasswordProgressLogger
    : public autofill::SavePasswordProgressLogger {
 public:
  explicit BrowserSavePasswordProgressLogger(
      const autofill::LogManager* log_manager);
  ~BrowserSavePasswordProgressLogger() override;

  // Browser-specific addition to the base class' Log* methods. The input is
  // sanitized and passed to SendLog for display.
  void LogFormStructure(StringID label, const autofill::FormStructure& form);

  // Browser-specific addition to the base class' Log* methods. The input is
  // sanitized and passed to SendLog for display.
  void LogSuccessiveOrigins(StringID label,
                            const GURL& old_origin,
                            const GURL& new_origin);

  // Browser-specific addition to the base class' Log* methods. The input is
  // passed to SendLog for display.
  void LogString(StringID label, const std::string& s);

  // Log a password successful submission event.
  void LogSuccessfulSubmissionIndicatorEvent(
      autofill::mojom::SubmissionIndicatorEvent event);

  void LogPasswordForm(StringID label, const PasswordForm& form);

 protected:
  // autofill::SavePasswordProgressLogger:
  void SendLog(const std::string& log) override;

 private:
  // The LogManager to which logs can be sent for display. The log_manager must
  // outlive this logger.
  const autofill::LogManager* const log_manager_;

  // Returns string representation for |FormStructure|.
  std::string FormStructureToFieldsLogString(
      const autofill::FormStructure& form);

  // Returns string representation of password attributes for |FormStructure|.
  std::string FormStructurePasswordAttributesLogString(
      const autofill::FormStructure& form);

  // Returns the string representation of a password attribute.
  std::string PasswordAttributeLogString(StringID string_id,
                                         const std::string& attribute_value);

  // Returns the string representation of a binary password attribute.
  std::string BinaryPasswordAttributeLogString(StringID string_id,
                                               bool attribute_value);

  DISALLOW_COPY_AND_ASSIGN(BrowserSavePasswordProgressLogger);
};

}  // namespace password_manager

#endif  // COMPONENTS_PASSWORD_MANAGER_CORE_BROWSER_BROWSER_SAVE_PASSWORD_PROGRESS_LOGGER_H_
1,029
841
package cgeo.geocaching.connector.trackable;

import cgeo.geocaching.CgeoApplication;
import cgeo.geocaching.R;
import cgeo.geocaching.log.LogEntry;
import cgeo.geocaching.log.LogType;
import cgeo.geocaching.models.Image;
import cgeo.geocaching.models.Trackable;
import cgeo.geocaching.utils.Log;
import cgeo.geocaching.utils.SynchronizedDateFormat;

import androidx.annotation.NonNull;
import androidx.annotation.Nullable;

import java.io.IOException;
import java.io.StringReader;
import java.text.ParseException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Date;
import java.util.List;
import java.util.Locale;
import java.util.TimeZone;
import javax.xml.parsers.ParserConfigurationException;
import javax.xml.parsers.SAXParser;
import javax.xml.parsers.SAXParserFactory;

import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.xml.sax.Attributes;
import org.xml.sax.InputSource;
import org.xml.sax.SAXException;
import org.xml.sax.XMLReader;
import org.xml.sax.helpers.DefaultHandler;
import org.xmlpull.v1.XmlPullParser;
import org.xmlpull.v1.XmlPullParserException;
import org.xmlpull.v1.XmlPullParserFactory;

/**
 * Parsers for the geokrety.org XML API.
 *
 * Two formats are handled: the "export details" XML (parsed via SAX into
 * {@link Trackable} objects, see {@link #parse(InputSource)}) and the "ruchy"
 * (move) response XML (parsed via XmlPull, see {@link #parseResponse(String)}).
 */
class GeokretyParser {

    private GeokretyParser() {
        // Utility class
    }

    /**
     * SAX handler for the geokrety "export details" XML.
     *
     * The handler is stateful: boolean flags record which container element
     * (moves / images / comments) is currently open, because several tag names
     * ("image", "user", "comment", "id", ...) appear in more than one context
     * and must be interpreted differently depending on where they occur.
     */
    private static class GeokretyHandler extends DefaultHandler {
        // Dates in the feed are UTC; "kk" is hour-of-day 1-24.
        private static final SynchronizedDateFormat DATE_FORMAT = new SynchronizedDateFormat("yyyy-MM-dd kk:mm", TimeZone.getTimeZone("UTC"), Locale.US);
        private static final SynchronizedDateFormat DATE_FORMAT_SECONDS = new SynchronizedDateFormat("yyyy-MM-dd kk:mm:ss", TimeZone.getTimeZone("UTC"), Locale.US);

        private final List<Trackable> trackables = new ArrayList<>();
        // The trackable currently being built (last element of trackables).
        private Trackable trackable;
        // Builder for the move ("log") element currently being parsed.
        private LogEntry.Builder logEntryBuilder;
        private final List<LogEntry> logsEntries = new ArrayList<>();
        private Image.Builder imageBuilder;

        // True while inside an element whose text may span lines (description/comment);
        // in that mode characters() accumulates instead of overwriting.
        private boolean isMultiline = false;
        // Context flags: which container element is currently open.
        private boolean isInMoves = false;
        private boolean isInImages = false;
        private boolean isInComments = false;
        // Text content of the element currently being parsed.
        private String content;

        @NonNull
        public final List<Trackable> getTrackables() {
            return trackables;
        }

        @Override
        public final void startElement(final String uri, final String localName, final String qName,
                                       final Attributes attributes) throws SAXException {
            content = "";
            if (localName.equalsIgnoreCase("geokret")) {
                trackable = new Trackable();
                trackable.forceSetBrand(TrackableBrand.GEOKRETY);
                trackables.add(trackable);
                // Default until a "state" attribute/element says otherwise.
                trackable.setSpottedType(Trackable.SPOTTED_OWNER);
            }
            try {
                if (localName.equalsIgnoreCase("geokret")) {
                    final String kretyId = attributes.getValue("id");
                    if (StringUtils.isNumeric(kretyId)) {
                        trackable.setGeocode(GeokretyConnector.geocode(Integer.parseInt(kretyId)));
                    }
                    final String distance = attributes.getValue("dist");
                    if (StringUtils.isNotBlank(distance)) {
                        trackable.setDistance(Float.parseFloat(distance));
                    }
                    final String trackingcode = attributes.getValue("nr");
                    if (StringUtils.isNotBlank(trackingcode)) {
                        trackable.setTrackingcode(trackingcode);
                    }
                    final String kretyType = attributes.getValue("type");
                    if (StringUtils.isNotBlank(kretyType)) {
                        trackable.setType(getType(Integer.parseInt(kretyType)));
                    }
                    final String kretyState = attributes.getValue("state");
                    if (StringUtils.isNotBlank(kretyState)) {
                        trackable.setSpottedType(getSpottedType(Integer.parseInt(kretyState)));
                    }
                    final String waypointCode = attributes.getValue("waypoint");
                    if (StringUtils.isNotBlank(waypointCode)) {
                        trackable.setSpottedName(waypointCode);
                    }
                    final String imageName = attributes.getValue("image");
                    if (StringUtils.isNotBlank(imageName)) {
                        trackable.setImage("http://geokrety.org/obrazki/" + imageName);
                    }
                    final String ownerId = attributes.getValue("owner_id");
                    if (StringUtils.isNotBlank(ownerId)) {
                        trackable.setOwner(CgeoApplication.getInstance().getString(R.string.init_geokrety_userid, ownerId));
                    }
                    final String missing = attributes.getValue("missing");
                    if (StringUtils.isNotBlank(missing)) {
                        trackable.setMissing("1".equalsIgnoreCase(missing));
                    }
                }
                if (localName.equalsIgnoreCase("owner")) {
                    final String ownerId = attributes.getValue("id");
                    if (StringUtils.isNotBlank(ownerId)) {
                        // Placeholder name from the numeric id; may be replaced by the
                        // element text in endElement().
                        trackable.setOwner(CgeoApplication.getInstance().getString(R.string.init_geokrety_userid, ownerId));
                    }
                }
                if (localName.equalsIgnoreCase("type")) {
                    final String kretyType = attributes.getValue("id");
                    if (StringUtils.isNotBlank(kretyType)) {
                        trackable.setType(getType(Integer.parseInt(kretyType)));
                    }
                }
                if (localName.equalsIgnoreCase("description")) {
                    isMultiline = true;
                }
                // TODO: latitude/longitude could be parsed, but trackable doesn't support it, yet...
                //if (localName.equalsIgnoreCase("position")) {
                //final String latitude = attributes.getValue("latitude");
                //if (StringUtils.isNotBlank(latitude) {
                // trackable.setLatitude(latitude);
                //}
                //final String longitude = attributes.getValue("longitude");
                //if (StringUtils.isNotBlank(longitude) {
                // trackable.setLongitude(longitude);
                //}
                //}
                if (localName.equalsIgnoreCase("move")) {
                    logEntryBuilder = new LogEntry.Builder();
                    isInMoves = true;
                }
                if (localName.equalsIgnoreCase("date")) {
                    final String movedDate = attributes.getValue("moved");
                    if (StringUtils.isNotBlank(movedDate)) {
                        logEntryBuilder.setDate(DATE_FORMAT.parse(movedDate).getTime());
                    }
                }
                // "user" inside <comments> belongs to a comment author, which is
                // handled separately (and currently ignored) — hence the guard.
                if (localName.equalsIgnoreCase("user") && !isInComments) {
                    final String userId = attributes.getValue("id");
                    if (StringUtils.isNotBlank(userId)) {
                        logEntryBuilder.setAuthor(CgeoApplication.getInstance().getString(R.string.init_geokrety_userid, userId));
                    }
                }
                if (localName.equalsIgnoreCase("comments")) {
                    isInComments = true;
                }
                if (localName.equalsIgnoreCase("comment")) {
                    isMultiline = true;
                }
                if (localName.equalsIgnoreCase("logtype")) {
                    final String logtype = attributes.getValue("id");
                    logEntryBuilder.setLogType(getLogType(Integer.parseInt(logtype)));
                }
                if (localName.equalsIgnoreCase("images")) {
                    isInImages = true;
                }
                if (localName.equalsIgnoreCase("image")) {
                    imageBuilder = new Image.Builder();
                    final String title = attributes.getValue("title");
                    if (StringUtils.isNotBlank(title)) {
                        imageBuilder.setTitle(title);
                    }
                }
            } catch (final ParseException | NumberFormatException e) {
                Log.e("Parsing GeoKret", e);
            }
        }

        @Override
        public final void endElement(final String uri, final String localName,
                                     final String qName) throws SAXException {
            try {
                if (localName.equalsIgnoreCase("geokret")) {
                    if (StringUtils.isNotEmpty(content) && StringUtils.isBlank(trackable.getName())) {
                        trackable.setName(content);
                    }
                    // This is a special case. Deal it at the end of the "geokret" parsing (xml close)
                    if (trackable.getSpottedType() == Trackable.SPOTTED_USER) {
                        if (trackable.getDistance() == 0) {
                            // Never moved: it must still be with its owner.
                            trackable.setSpottedType(Trackable.SPOTTED_OWNER);
                            trackable.setSpottedName(trackable.getOwner());
                        } else {
                            trackable.setSpottedName(getLastSpottedUsername(logsEntries));
                        }
                    }
                    trackable.setLogs(logsEntries);
                }
                if (localName.equalsIgnoreCase("name")) {
                    trackable.setName(content);
                }
                if (localName.equalsIgnoreCase("description")) {
                    trackable.setDetails(content);
                    isMultiline = false;
                }
                if (localName.equalsIgnoreCase("owner")) {
                    trackable.setOwner(content);
                }
                if (StringUtils.isNotBlank(content) && localName.equalsIgnoreCase("datecreated")) {
                    final Date date = DATE_FORMAT_SECONDS.parse(content);
                    trackable.setReleased(date);
                }
                // Feed spells this element both ways; a <distance> inside a move is
                // a per-move value and must not overwrite the total (hence !isInMoves).
                if (StringUtils.isNotBlank(content) && !isInMoves && (
                        localName.equalsIgnoreCase("distancetravelled") || localName.equalsIgnoreCase("distancetraveled")
                )) {
                    trackable.setDistance(Float.parseFloat(content));
                }
                if (localName.equalsIgnoreCase("images")) {
                    isInImages = false;
                }
                if (StringUtils.isNotBlank(content) && localName.equalsIgnoreCase("image")) {
                    if (isInMoves) {
                        imageBuilder.setUrl("http://geokrety.org/obrazki/" + content);
                        logEntryBuilder.addLogImage(imageBuilder.build());
                    } else if (!isInImages) {
                        // TODO: Trackable doesn't support multiple image yet, so ignore other image tags if we're not in moves
                        trackable.setImage("http://geokrety.org/obrazki/" + content);
                    }
                }
                if (StringUtils.isNotBlank(content) && localName.equalsIgnoreCase("state")) {
                    trackable.setSpottedType(getSpottedType(Integer.parseInt(content)));
                }
                if (StringUtils.isNotBlank(content) && localName.equalsIgnoreCase("missing")) {
                    trackable.setMissing("1".equalsIgnoreCase(content));
                }
                if (StringUtils.isNotBlank(content) && localName.equalsIgnoreCase("waypoint")) {
                    trackable.setSpottedName(content);
                }
                if (StringUtils.isNotBlank(content) && localName.equalsIgnoreCase("user") && !isInComments) {
                    logEntryBuilder.setAuthor(content);
                }
                if (localName.equalsIgnoreCase("move")) {
                    isInMoves = false;
                    logsEntries.add(logEntryBuilder.build());
                }
                if (localName.equalsIgnoreCase("comments")) {
                    isInComments = false;
                }
                // NOTE(review): at </comment> inside <comments> this guard is still
                // true, so cache comments are intentionally(?) skipped and only a
                // move's own <comment> becomes the log text — confirm against feed.
                if (localName.equalsIgnoreCase("comment") && !isInComments) {
                    isMultiline = false;
                    logEntryBuilder.setLog(content);
                }
                if (StringUtils.isNotBlank(content) && localName.equalsIgnoreCase("wpt")) {
                    logEntryBuilder.setCacheGeocode(content);
                    logEntryBuilder.setCacheName(content);
                }
                if (localName.equalsIgnoreCase("id")) {
                    logEntryBuilder.setId(Integer.parseInt(content));
                }
            } catch (final ParseException | NumberFormatException e) {
                Log.e("Parsing GeoKret", e);
            }
        }

        @Override
        public final void characters(final char[] ch, final int start, final int length)
                throws SAXException {
            // SAX may deliver an element's text in several chunks: accumulate in
            // multiline mode (converting newlines to <br />), otherwise keep the
            // trimmed last chunk.
            final String text = new String(ch, start, length);
            if (isMultiline) {
                content = StringUtils.join(content, text.replaceAll("(\r\n|\n)", "<br />"));
            } else {
                content = StringUtils.trim(text);
            }
        }

        /**
         * Convert states from GK to c:geo spotted types. See: http://geokrety.org/api.php
         *
         * @param state
         *            the GK state read from xml
         * @return
         *            The spotted types as defined in Trackables
         */
        private static int getSpottedType(final int state) {
            switch (state) {
                case 0: // Dropped
                case 3: // Seen in
                    return Trackable.SPOTTED_CACHE;
                case 1: // Grabbed from
                case 5: // Visiting
                    return Trackable.SPOTTED_USER;
                case 4: // Archived
                    return Trackable.SPOTTED_ARCHIVED;
                //case 2: // A comment (however this case doesn't exists in db)
            }
            return Trackable.SPOTTED_UNKNOWN;
        }

        /**
         * Convert GK move log types to c:geo {@link LogType}s.
         *
         * @param type
         *            the GK Log type
         * @return
         *            The LogType
         */
        private static LogType getLogType(final int type) {
            switch (type) {
                case 0: // Dropped
                    return LogType.PLACED_IT;
                case 1: // Grabbed from
                    return LogType.GRABBED_IT;
                case 2: // A comment
                    return LogType.NOTE;
                case 3: // Seen in
                    return LogType.DISCOVERED_IT;
                case 4: // Archived
                    return LogType.ARCHIVE;
                case 5: // Visiting
                    return LogType.VISIT;
            }
            return LogType.UNKNOWN;
        }
    }

    /**
     * Parse a geokrety "export details" XML stream.
     *
     * @param page the XML input, may be null
     * @return the parsed trackables; empty on null input or parse failure
     */
    @NonNull
    public static List<Trackable> parse(final InputSource page) {
        if (page != null) {
            try {
                // Create a new instance of the SAX parser
                final SAXParserFactory saxPF = SAXParserFactory.newInstance();
                final SAXParser saxP = saxPF.newSAXParser();
                final XMLReader xmlR = saxP.getXMLReader();

                // Create the Handler to handle each of the XML tags.
                final GeokretyHandler gkXMLHandler = new GeokretyHandler();
                xmlR.setContentHandler(gkXMLHandler);
                xmlR.parse(page);
                return gkXMLHandler.getTrackables();
            } catch (final SAXException | IOException | ParserConfigurationException e) {
                Log.w("Cannot parse GeoKrety", e);
            }
        }
        return Collections.emptyList();
    }

    /**
     * XmlPull-based parser for the "ruchy" (move) response: extracts the
     * geokret id and any error messages reported by the server.
     */
    private static class GeokretyRuchyXmlParser {
        private int gkid;
        private final List<String> errors;
        private String text;

        GeokretyRuchyXmlParser() {
            errors = new ArrayList<>();
            gkid = 0;
        }

        public List<String> getErrors() {
            return errors;
        }

        int getGkid() {
            return gkid;
        }

        /**
         * Parse the response page.
         *
         * @param page the XML text
         * @return the collected error messages (also available via getErrors());
         *         a generic "parsing failed" message is appended on parser errors
         */
        @NonNull
        public List<String> parse(final String page) {
            try {
                final XmlPullParserFactory factory = XmlPullParserFactory.newInstance();
                factory.setNamespaceAware(true);
                final XmlPullParser parser = factory.newPullParser();
                parser.setInput(new StringReader(page));

                int eventType = parser.getEventType();
                while (eventType != XmlPullParser.END_DOCUMENT) {
                    final String tagname = parser.getName();
                    switch (eventType) {
                        case XmlPullParser.START_TAG:
                            if (tagname.equalsIgnoreCase("geokret")) {
                                gkid = Integer.parseInt(parser.getAttributeValue(null, "id"));
                            }
                            break;
                        case XmlPullParser.TEXT:
                            text = parser.getText();
                            break;
                        case XmlPullParser.END_TAG:
                            if (tagname.equalsIgnoreCase("error") && StringUtils.isNotBlank(text)) {
                                errors.add(text);
                            }
                            break;
                        default:
                            break;
                    }
                    eventType = parser.next();
                }
            } catch (XmlPullParserException | IOException e) {
                Log.e("GeokretyRuchyXmlParser: Error Parsing geokret", e);
                errors.add(CgeoApplication.getInstance().getString(R.string.geokrety_parsing_failed));
            }
            return errors;
        }
    }

    /**
     * Map a GK numeric type to a localized type name, or null if unknown.
     */
    @Nullable
    protected static String getType(final int type) {
        switch (type) {
            case 0:
                return CgeoApplication.getInstance().getString(R.string.geokret_type_traditional);
            case 1:
                return CgeoApplication.getInstance().getString(R.string.geokret_type_book_or_media);
            case 2:
                return CgeoApplication.getInstance().getString(R.string.geokret_type_human);
            case 3:
                return CgeoApplication.getInstance().getString(R.string.geokret_type_coin);
            case 4:
                return CgeoApplication.getInstance().getString(R.string.geokret_type_post);
        }
        return null;
    }

    /**
     * Parse a "ruchy" response into (geokret id, error messages).
     *
     * @param page the XML text, may be null
     * @return the pair, or null on null input or unexpected failure
     */
    @Nullable
    static ImmutablePair<Integer, List<String>> parseResponse(final String page) {
        if (page != null) {
            try {
                final GeokretyRuchyXmlParser parser = new GeokretyRuchyXmlParser();
                parser.parse(page);
                return new ImmutablePair<>(parser.getGkid(), parser.getErrors());
            } catch (final Exception e) {
                Log.w("Cannot parse response for the GeoKret", e);
            }
        }
        return null;
    }

    /**
     * Determine from the newest logs (ignoring Notes) if the GK is spotted
     * in the hand of someone.
     *
     * @param logsEntries
     *            the log entries to analyze
     * @return
     *            The spotted username (or unknown)
     */
    static String getLastSpottedUsername(final List<LogEntry> logsEntries) {
        for (final LogEntry log: logsEntries) {
            final LogType logType = log.getType();
            if (logType == LogType.GRABBED_IT || logType == LogType.VISIT) {
                return log.author;
            }
            // Any non-Note log other than grab/visit ends the search.
            if (logType != LogType.NOTE) {
                break;
            }
        }
        return CgeoApplication.getInstance().getString(R.string.user_unknown);
    }
}
10,440
4,054
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. #pragma once #include "fieldvalue.h" namespace document { /** * Represent the value in a field of type 'bool' which can be either true or false. **/ class BoolFieldValue : public FieldValue { bool _value; bool _altered; public: BoolFieldValue(bool value=false); ~BoolFieldValue() override; void accept(FieldValueVisitor &visitor) override { visitor.visit(*this); } void accept(ConstFieldValueVisitor &visitor) const override { visitor.visit(*this); } FieldValue *clone() const override; int compare(const FieldValue &rhs) const override; void printXml(XmlOutputStream &out) const override; void print(std::ostream &out, bool verbose, const std::string &indent) const override; const DataType *getDataType() const override; bool hasChanged() const override; bool getValue() const { return _value; } void setValue(bool v) { _value = v; } FieldValue &assign(const FieldValue &rhs) override; char getAsByte() const override; int32_t getAsInt() const override; int64_t getAsLong() const override; float getAsFloat() const override; double getAsDouble() const override; vespalib::string getAsString() const override; BoolFieldValue& operator=(vespalib::stringref) override; BoolFieldValue& operator=(int32_t) override; BoolFieldValue& operator=(int64_t) override; BoolFieldValue& operator=(float) override; BoolFieldValue& operator=(double) override; DECLARE_IDENTIFIABLE(BoolFieldValue); }; }
532
471
from corehq.project_limits.rate_limiter import RateDefinition

# Rate allowances tuned for a reference workload of 23 events per day.
# Dividing by 23 turns them into a "per single daily event" ratio that can be
# scaled back up for any workload size.
_RATES_FOR_23_EVENTS_PER_DAY = RateDefinition(
    per_week=115,
    per_day=23,
    per_hour=3,
    per_minute=0.07,
    per_second=0.005,
)

STANDARD_RATIO = _RATES_FOR_23_EVENTS_PER_DAY.times(1 / 23)


def get_standard_ratio_rate_definition(events_per_day):
    """Return the standard rate definition scaled to ``events_per_day`` events."""
    return STANDARD_RATIO.times(events_per_day)
132
2,305
# Copyright (c) Microsoft Corporation. # Licensed under the MIT license. _multi_phase = False def enable_multi_phase(): global _multi_phase _multi_phase = True def multi_phase_enabled(): return _multi_phase
71
449
# Rotated RetinaNet (OBB head, ResNet-50 FPN, 1x schedule, DOTA, le135 angle
# encoding) fine-tuned to regress boxes with the Gaussian Wasserstein Distance
# (GWD) loss instead of the default smooth L1 loss.
_base_ = [
    '../rotated_retinanet/rotated_retinanet_obb_r50_fpn_1x_dota_le135.py'
]

model = dict(
    bbox_head=dict(
        # Decode predictions to absolute boxes before computing the loss, as
        # required by distribution/IoU-style regression losses such as GWD.
        reg_decoded_bbox=True,
        loss_bbox=dict(type='GDLoss', loss_type='gwd', loss_weight=5.0)))
129
13,249
/*
 * Copyright 2015-2020 The OpenZipkin Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
 * in compliance with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the License
 * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
 * or implied. See the License for the specific language governing permissions and limitations under
 * the License.
 */
package zipkin2.storage.cassandra;

import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.Version;
import com.datastax.oss.driver.api.core.metadata.Metadata;
import com.datastax.oss.driver.api.core.metadata.Node;
import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Optional;
import java.util.UUID;
import org.junit.Test;

import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

/**
 * Unit tests for {@link Schema}: the minimum-Cassandra-version check in
 * ensureKeyspaceMetadata and the version-dependent CQL rewriting in reviseCQL.
 * All driver interactions are mocked; no real cluster is contacted.
 */
public class SchemaTest {
  /** Versions below 3.11.3 must be rejected. */
  @Test public void ensureKeyspaceMetadata_failsWhenVersionLessThan3_11_3() {
    CqlSession session = mock(CqlSession.class);
    Metadata metadata = mock(Metadata.class);
    Node node = mock(Node.class);
    when(session.getMetadata()).thenReturn(metadata);
    when(metadata.getNodes()).thenReturn(Collections.singletonMap(
      UUID.fromString("11111111-1111-1111-1111-111111111111"), node
    ));
    when(node.getCassandraVersion()).thenReturn(Version.parse("3.11.2"));

    assertThatThrownBy(() -> Schema.ensureKeyspaceMetadata(session, "zipkin2"))
      .isInstanceOf(RuntimeException.class)
      .hasMessage(
        "Node 11111111-1111-1111-1111-111111111111 is running Cassandra 3.11.2, but minimum version is 3.11.3");
  }

  /** One too-old node in a mixed cluster is enough to fail. */
  @Test public void ensureKeyspaceMetadata_failsWhenOneVersionLessThan3_11_3() {
    CqlSession session = mock(CqlSession.class);
    Metadata metadata = mock(Metadata.class);
    Node node1 = mock(Node.class);
    Node node2 = mock(Node.class);
    Map<UUID, Node> nodes = new LinkedHashMap<>();
    nodes.put(UUID.fromString("11111111-1111-1111-1111-111111111111"), node1);
    nodes.put(UUID.fromString("22222222-2222-2222-2222-222222222222"), node2);
    when(session.getMetadata()).thenReturn(metadata);
    when(metadata.getNodes()).thenReturn(nodes);
    when(node1.getCassandraVersion()).thenReturn(Version.parse("3.11.3"));
    when(node2.getCassandraVersion()).thenReturn(Version.parse("3.11.2"));

    assertThatThrownBy(() -> Schema.ensureKeyspaceMetadata(session, "zipkin2"))
      .isInstanceOf(RuntimeException.class)
      .hasMessage(
        "Node 22222222-2222-2222-2222-222222222222 is running Cassandra 3.11.2, but minimum version is 3.11.3");
  }

  @Test public void ensureKeyspaceMetadata_passesWhenVersion3_11_3AndKeyspaceMetadataIsNotNull() {
    CqlSession session = mock(CqlSession.class);
    Metadata metadata = mock(Metadata.class);
    Node node = mock(Node.class);
    KeyspaceMetadata keyspaceMetadata = mock(KeyspaceMetadata.class);
    when(session.getMetadata()).thenReturn(metadata);
    when(metadata.getNodes()).thenReturn(Collections.singletonMap(
      UUID.fromString("11111111-1111-1111-1111-111111111111"), node
    ));
    when(node.getCassandraVersion()).thenReturn(Version.parse("3.11.3"));
    when(metadata.getKeyspace("zipkin2")).thenReturn(Optional.of(keyspaceMetadata));

    assertThat(Schema.ensureKeyspaceMetadata(session, "zipkin2"))
      .isSameAs(keyspaceMetadata);
  }

  @Test public void ensureKeyspaceMetadata_passesWhenVersion3_11_4AndKeyspaceMetadataIsNotNull() {
    CqlSession session = mock(CqlSession.class);
    Metadata metadata = mock(Metadata.class);
    Node node = mock(Node.class);
    KeyspaceMetadata keyspaceMetadata = mock(KeyspaceMetadata.class);
    when(session.getMetadata()).thenReturn(metadata);
    when(metadata.getNodes()).thenReturn(Collections.singletonMap(
      UUID.fromString("11111111-1111-1111-1111-111111111111"), node
    ));
    when(node.getCassandraVersion()).thenReturn(Version.parse("3.11.4"));
    when(metadata.getKeyspace("zipkin2")).thenReturn(Optional.of(keyspaceMetadata));

    assertThat(Schema.ensureKeyspaceMetadata(session, "zipkin2"))
      .isSameAs(keyspaceMetadata);
  }

  // NOTE(review): despite the name, getKeyspace() is NOT stubbed here, so the
  // keyspace metadata is absent and the call fails — the name should arguably
  // be ...failsWhenKeyspaceMetadataIsNull. Behavior kept as-is.
  @Test public void ensureKeyspaceMetadata_failsWhenKeyspaceMetadataIsNotNull() {
    CqlSession session = mock(CqlSession.class);
    Metadata metadata = mock(Metadata.class);
    Node node = mock(Node.class);
    when(session.getMetadata()).thenReturn(metadata);
    when(metadata.getNodes()).thenReturn(Collections.singletonMap(
      UUID.fromString("11111111-1111-1111-1111-111111111111"), node
    ));
    when(node.getCassandraVersion()).thenReturn(Version.parse("3.11.3"));

    assertThatThrownBy(() -> Schema.ensureKeyspaceMetadata(session, "zipkin2"))
      .isInstanceOf(RuntimeException.class)
      .hasMessageStartingWith("Cannot read keyspace metadata for keyspace");
  }

  // Sample DDL containing the read_repair options that Cassandra 4 removed.
  String schemaWithReadRepair = ""
    + "CREATE TABLE IF NOT EXISTS zipkin2.remote_service_by_service (\n"
    + "    service text,\n"
    + "    remote_service text,\n"
    + "    PRIMARY KEY (service, remote_service)\n"
    + ")\n"
    + "    WITH compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy', 'unchecked_tombstone_compaction': 'true', 'tombstone_threshold': '0.2'}\n"
    + "    AND caching = {'rows_per_partition': 'ALL'}\n"
    + "    AND default_time_to_live =  259200\n"
    + "    AND gc_grace_seconds = 3600\n"
    + "    AND read_repair_chance = 0\n"
    + "    AND dclocal_read_repair_chance = 0\n"
    + "    AND speculative_retry = '95percentile'\n"
    + "    AND comment = 'Secondary table for looking up remote service names by a service name.';";

  @Test public void reviseCql_leaves_read_repair_chance_on_v3() {
    assertThat(Schema.reviseCQL(Version.parse("3.11.9"), schemaWithReadRepair))
      .isSameAs(schemaWithReadRepair);
  }

  @Test public void reviseCql_removes_dclocal_read_repair_chance_on_v4() {
    assertThat(Schema.reviseCQL(Version.V4_0_0, schemaWithReadRepair))
      // literal used to show newlines etc are in-tact
      .isEqualTo(""
        + "CREATE TABLE IF NOT EXISTS zipkin2.remote_service_by_service (\n"
        + "    service text,\n"
        + "    remote_service text,\n"
        + "    PRIMARY KEY (service, remote_service)\n"
        + ")\n"
        + "    WITH compaction = {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy', 'unchecked_tombstone_compaction': 'true', 'tombstone_threshold': '0.2'}\n"
        + "    AND caching = {'rows_per_partition': 'ALL'}\n"
        + "    AND default_time_to_live =  259200\n"
        + "    AND gc_grace_seconds = 3600\n"
        + "    AND speculative_retry = '95percentile'\n"
        + "    AND comment = 'Secondary table for looking up remote service names by a service name.';");
  }
}
2,672
1,273
package org.broadinstitute.hellbender.utils.read;

import htsjdk.samtools.SAMFileHeader;
import htsjdk.samtools.SAMRecordQueryNameComparator;
import htsjdk.samtools.SAMTag;
import org.broadinstitute.hellbender.GATKBaseTest;
import org.broadinstitute.hellbender.engine.ReadsDataSource;
import org.broadinstitute.hellbender.engine.ReadsPathDataSource;
import org.broadinstitute.hellbender.utils.io.IOUtils;
import org.testng.Assert;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;

import java.io.IOException;
import java.util.*;

/**
 * Unit tests for {@link ReadQueryNameComparator}: verifies that its ordering
 * (including name comparison and tie-breaking on flags/attributes) matches
 * htsjdk's {@link SAMRecordQueryNameComparator}.
 */
public class ReadQueryNameComparatorUnitTest extends GATKBaseTest {

    public static final SAMFileHeader HEADER = ArtificialReadUtils.createArtificialSamHeader();
    public static final String NAME = "NAME";

    /**
     * Tests that the ordering produced by {@link ReadQueryNameComparator} matches queryname ordering
     * as produced by htsjdk's {@link SAMRecordQueryNameComparator} for a representative selection of reads. Ignores
     * differences in tie-breaking done for reads with the same position -- just asserts that the reads are
     * queryname-sorted according to htsjdk, including unmapped reads with and without an assigned position.
     */
    @Test
    public void testComparatorOrderingMatchesHtsjdkFileOrdering() throws IOException {
        final String inputBam = publicTestDir + "org/broadinstitute/hellbender/utils/read/comparator_test_with_unmapped.bam";
        final List<GATKRead> reads = new ArrayList<>();
        SAMFileHeader header;

        try ( final ReadsDataSource readsSource = new ReadsPathDataSource(IOUtils.getPath(inputBam)) ) {
            header = readsSource.getHeader();

            for ( GATKRead read : readsSource ) {
                reads.add(read);
            }
        }

        // Randomize ordering and then re-sort
        Collections.shuffle(reads);
        reads.sort(new ReadQueryNameComparator());

        final SAMRecordQueryNameComparator samComparator = new SAMRecordQueryNameComparator();
        GATKRead previousRead = null;
        for ( final GATKRead currentRead : reads ) {
            if ( previousRead != null ) {
                Assert.assertTrue(samComparator.compare(previousRead.convertToSAMRecord(header),
                        currentRead.convertToSAMRecord(header)) <= 0,
                        "Reads are out of order: " + previousRead + " and " + currentRead);
            }
            previousRead = currentRead;
        }
    }

    /** Name pairs and the expected sign of their comparison. */
    @DataProvider
    public Object[][] getNames(){
        return new Object[][]{
                {"A", "B", -1},
                {"A","A", 0},
                {"AA", "A", 1},
                {"1","10", -1},
                {"2", "10", 1}
        };
    }

    @Test(dataProvider = "getNames")
    public void testCompareNames(String firstName, String secondName, int expected) throws Exception {
        ReadQueryNameComparator comp = new ReadQueryNameComparator();
        GATKRead first = getRead(firstName);
        GATKRead second = getRead(secondName);
        Assert.assertEquals(comp.compareReadNames(first, second ), expected);
        // Comparison must be antisymmetric and reflexive.
        Assert.assertEquals(comp.compareReadNames(second, first), -expected);
        Assert.assertEquals(comp.compareReadNames(first, first), 0);
        Assert.assertEquals(comp.compareReadNames(second, second), 0);
    }

    /** Creates a simple mapped read with the given name at a fixed position. */
    private static GATKRead getRead(String firstName) {
        final GATKRead read = ArtificialReadUtils.createArtificialRead(HEADER, firstName, 1, 100, 10);
        return read;
    }

    /**
     * All pairs drawn from reads that share a name but differ in one flag or
     * attribute relevant to queryname tie-breaking (pairing, strand,
     * supplementary/secondary status, HI tag), plus one differently-named read.
     */
    @DataProvider
    public Iterator<Object[]> getReads(){
        final GATKRead differentName = getRead(NAME+NAME);

        final GATKRead unpaired = getRead(NAME);
        unpaired.setIsPaired(false);

        final GATKRead paired = getRead(NAME);
        paired.setIsPaired(true);

        final GATKRead firstOfPair = getRead(NAME);
        firstOfPair.setIsFirstOfPair();

        final GATKRead secondOfPair = getRead(NAME);
        secondOfPair.setIsSecondOfPair();

        final GATKRead reverseStrand = getRead(NAME);
        reverseStrand.setIsReverseStrand(true);

        final GATKRead supplementary = getRead(NAME);
        supplementary.setIsSupplementaryAlignment(true);

        final GATKRead secondary = getRead(NAME);
        secondary.setIsSecondaryAlignment(true);

        final GATKRead tagHI1 = getRead(NAME);
        tagHI1.setAttribute(SAMTag.HI.name(), 1);

        final GATKRead tagHI2 = getRead(NAME);
        tagHI2.setAttribute(SAMTag.HI.name(), 2);

        List<GATKRead> reads = Arrays.asList(differentName, unpaired, paired, firstOfPair, secondOfPair,
                reverseStrand, supplementary, secondary, tagHI1, tagHI2);
        List<Object[]> tests = new ArrayList<>();
        for (GATKRead left : reads) {
            for (GATKRead right : reads) {
                tests.add(new Object[]{left, right});
            }
        }
        return tests.iterator();
    }

    @Test(dataProvider = "getReads")
    public void testTieBreakers(GATKRead left, GATKRead right){
        ReadQueryNameComparator readComparator = new ReadQueryNameComparator();
        SAMRecordQueryNameComparator samComparator = new SAMRecordQueryNameComparator();
        Assert.assertEquals(readComparator.compare(left, right),
                samComparator.compare(left.convertToSAMRecord(HEADER), right.convertToSAMRecord(HEADER)));
    }
}
2,134
389
/*
 * Copyright 2014 Guidewire Software, Inc.
 */

package gw.lang.reflect.interval;

import gw.test.TestClass;

import java.util.List;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.NoSuchElementException;

/**
 * Tests for {@link IntegerInterval}: construction validation, endpoint
 * accessors, indexed access from either end (getFromLeft/getFromRight),
 * and iteration (default, from-left, from-right) for step 1 and step 2,
 * including open/closed endpoint variants.
 */
public class IntegerIntervalTest extends TestClass
{
  public void testGoodLeftGoodRight()
  {
    IntegerInterval ivl = new IntegerInterval( 1, 10 );
    assertEquals( (Integer)1, ivl.getLeftEndpoint() );
    assertEquals( (Integer)10, ivl.getRightEndpoint() );
    // Step defaults to 1.
    assertEquals( (Integer)1, ivl.getStep() );
  }

  public void testClosedEndpoints()
  {
    IntegerInterval ivl = new IntegerInterval( 1, 10 );
    // Both endpoints are closed (included) by default.
    assertEquals( true, ivl.isLeftClosed() );
    assertEquals( true, ivl.isRightClosed() );
  }

  public void testBadLeftGoodRightFails()
  {
    try
    {
      new IntegerInterval( null, 10 );
    }
    catch( IllegalArgumentException e )
    {
      return;
    }
    fail();
  }

  public void testGoodLeftBadRightFails()
  {
    try
    {
      new IntegerInterval( 1, null );
    }
    catch( IllegalArgumentException e )
    {
      return;
    }
    fail();
  }

  public void testBadLeftBadRightFails()
  {
    try
    {
      new IntegerInterval( null, null );
    }
    catch( IllegalArgumentException e )
    {
      return;
    }
    fail();
  }

  public void testLeftGreaterThanRightFails()
  {
    try
    {
      new IntegerInterval( 10, 1 );
    }
    catch( IllegalArgumentException e )
    {
      return;
    }
    fail();
  }

  public void testLeftEqualsRight()
  {
    IntegerInterval ivl = new IntegerInterval( 1, 1 );
    assertEquals( (Integer)1, ivl.getLeftEndpoint() );
    assertEquals( (Integer)1, ivl.getRightEndpoint() );
  }

  public void testLeftEqualsRightEqualsZero()
  {
    IntegerInterval ivl = new IntegerInterval( 0, 0 );
    assertEquals( (Integer)0, ivl.getLeftEndpoint() );
    assertEquals( (Integer)0, ivl.getRightEndpoint() );
  }

  public void testLeftNegRightPos()
  {
    IntegerInterval ivl = new IntegerInterval( -1, 1 );
    assertEquals( (Integer)(-1), ivl.getLeftEndpoint() );
    assertEquals( (Integer)1, ivl.getRightEndpoint() );
  }

  public void testLeftNegRightNeg()
  {
    IntegerInterval ivl = new IntegerInterval( -3, -1 );
    assertEquals( (Integer)(-3), ivl.getLeftEndpoint() );
    assertEquals( (Integer)(-1), ivl.getRightEndpoint() );
  }

  public void testGoodLeftGoodRightWithStep()
  {
    IntegerInterval ivl = new IntegerInterval( 1, 10, 2 );
    assertEquals( (Integer)1, ivl.getLeftEndpoint() );
    assertEquals( (Integer)10, ivl.getRightEndpoint() );
    assertEquals( (Integer)2, ivl.getStep() );
  }

  public void testGoodLeftGoodRightZeroStepFails()
  {
    try
    {
      new IntegerInterval( 1, 10, 0 );
    }
    catch( IllegalArgumentException e )
    {
      return;
    }
    fail();
  }

  public void testGoodLeftGoodRightNegativeStepFails()
  {
    try
    {
      new IntegerInterval( 1, 10, -1 );
    }
    catch( IllegalArgumentException e )
    {
      return;
    }
    fail();
  }

  public void testGetFromLeftStep1()
  {
    IntegerInterval ivl = new IntegerInterval( -3, 3 );
    for( int i = 0; i <= 6; i++ )
    {
      Integer e = ivl.getFromLeft( i );
      assertEquals( (Integer)(-3 + i), e );
    }
  }

  public void testGetFromLeftStep1NegStepIndex()
  {
    try
    {
      IntegerInterval ivl = new IntegerInterval( -3, 3 );
      ivl.getFromLeft( -1 );
    }
    catch( IllegalArgumentException e )
    {
      return;
    }
    fail();
  }

  public void testGetFromLeftStep1OutOfBoundsStepIndex()
  {
    // Past-the-end index yields null rather than an exception.
    IntegerInterval ivl = new IntegerInterval( -3, 3 );
    Integer e = ivl.getFromLeft( 7 );
    assertEquals( null, e );
  }

  public void testGetFromRightStep1()
  {
    IntegerInterval ivl = new IntegerInterval( -3, 3 );
    for( int i = 0; i <= 6; i++ )
    {
      Integer e = ivl.getFromRight( i );
      assertEquals( (Integer)(3 - i), e );
    }
  }

  public void testGetFromRightStep1NegStepIndex()
  {
    try
    {
      IntegerInterval ivl = new IntegerInterval( -3, 3 );
      ivl.getFromRight( -1 );
    }
    catch( IllegalArgumentException e )
    {
      return;
    }
    fail();
  }

  public void testGetFromRightStep1OutOfBoundsStepIndex()
  {
    IntegerInterval ivl = new IntegerInterval( -3, 3 );
    Integer e = ivl.getFromRight( 7 );
    assertEquals( null, e );
  }

  public void testIteratorStep1()
  {
    List<Integer> result = new ArrayList<Integer>();
    for( Integer e : new IntegerInterval( -3, 3 ) )
    {
      result.add( e );
    }
    assertListEquals( Arrays.asList( -3,-2,-1,0,1,2,3 ), result );
  }

  public void testIterateFromLeftStep1()
  {
    List<Integer> result = new ArrayList<Integer>();
    for( Iterator<Integer> iter = new IntegerInterval( -3, 3 ).iterateFromLeft(); iter.hasNext(); )
    {
      result.add( iter.next() );
    }
    assertListEquals( Arrays.asList( -3,-2,-1,0,1,2,3 ), result );
  }

  public void testIterateFromLeftStep1NoSuchElement()
  {
    Iterator<Integer> iter = new IntegerInterval( -3, 3 ).iterateFromLeft();
    for( ;iter.hasNext(); )
    {
      iter.next();
    }
    try
    {
      iter.next();
    }
    catch( NoSuchElementException e )
    {
      return;
    }
    fail();
  }

  public void testIterateFromRightStep1()
  {
    List<Integer> result = new ArrayList<Integer>();
    for( Iterator<Integer> iter = new IntegerInterval( -3, 3 ).iterateFromRight(); iter.hasNext(); )
    {
      result.add( iter.next() );
    }
    assertListEquals( Arrays.asList( 3,2,1,0,-1,-2,-3 ), result );
  }

  public void testIterateFromRightStep1NoSuchElement()
  {
    Iterator<Integer> iter = new IntegerInterval( -3, 3 ).iterateFromRight();
    for( ;iter.hasNext(); )
    {
      iter.next();
    }
    try
    {
      iter.next();
    }
    catch( NoSuchElementException e )
    {
      return;
    }
    fail();
  }

  public void testGetFromLeftStep2()
  {
    IntegerInterval ivl = new IntegerInterval( -3, 3, 2 );
    for( int i = 0; i <= 6/2; i++ )
    {
      Integer e = ivl.getFromLeft( i );
      assertEquals( (Integer)(-3 + i*2), e );
    }
  }

  public void testGetFromLeftStep2NegStepIndex()
  {
    try
    {
      IntegerInterval ivl = new IntegerInterval( -3, 3, 2 );
      ivl.getFromLeft( -1 );
    }
    catch( IllegalArgumentException e )
    {
      return;
    }
    fail();
  }

  public void testGetFromLeftStep2OutOfBoundsStepIndex()
  {
    IntegerInterval ivl = new IntegerInterval( -3, 3, 2 );
    Integer e = ivl.getFromLeft( 4 );
    assertEquals( null, e );
  }

  public void testGetFromRightStep2()
  {
    IntegerInterval ivl = new IntegerInterval( -3, 3, 2 );
    for( int i = 0; i <= 6/2; i++ )
    {
      Integer e = ivl.getFromRight( i );
      assertEquals( (Integer)(3 - i*2), e );
    }
  }

  public void testGetFromRightStep2NegStepIndex()
  {
    try
    {
      IntegerInterval ivl = new IntegerInterval( -3, 3, 2 );
      ivl.getFromRight( -1 );
    }
    catch( IllegalArgumentException e )
    {
      return;
    }
    fail();
  }

  public void testGetFromRightStep2OutOfBoundsStepIndex()
  {
    IntegerInterval ivl = new IntegerInterval( -3, 3, 2 );
    Integer e = ivl.getFromRight( 4 );
    assertEquals( null, e );
  }

  public void testIteratorStep2()
  {
    List<Integer> result = new ArrayList<Integer>();
    for( Integer e : new IntegerInterval( -3, 3, 2 ) )
    {
      result.add( e );
    }
    assertListEquals( Arrays.asList( -3,-1,1,3 ), result );
  }

  public void testIteratorStep2OpenRight()
  {
    // Open right endpoint: 5 itself is excluded; stepping from -4 by 2 stays below it.
    List<Integer> result = new ArrayList<Integer>();
    for( Integer e : new IntegerInterval( -4, 5, 2, true, false, false ) )
    {
      result.add( e );
    }
    assertListEquals( Arrays.asList( -4,-2,0,2,4 ), result );
  }

  public void testIteratorStep2OpenLeft()
  {
    // Open left endpoint: iteration starts one step above -5.
    List<Integer> result = new ArrayList<Integer>();
    for( Integer e : new IntegerInterval( -5, 5, 2, false, true, false ) )
    {
      result.add( e );
    }
    assertListEquals( Arrays.asList( -3,-1,1,3,5 ), result );
  }

  public void testIteratorStep2OpenLeftOpenRight()
  {
    List<Integer> result = new ArrayList<Integer>();
    for( Integer e : new IntegerInterval( -5, 5, 2, false, false, false ) )
    {
      result.add( e );
    }
    assertListEquals( Arrays.asList( -3,-1,1,3 ), result );
  }

  public void testIterateFromLeftStep2()
  {
    List<Integer> result = new ArrayList<Integer>();
    for( Iterator<Integer> iter = new IntegerInterval( -3, 3, 2 ).iterateFromLeft(); iter.hasNext(); )
    {
      result.add( iter.next() );
    }
    assertListEquals( Arrays.asList( -3,-1,1,3 ), result );
  }

  public void testIterateFromLeftStep2NoSuchElement()
  {
    Iterator<Integer> iter = new IntegerInterval( -3, 3, 2 ).iterateFromLeft();
    for( ;iter.hasNext(); )
    {
      iter.next();
    }
    try
    {
      iter.next();
    }
    catch( NoSuchElementException e )
    {
      return;
    }
    fail();
  }

  public void testIterateFromRightStep2()
  {
    List<Integer> result = new ArrayList<Integer>();
    for( Iterator<Integer> iter = new IntegerInterval( -3, 3, 2 ).iterateFromRight(); iter.hasNext(); )
    {
      result.add( iter.next() );
    }
    assertListEquals( Arrays.asList( 3,1,-1,-3 ), result );
  }

  public void testIterateFromRightStep2NoSuchElement()
  {
    Iterator<Integer> iter = new IntegerInterval( -3, 3, 2 ).iterateFromRight();
    for( ;iter.hasNext(); )
    {
      iter.next();
    }
    try
    {
      iter.next();
    }
    catch( NoSuchElementException e )
    {
      return;
    }
    fail();
  }
}
4,015
3,358
/* This file is automatically generated; do not edit. */ /* Add the files to be included into Makefile.am instead. */ #include <ql/models/marketmodels/models/abcdvol.hpp> #include <ql/models/marketmodels/models/alphafinder.hpp> #include <ql/models/marketmodels/models/alphaform.hpp> #include <ql/models/marketmodels/models/alphaformconcrete.hpp> #include <ql/models/marketmodels/models/capletcoterminalalphacalibration.hpp> #include <ql/models/marketmodels/models/capletcoterminalmaxhomogeneity.hpp> #include <ql/models/marketmodels/models/capletcoterminalperiodic.hpp> #include <ql/models/marketmodels/models/capletcoterminalswaptioncalibration.hpp> #include <ql/models/marketmodels/models/cotswaptofwdadapter.hpp> #include <ql/models/marketmodels/models/ctsmmcapletcalibration.hpp> #include <ql/models/marketmodels/models/flatvol.hpp> #include <ql/models/marketmodels/models/fwdperiodadapter.hpp> #include <ql/models/marketmodels/models/fwdtocotswapadapter.hpp> #include <ql/models/marketmodels/models/piecewiseconstantabcdvariance.hpp> #include <ql/models/marketmodels/models/piecewiseconstantvariance.hpp> #include <ql/models/marketmodels/models/pseudorootfacade.hpp> #include <ql/models/marketmodels/models/volatilityinterpolationspecifier.hpp> #include <ql/models/marketmodels/models/volatilityinterpolationspecifierabcd.hpp>
460
675
/* * Copyright 2016 The Bazel Authors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.idea.blaze.android.run.test; import com.android.ddmlib.Client; import com.android.tools.idea.run.AndroidDebugState; import com.android.tools.idea.run.AndroidProcessText; import com.android.tools.idea.run.AndroidRunConfiguration; import com.android.tools.idea.run.AndroidSessionInfo; import com.android.tools.idea.run.LaunchInfo; import com.android.tools.idea.run.ProcessHandlerConsolePrinter; import com.android.tools.idea.run.tasks.ConnectJavaDebuggerTask; import com.android.tools.idea.run.util.ProcessHandlerLaunchStatus; import com.intellij.debugger.engine.RemoteDebugProcessHandler; import com.intellij.debugger.ui.DebuggerPanelsManager; import com.intellij.execution.ExecutionException; import com.intellij.execution.configurations.RemoteConnection; import com.intellij.execution.configurations.RunConfiguration; import com.intellij.execution.configurations.RunProfile; import com.intellij.execution.process.ProcessHandler; import com.intellij.execution.runners.ExecutionEnvironment; import com.intellij.execution.runners.ExecutionEnvironmentBuilder; import com.intellij.execution.ui.RunContentDescriptor; import com.intellij.openapi.diagnostic.Logger; import com.intellij.openapi.project.Project; import java.util.Locale; /** Connects the blaze debugger during execution. 
*/ class ConnectBlazeTestDebuggerTaskHelper { private ConnectBlazeTestDebuggerTaskHelper() {} /** * Nearly a clone of {@link ConnectJavaDebuggerTask#launchDebugger}. There are a few changes to * account for null variables that could occur in our implementation. */ public static ProcessHandler launchDebugger( Project project, LaunchInfo currentLaunchInfo, Client client, ProcessHandlerLaunchStatus launchStatus, ProcessHandlerConsolePrinter printer) { String debugPort = Integer.toString(client.getDebuggerListenPort()); int pid = client.getClientData().getPid(); Logger.getInstance(ConnectJavaDebuggerTask.class) .info( String.format( Locale.US, "Attempting to connect debugger to port %1$s [client %2$d]", debugPort, pid)); // create a new process handler RemoteConnection connection = new RemoteConnection(true, "localhost", debugPort, false); RemoteDebugProcessHandler debugProcessHandler = new RemoteDebugProcessHandler(project); // switch the launch status and console printers to point to the new process handler // this is required, esp. for AndroidTestListener which holds a // reference to the launch status and printers, and those should // be updated to point to the new process handlers, // otherwise test results will not be forwarded appropriately ProcessHandler oldProcessHandler = launchStatus.getProcessHandler(); launchStatus.setProcessHandler(debugProcessHandler); printer.setProcessHandler(debugProcessHandler); // Detach old process handler after the launch status // has been updated to point to the new process handler. 
oldProcessHandler.detachProcess(); AndroidDebugState debugState = new AndroidDebugState( project, debugProcessHandler, connection, currentLaunchInfo.consoleProvider); RunContentDescriptor oldDescriptor; AndroidSessionInfo oldSession = oldProcessHandler.getUserData(AndroidSessionInfo.KEY); if (oldSession != null) { oldDescriptor = oldSession.getDescriptor(); } else { // This is the first time we are attaching the debugger; get it from the environment instead. oldDescriptor = currentLaunchInfo.env.getContentToReuse(); } RunContentDescriptor debugDescriptor; try { // @formatter:off ExecutionEnvironment debugEnv = new ExecutionEnvironmentBuilder(currentLaunchInfo.env) .executor(currentLaunchInfo.executor) .runner(currentLaunchInfo.runner) .contentToReuse(oldDescriptor) .build(); debugDescriptor = DebuggerPanelsManager.getInstance(project) .attachVirtualMachine(debugEnv, debugState, connection, false); // @formatter:on } catch (ExecutionException e) { printer.stderr("ExecutionException: " + e.getMessage() + '.'); return null; } // Based on the above try block we shouldn't get here unless we have assigned to debugDescriptor assert debugDescriptor != null; // re-run the collected text from the old process handler to the new // TODO: is there a race between messages received once the debugger has been connected, // and these messages that are printed out? final AndroidProcessText oldText = AndroidProcessText.get(oldProcessHandler); if (oldText != null) { oldText.printTo(debugProcessHandler); } RunProfile runProfile = currentLaunchInfo.env.getRunProfile(); RunConfiguration runConfiguration = runProfile instanceof AndroidRunConfiguration ? 
(AndroidRunConfiguration) runProfile : null; AndroidSessionInfo sessionInfo = AndroidSessionInfo.create( debugProcessHandler, debugDescriptor, runConfiguration, currentLaunchInfo.env.getExecutor().getId(), currentLaunchInfo.env.getExecutor().getActionName(), currentLaunchInfo.env.getExecutionTarget()); debugProcessHandler.putUserData(AndroidSessionInfo.KEY, sessionInfo); debugProcessHandler.putUserData(AndroidSessionInfo.ANDROID_DEBUG_CLIENT, client); debugProcessHandler.putUserData( AndroidSessionInfo.ANDROID_DEVICE_API_LEVEL, client.getDevice().getVersion()); return debugProcessHandler; } }
2,011
384
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.tez.runtime.library.processor; import java.nio.ByteBuffer; import java.nio.charset.CharacterCodingException; import java.nio.charset.Charset; import java.util.List; import java.util.Map; import com.google.common.base.Charsets; import org.apache.tez.common.ProgressHelper; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.tez.dag.api.UserPayload; import org.apache.tez.runtime.api.AbstractLogicalIOProcessor; import org.apache.tez.runtime.api.Event; import org.apache.tez.runtime.api.LogicalInput; import org.apache.tez.runtime.api.LogicalOutput; import org.apache.tez.runtime.api.ProcessorContext; /** * A simple sleep processor implementation that sleeps for the configured * time in milliseconds. 
* * @see SleepProcessorConfig for configuring the SleepProcessor */ @Private public class SleepProcessor extends AbstractLogicalIOProcessor { private static final Logger LOG = LoggerFactory.getLogger(SleepProcessor.class); private int timeToSleepMS; protected Map<String, LogicalInput> inputs; protected Map<String, LogicalOutput> outputs; private ProgressHelper progressHelper; public SleepProcessor(ProcessorContext context) { super(context); } @Override public void initialize() throws Exception { if (getContext().getUserPayload() == null) { LOG.info("No processor user payload specified" + ", using default timeToSleep of 1 ms"); timeToSleepMS = 1; } else { SleepProcessorConfig cfg = new SleepProcessorConfig(); cfg.fromUserPayload(getContext().getUserPayload()); timeToSleepMS = cfg.getTimeToSleepMS(); } LOG.info("Initialized SleepProcessor, timeToSleepMS=" + timeToSleepMS); } @Override public void run(Map<String, LogicalInput> _inputs, Map<String, LogicalOutput> _outputs) throws Exception { inputs = _inputs; outputs = _outputs; progressHelper = new ProgressHelper(this.inputs, getContext(),this.getClass().getSimpleName()); LOG.info("Running the Sleep Processor, sleeping for " + timeToSleepMS + " ms"); for (LogicalInput input : _inputs.values()) { input.start(); } progressHelper.scheduleProgressTaskService(0, 100); for (LogicalOutput output : _outputs.values()) { output.start(); } try { Thread.sleep(timeToSleepMS); } catch (InterruptedException ie) { // ignore } } @Override public void handleEvents(List<Event> processorEvents) { // Nothing to do } @Override public void close() throws Exception { if (progressHelper != null) { progressHelper.shutDownProgressTaskService(); } } /** * Configuration for the Sleep Processor. * Only configuration option is time to sleep in milliseconds. 
*/ public static class SleepProcessorConfig { private int timeToSleepMS; private final Charset charSet = Charsets.UTF_8; public SleepProcessorConfig() { } /** * @param timeToSleepMS Time to sleep in milliseconds */ public SleepProcessorConfig (int timeToSleepMS) { this.timeToSleepMS = timeToSleepMS; } public UserPayload toUserPayload() { return UserPayload.create(ByteBuffer.wrap(Integer.toString(timeToSleepMS).getBytes( charSet))); } public void fromUserPayload(UserPayload userPayload) throws CharacterCodingException { timeToSleepMS = Integer.parseInt(charSet.newDecoder().decode(userPayload.getPayload()).toString()); } public int getTimeToSleepMS() { return timeToSleepMS; } } }
1,452
348
{"nom":"Aussurucq","circ":"4ème circonscription","dpt":"Pyrénées-Atlantiques","inscrits":208,"abs":108,"votants":100,"blancs":11,"nuls":5,"exp":84,"res":[{"nuance":"REM","nom":"<NAME>","voix":46},{"nuance":"DVD","nom":"<NAME>","voix":38}]}
97
1,842
#import "FORMField.h" @interface FORMField (Tests) + (FORMField *)firstNameField; + (FORMField *)lastNameField; + (FORMField *)displayNameField; + (FORMField *)addressField; + (FORMField *)emailField; + (FORMField *)usernameField; + (FORMField *)workHoursField; + (FORMField *)startDateField; + (FORMField *)endDateField; + (FORMField *)contractTypeField; + (FORMField *)baseSalaryTypeField; + (FORMField *)bonusEnabledField; + (FORMField *)bonusField; + (FORMField *)totalField; @end
167
794
<reponame>sorennelson/uncertainty-baselines<filename>baselines/jft/checkpoint_utils_test.py # coding=utf-8 # Copyright 2021 The Uncertainty Baselines Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for the checkpointing utilities used in the ViT experiments.""" import os import tempfile from absl.testing import parameterized import flax import jax import jax.numpy as jnp import ml_collections import tensorflow as tf import uncertainty_baselines as ub import checkpoint_utils # local file import def _make_model(num_classes=21843, representation_size=2): config = ml_collections.ConfigDict() config.num_classes = num_classes # Model section config.model = ml_collections.ConfigDict() config.model.patches = ml_collections.ConfigDict() config.model.patches.size = [16, 16] config.model.hidden_size = 4 config.model.transformer = ml_collections.ConfigDict() config.model.transformer.attention_dropout_rate = 0. config.model.transformer.dropout_rate = 0. 
config.model.transformer.mlp_dim = 3 config.model.transformer.num_heads = 2 config.model.transformer.num_layers = 1 config.model.classifier = "token" config.model.representation_size = representation_size model = ub.models.vision_transformer( num_classes=config.num_classes, **config.get("model", {})) return model, config def _make_pytree(key): key1, key2, key3, key4, key5 = jax.random.split(key, num=5) tree = { "a": jax.random.normal(key1, (3, 2)), "b": { "c": jax.random.uniform(key2, (3, 2)), "d": jax.random.normal(key3, (2, 4)), "e": { "f": jax.random.uniform(key4, (1, 5)) } } } # Create bfloat params to test saving/loading. bfloat_params = jax.random.normal(key5, (3, 2), dtype=jax.dtypes.bfloat16) tree = {"a": tree, "b": bfloat_params} return tree class CheckpointUtilsTest(parameterized.TestCase, tf.test.TestCase): def test_checkpointing(self): output_dir = tempfile.mkdtemp(dir=self.get_temp_dir()) checkpoint_path = os.path.join(output_dir, "checkpoint.npz") self.assertFalse(os.path.exists(checkpoint_path)) key = jax.random.PRNGKey(42) key, subkey = jax.random.split(key) tree = _make_pytree(subkey) checkpoint_utils.save_checkpoint(tree, checkpoint_path) key, subkey = jax.random.split(key) new_tree = _make_pytree(subkey) leaves = jax.tree_util.tree_leaves(tree) new_leaves = jax.tree_util.tree_leaves(new_tree) for arr, new_arr in zip(leaves, new_leaves): self.assertNotAllClose(arr, new_arr) restored_tree = checkpoint_utils.load_checkpoint(new_tree, checkpoint_path) restored_leaves = jax.tree_util.tree_leaves(restored_tree) for arr, restored_arr in zip(leaves, restored_leaves): self.assertAllClose(arr, restored_arr) def test_checkpointing_model(self): output_dir = tempfile.mkdtemp(dir=self.get_temp_dir()) checkpoint_path = os.path.join(output_dir, "checkpoint.npz") self.assertFalse(os.path.exists(checkpoint_path)) key = jax.random.PRNGKey(42) model, _ = _make_model() input_shape = (2, 224, 224, 3) dummy_input = jnp.zeros(input_shape, jnp.float32) key, subkey = 
jax.random.split(key) params = model.init(subkey, dummy_input, train=False)["params"] checkpoint_utils.save_checkpoint(params, checkpoint_path) key, subkey = jax.random.split(key) new_params = model.init(subkey, dummy_input, train=False)["params"] restored_params = checkpoint_utils.load_checkpoint(new_params, checkpoint_path) restored_leaves = jax.tree_util.tree_leaves(restored_params) leaves = jax.tree_util.tree_leaves(params) for arr, restored_arr in zip(leaves, restored_leaves): self.assertAllClose(arr, restored_arr) key, subkey = jax.random.split(key) inputs = jax.random.normal(subkey, input_shape, jnp.float32) _, out = model.apply({"params": params}, inputs, train=False) _, new_out = model.apply({"params": new_params}, inputs, train=False) _, restored_out = model.apply({"params": restored_params}, inputs, train=False) self.assertNotAllClose(out["pre_logits"], new_out["pre_logits"]) self.assertAllClose(out["pre_logits"], restored_out["pre_logits"]) if __name__ == "__main__": tf.test.main()
1,905
403
#!/usr/bin/env python from distutils.core import setup import subprocess import sys import nipap_whoisd # return all the extra data files def get_data_files(): # generate man pages using rst2man try: subprocess.call(["rst2man", "nipap-whoisd.man.rst", "nipap-whoisd.8"]) except OSError as exc: print >> sys.stderr, "rst2man failed to run:", str(exc) sys.exit(1) files = [ ('/etc/nipap/', ['whoisd.conf.dist']), ('/usr/sbin/', ['nipap-whoisd']), ('/usr/share/man/man8/', ['nipap-whoisd.8']) ] return files long_desc = open('README.rst').read() short_desc = long_desc.split('\n')[0].split(' - ')[1].strip() setup( name = 'nipap-whoisd', version = nipap_whoisd.__version__, description = short_desc, long_description = long_desc, author = nipap_whoisd.__author__, author_email = nipap_whoisd.__author_email__, license = nipap_whoisd.__license__, url = nipap_whoisd.__url__, py_modules = ['nipap_whoisd'], keywords = ['nipap-whoisd'], requires = ['pynipap'], data_files = get_data_files(), classifiers = [ 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'Intended Audience :: System Administrators', 'Intended Audience :: Telecommunications Industry', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', 'Operating System :: POSIX :: Linux', 'Programming Language :: Python :: 2.6' ] )
667
1,374
package def.dom; import def.js.Object; public class MimeType extends def.js.Object { public java.lang.String description; public Plugin enabledPlugin; public java.lang.String suffixes; public java.lang.String type; public static MimeType prototype; public MimeType(){} }
98
516
// // Bridge.h // XMGTV // // Created by apple on 16/11/21. // Copyright © 2016年 coderwhy. All rights reserved. // #import <sqlite3.h>
58
1,449
#include "danbooru-downloader-importer.h" #include <QFile> #include <QFileInfo> #include <QMap> #include <QRegularExpression> #include <QSettings> DanbooruDownloaderImporter::DanbooruDownloaderImporter() : m_firefoxProfilePath(QString()) { QSettings cfg(QSettings::IniFormat, QSettings::UserScope, "Mozilla", "Firefox"); const QString path = QFileInfo(cfg.fileName()).absolutePath() + "/Firefox"; if (QFile::exists(path + "/profiles.ini")) { QSettings profiles(path + "/profiles.ini", QSettings::IniFormat); m_firefoxProfilePath = path + "/" + profiles.value("Profile0/Path").toString(); } } bool DanbooruDownloaderImporter::isInstalled() const { return !m_firefoxProfilePath.isEmpty() && QFile::exists(m_firefoxProfilePath + "/extensions/[email protected]"); } void DanbooruDownloaderImporter::import(QSettings *dest) const { QFile prefs(m_firefoxProfilePath + "/prefs.js"); if (prefs.exists() && prefs.open(QIODevice::ReadOnly | QIODevice::Text)) { return; } const QString source = prefs.readAll(); static const QRegularExpression rx("user_pref\\(\"danbooru.downloader.([^\"]+)\", ([^\\)]+)\\);"); static const QMap<QString, QString> assoc { { "blacklist", "blacklistedtags" }, { "generalTagsSeparator", "separator" }, { "multipleArtistsAll", "artist_useall" }, { "multipleArtistsDefault", "artist_value" }, { "multipleArtistsSeparator", "artist_sep" }, { "multipleCharactersAll", "character_useall" }, { "multipleCharactersDefault", "character_value" }, { "multipleCharactersSeparator", "character_sep" }, { "multipleCopyrightsAll", "copyright_useall" }, { "multipleCopyrightsDefault", "copyright_value" }, { "multipleCopyrightsSeparator", "copyright_sep" }, { "noArtist", "artist_empty" }, { "noCharacter", "character_empty" }, { "noCopyright", "copyright_empty" }, { "targetFolder", "path" }, { "targetName", "filename" }, }; QMap<QString, QString> firefox; auto matches = rx.globalMatch(source); while (matches.hasNext()) { auto match = matches.next(); QString value = match.captured(2); if 
(value.startsWith('"')) { value = value.right(value.length() - 1); } if (value.endsWith('"')) { value = value.left(value.length() - 1); } firefox[match.captured(1)] = value; } dest->beginGroup("Save"); if (firefox.contains("useBlacklist")) { dest->setValue("downloadblacklist", firefox["useBlacklist"] != "true"); } for (auto it = firefox.constBegin(); it != firefox.constEnd(); ++it) { if (assoc.contains(it.key())) { QString v(it.value()); v.replace("\\\\", "\\"); dest->setValue(assoc[it.key()], v); } } dest->endGroup(); prefs.close(); }
1,000
5,169
<reponame>Ray0218/Specs<filename>Specs/baiduMap/2.8.1/baiduMap.podspec.json { "name": "baiduMap", "version": "2.8.1", "summary": "百度地图2.8.1", "description": "A longer description of baiduMap in Markdown format.\n\n* Think: Why did you write this? What is the focus? What does it do?\n* CocoaPods will be using this to generate tags, and improve search results.\n* Try to keep it short, snappy and to the point.\n* Finally, don't worry about the indent, CocoaPods strips it!", "homepage": "https://github.com/myz1104/baiduMap", "license": { "type": "Copyright", "text": "百度 版权所有" }, "authors": { "马远征": "<EMAIL>" }, "platforms": { "ios": "7.0" }, "source": { "git": "https://github.com/myz1104/baiduMap.git", "tag": "2.8.1" }, "source_files": [ "MapFrameWork", "MapFrameWork/BaiduMapAPI.framework/**/*.h" ], "resources": "MapFrameWork/BaiduMapAPI.framework/Resources/mapapi.bundle", "vendored_frameworks": "MapFrameWork/BaiduMapAPI.framework", "frameworks": [ "CoreLocation", "QuartzCore", "OpenGLES", "SystemConfiguration", "CoreGraphics", "Security" ], "requires_arc": true }
502
5,421
<filename>venv/lib/python3.8/site-packages/tqdm/_utils.py from warnings import warn from .std import TqdmDeprecationWarning from .utils import ( # NOQA, pylint: disable=unused-import CUR_OS, IS_NIX, IS_WIN, RE_ANSI, Comparable, FormatReplace, SimpleTextIOWrapper, _basestring, _environ_cols_wrapper, _is_ascii, _is_utf, _range, _screen_shape_linux, _screen_shape_tput, _screen_shape_windows, _screen_shape_wrapper, _supports_unicode, _term_move_up, _unich, _unicode, colorama) warn("This function will be removed in tqdm==5.0.0\n" "Please use `tqdm.utils.*` instead of `tqdm._utils.*`", TqdmDeprecationWarning, stacklevel=2)
264
943
<gh_stars>100-1000 package eu.inloop.viewmodel.fixture.fragment; import eu.inloop.viewmodel.IView; public interface IVMTestFragmentView extends IView { void onLoadData(boolean loaded); }
72
3,705
#pragma once namespace chainerx { enum class CopyKind { kCopy = 1, kView, }; } // namespace chainerx
47
393
#include <stdlib.h> #include <stdio.h> #include <string.h> #include "render/spotlight.h" #include "render/dirlight.h" #include "render/shader.h" #define SHADOW_MAP_SIZE 1024 mat4x4 spot_shadow_projection; GLuint spot_light_shader; void ex_spot_light_init() { spot_light_shader = ex_dir_light_shader; float aspect = (float)SHADOW_MAP_SIZE/(float)SHADOW_MAP_SIZE; mat4x4_perspective(spot_shadow_projection, rad(90.0f), aspect, 0.1f, EX_SPOT_FAR_PLANE); } ex_spot_light_t* ex_spot_light_new(vec3 pos, vec3 color, int dynamic) { ex_spot_light_t *l = malloc(sizeof(ex_spot_light_t)); memcpy(l->position, pos, sizeof(vec3)); memcpy(l->color, color, sizeof(vec3)); memset(l->direction, 0, sizeof(vec3)); // generate depth map glGenTextures(1, &l->depth_map); glBindTexture(GL_TEXTURE_2D, l->depth_map); glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, SHADOW_MAP_SIZE, SHADOW_MAP_SIZE, 0, GL_DEPTH_COMPONENT, GL_FLOAT, NULL); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_BORDER); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER); GLfloat border[] = {1.0, 1.0, 1.0, 1.0}; glTexParameterfv(GL_TEXTURE_2D, GL_TEXTURE_BORDER_COLOR, border); glGenFramebuffers(1, &l->depth_map_fbo); glBindFramebuffer(GL_FRAMEBUFFER, l->depth_map_fbo); glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, l->depth_map, 0); glDrawBuffer(GL_NONE); glReadBuffer(GL_NONE); if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE) printf("Error! 
Spot light framebuffer is not complete!\n"); glBindFramebuffer(GL_FRAMEBUFFER, 0); l->shader = spot_light_shader; l->dynamic = dynamic; l->update = 1; l->inner = rad(12.5f); l->outer = rad(16.5f); l->is_shadow = 1; l->is_visible = 1; return l; } void ex_spot_light_begin(ex_spot_light_t *l) { l->update = 0; // setup projection vec3 target; vec3_add(target, l->position, l->direction); mat4x4_look_at(l->transform, l->position, target, (vec3){0.0f, 1.0f, 0.0f}); mat4x4_mul(l->transform, spot_shadow_projection, l->transform); glViewport(0, 0, SHADOW_MAP_SIZE, SHADOW_MAP_SIZE); glBindFramebuffer(GL_FRAMEBUFFER, l->depth_map_fbo); glClear(GL_DEPTH_BUFFER_BIT); glEnable(GL_DEPTH_TEST); glUseProgram(l->shader); glUniformMatrix4fv(ex_uniform(l->shader, "u_light_transform"), 1, GL_FALSE, &l->transform[0][0]); } void ex_spot_light_draw(ex_spot_light_t *l, GLuint shader, const char *prefix) { if (l->is_shadow) { glUniform1i(ex_uniform(shader, "u_spot_light.is_shadow"), 1); glUniform1i(ex_uniform(shader, "u_spot_depth"), 5); glActiveTexture(GL_TEXTURE5); glBindTexture(GL_TEXTURE_2D, l->depth_map); } else if (prefix != NULL) { char buff[64]; sprintf(buff, "%s.is_shadow", prefix); glUniform1i(ex_uniform(shader, buff), 0); } if (prefix != NULL) { char buff[64]; sprintf(buff, "%s.far", prefix); glUniform1f(ex_uniform(shader, buff), EX_SPOT_FAR_PLANE); sprintf(buff, "%s.position", prefix); glUniform3fv(ex_uniform(shader, buff), 1, l->position); sprintf(buff, "%s.color", prefix); glUniform3fv(ex_uniform(shader, buff), 1, l->color); } else { glUniform1i(ex_uniform(shader, "u_spot_active"), 1); glUniform1f(ex_uniform(shader, "u_spot_light.far"), EX_SPOT_FAR_PLANE); glUniform3fv(ex_uniform(shader, "u_spot_light.position"), 1, l->position); glUniform3fv(ex_uniform(shader, "u_spot_light.direction"), 1, l->direction); glUniform3fv(ex_uniform(shader, "u_spot_light.color"), 1, l->color); glUniform1f(ex_uniform(shader, "u_spot_light.inner"), l->inner); glUniform1f(ex_uniform(shader, 
"u_spot_light.outer"), l->outer); } } void ex_spot_light_destroy(ex_spot_light_t *l) { glDeleteFramebuffers(1, &l->depth_map_fbo); glDeleteTextures(1, &l->depth_map); free(l); }
1,811
3,362
package com.zendesk.maxwell.util; import org.junit.Test; import java.util.concurrent.TimeoutException; import static org.hamcrest.CoreMatchers.notNullValue; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.core.IsEqual.equalTo; public class StoppableTaskStateTest { @Test public void testStateTransition() { StoppableTaskState state = new StoppableTaskState("task"); assertThat(state.getState(), equalTo(RunState.RUNNING)); assertThat(state.isRunning(), equalTo(true)); state.requestStop(); assertThat(state.getState(), equalTo(RunState.REQUEST_STOP)); assertThat(state.isRunning(), equalTo(false)); state.stopped(); assertThat(state.getState(), equalTo(RunState.STOPPED)); assertThat(state.isRunning(), equalTo(false)); } @Test public void requestStopIsIgnoredIfAlreadyStopped() { StoppableTaskState state = new StoppableTaskState("task"); state.stopped(); state.requestStop(); assertThat(state.getState(), equalTo(RunState.STOPPED)); } @Test public void awaitSucceedsWhenAlreadyStopped() throws TimeoutException { StoppableTaskState state = new StoppableTaskState("task"); state.stopped(); state.awaitStop(null, 0L); } @Test public void awaitThrowsWhenNotStopped() { StoppableTaskState state = new StoppableTaskState("task"); TimeoutException e = null; try { state.awaitStop(null, 0L); } catch (TimeoutException _e) { e = _e; } assertThat(e, notNullValue()); assertThat(state.getState(), equalTo(RunState.RUNNING)); } }
531
839
<reponame>kimjand/cxf<filename>tools/javato/ws/src/test/java/org/apache/cxf/tools/fortest/cxf774/ListTestImpl.java<gh_stars>100-1000 /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.cxf.tools.fortest.cxf774; import java.util.List; import javax.jws.WebService; @WebService(serviceName = "SOAPService", portName = "SoapPort", endpointInterface = "org.apache.cxf.tools.fortest.cxf774.ListTest", targetNamespace = "http://cxf.apache/") public class ListTestImpl implements ListTest { public List<?> sayHi(String hi) { return null; } }
426
314
<filename>examples/src/main/java/lemongrenade/examples/adapters/MyBangAdapter.java package lemongrenade.examples.adapters; import lemongrenade.core.models.LGPayload; import lemongrenade.core.templates.LGJavaAdapter; import org.json.JSONObject; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.HashMap; import java.util.List; import java.util.UUID; /***/ public class MyBangAdapter extends LGJavaAdapter { public MyBangAdapter(String id) { super(id); } private static final Logger log = LoggerFactory.getLogger(MyBangAdapter.class); @Override public void process(LGPayload input, LGCallback callback) { List<JSONObject> requests = input.getRequestNodes(); JSONObject request = null; try { for(int i = 0; i < requests.size(); i++) { request = requests.get(i); request.put("hello_my_bang", request.get("type")); input.addResponseNode(request); JSONObject edgeMeta = new JSONObject().put("type", "edge").put("value", "connects"); JSONObject dstMeta = new JSONObject().put("type", "node1").put("value", UUID.randomUUID().toString()); JSONObject dstMeta2 = new JSONObject().put("type", "node2").put("value", UUID.randomUUID().toString()); JSONObject dstMeta3 = new JSONObject().put("type", "node3").put("value", UUID.randomUUID().toString()); JSONObject dstMeta4 = new JSONObject().put("type", "node4").put("value", UUID.randomUUID().toString()); JSONObject dstMeta5 = new JSONObject().put("type", "node5").put("value", UUID.randomUUID().toString()); JSONObject dstMeta6 = new JSONObject().put("type", "node6").put("value", UUID.randomUUID().toString()); JSONObject dstMeta7 = new JSONObject().put("type", "node4").put("value", UUID.randomUUID().toString()); JSONObject dstMeta8 = new JSONObject().put("type", "node5").put("value", UUID.randomUUID().toString()); JSONObject dstMeta9 = new JSONObject().put("type", "node6").put("value", UUID.randomUUID().toString()); // Add an edge input.addResponseEdge(request, edgeMeta, dstMeta); input.addResponseEdge(request, edgeMeta, 
dstMeta2); input.addResponseEdge(request, edgeMeta, dstMeta3); input.addResponseEdge(request, edgeMeta, dstMeta4); input.addResponseEdge(request, edgeMeta, dstMeta5); input.addResponseEdge(request, edgeMeta, dstMeta6); input.addResponseEdge(request, edgeMeta, dstMeta7); input.addResponseEdge(request, edgeMeta, dstMeta8); input.addResponseEdge(request, edgeMeta, dstMeta9); } log.info("MyBang Success!"); callback.emit(input); } catch (Exception e) { e.printStackTrace(); callback.fail(e); } } @Override public String getAdapterName() { return "MyBang"; } @Override public String getAdapterQuery() { return "n(type~/.*/i,value~/.*/i,type!:number)"; } @Override public HashMap<String, String> getRequiredAttributes() { HashMap<String, String> temp = new HashMap<String, String>(); temp.put("status", ".*"); return temp; } public static void main(String[] args) throws Exception { if (args.length < 1 ) { System.out.println("ERROR: Missing adapter ID"); System.exit(-1); } MyBangAdapter adapter = new MyBangAdapter(args[1]); System.out.println("AdapterName:"+adapter.getAdapterName()); System.out.println("Adapter Id :"+adapter.getAdapterId()); adapter.submitTopology(args); } }
1,629
4,569
<reponame>adouhuges/java-learning package com.brianway.learning.java.base; /** * Created by brian on 16/11/10. * * TODO 补码/反码相关知识 * https://www.cnblogs.com/zhangziqiu/archive/2011/03/30/ComputerCode.html * http://weihe6666.iteye.com/blog/1190033 * * 在计算机中,负数以原码的补码形式表达。 */ public class Binary { public static void main(String[] args) { int i = 5; int j = 10; System.out.println(i + ~j); int[] arr = new int[] {3, -2}; for (int a : arr) { //原数 System.out.println("a:" + a + " 二进制:" + Integer.toBinaryString(a)); // 按位取反 System.out.println("~a:" + ~a + " 二进制:" + Integer.toBinaryString(~a)); // 相反数 System.out.println("-a:" + -a + " 二进制:" + Integer.toBinaryString(-a)); System.out.println(-a == ~a + 1); System.out.println(~a == -a - 1); } } }
529
432
<reponame>GoogeTan/CubicChunks /* * This file is part of Cubic Chunks Mod, licensed under the MIT License (MIT). * * Copyright (c) 2015-2019 OpenCubicChunks * Copyright (c) 2015-2019 contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/ package io.github.opencubicchunks.cubicchunks.core.asm.mixin.selectable.client.optifine; import io.github.opencubicchunks.cubicchunks.api.util.Coords; import io.github.opencubicchunks.cubicchunks.api.world.IColumn; import io.github.opencubicchunks.cubicchunks.api.world.ICube; import io.github.opencubicchunks.cubicchunks.api.world.ICubicWorld; import io.github.opencubicchunks.cubicchunks.api.world.IMinMaxHeight; import io.github.opencubicchunks.cubicchunks.core.CubicChunksConfig; import net.minecraft.entity.Entity; import net.minecraft.tileentity.TileEntity; import net.minecraft.util.ClassInheritanceMultiMap; import net.minecraft.util.math.BlockPos; import net.minecraft.util.math.MathHelper; import net.minecraft.world.World; import net.minecraft.world.chunk.Chunk; import net.minecraft.world.chunk.storage.ExtendedBlockStorage; import org.spongepowered.asm.mixin.Dynamic; import org.spongepowered.asm.mixin.Mixin; import org.spongepowered.asm.mixin.Pseudo; import org.spongepowered.asm.mixin.Shadow; import org.spongepowered.asm.mixin.injection.At; import org.spongepowered.asm.mixin.injection.Inject; import org.spongepowered.asm.mixin.injection.callback.CallbackInfoReturnable; import java.util.Map; import java.util.Set; @Pseudo @Mixin(targets = "net.optifine.render.ChunkVisibility") public class MixinChunkVisibility { /** * Quadrant counter */ @Dynamic @Shadow(remap = false) private static int counter = 0; /** * Current max Y for quadrants already scanned in this scan. 
*/ @Dynamic @Shadow(remap = false) private static int iMaxStatic = -1; /** * Max Y after final test of all quadrants */ @Dynamic @Shadow(remap = false) private static int iMaxStaticFinal = Coords.blockToCube(Integer.MAX_VALUE) - 1; @Dynamic @Shadow(remap = false) private static World worldLast = null; @Dynamic @Shadow(remap = false) private static int pcxLast = -2147483648; private static int pcyLast = -2147483648; @Dynamic @Shadow(remap = false) private static int pczLast = -2147483648; @Dynamic @Inject(method = "getMaxChunkY", at = @At("HEAD"), cancellable = true, remap = false) private static void getMaxChunkYCC(World world, Entity viewEntity, int renderDistanceChunks, CallbackInfoReturnable<Integer> cbi) { if (!((ICubicWorld) world).isCubicWorld()) { return; } cbi.cancel(); if (true) { cbi.setReturnValue(Integer.MAX_VALUE - 1); return; } int pcx = MathHelper.floor(viewEntity.posX) >> 4; int pcy = MathHelper.floor(viewEntity.posY) >> 4; int pcz = MathHelper.floor(viewEntity.posZ) >> 4; Chunk playerChunk = world.getChunk(pcx, pcz); int cxStart = pcx - renderDistanceChunks; int cxEnd = pcx + renderDistanceChunks; int cyStart = pcy - CubicChunksConfig.verticalCubeLoadDistance; int cyEnd = pcy + CubicChunksConfig.verticalCubeLoadDistance; int czStart = pcz - renderDistanceChunks; int czEnd = pcz + renderDistanceChunks; if (world != worldLast || pcx != pcxLast || pcy != pcyLast || pcz != pczLast) { counter = 0; iMaxStaticFinal = Coords.blockToCube(((IMinMaxHeight) world).getMaxHeight()); worldLast = world; pcxLast = pcx; pcyLast = pcy; pczLast = pcz; } if (counter == 0) { iMaxStatic = Coords.blockToCube(Integer.MIN_VALUE) + 1; } int iMax = iMaxStatic; if ((counter & 1) == 0) { cxEnd = pcx; } else { cxStart = pcx; } if ((counter & 2) == 0) { cyEnd = pcy; } else { cyStart = pcy; } if ((counter & 4) == 0) { czEnd = pcz; } else { czStart = pcz; } for (int cx = cxStart; cx < cxEnd; ++cx) { for (int cz = czStart; cz < czEnd; ++cz) { Chunk chunk = world.getChunk(cx, cz); if 
(chunk.isEmpty()) { continue; } Iterable<? extends ICube> cubes = ((IColumn) chunk).getLoadedCubes(cyEnd, cyStart); for (ICube cube : cubes) { ExtendedBlockStorage ebs = cube.getStorage(); if (ebs != null && !ebs.isEmpty()) { iMax = Math.max(iMax, cube.getY()); // it's sorted, in reverse, so can break when the first one is found break; } ClassInheritanceMultiMap<Entity> cimm = cube.getEntitySet(); if (!cimm.isEmpty() && (chunk != playerChunk || cimm.size() != 1)) { iMax = Math.max(iMax, cube.getY()); break; } Map<BlockPos, TileEntity> mapTileEntities = cube.getTileEntityMap(); if (!mapTileEntities.isEmpty()) { Set<BlockPos> keys = mapTileEntities.keySet(); for (BlockPos pos : keys) { int i = pos.getY() >> 4; if (i > iMax) { iMax = i; } } } } } } if (counter < 7) { iMaxStatic = iMax; iMax = iMaxStaticFinal; } else { iMaxStaticFinal = iMax; iMaxStatic = -1; } counter = (counter + 1) % 8; cbi.setReturnValue(iMax << 4); } }
3,104
1,306
/* * Unresolved class. * * "happy" version. */ public class UnresClass { int foo; }
37
2,023
<gh_stars>1000+ #!/usr/bin/env python """ Kinect Demo using minimum values from the depth image. @Author = <NAME> @Date = 17 March, 2012 @Version = 1.1 @Filename = KinectJoystickMin.py """ from freenect import sync_get_depth as get_depth, sync_get_video as get_video, init, close_device, open_device, set_led import cv import numpy as np import pygame from math import * from numpy import mean def doloop(): #Series of commands to do pointer operations on the kinect (motor, led, accelerometer) ctx = init() #Initiates device mdev = open_device(ctx, 0) #Opens the device for commands set_led(mdev, 1) #Sets LED to green close_device(mdev #Closes device. Device must be closed immediately after usage #Mean filter caches yList = [0,0,0,0,0,0] xList = [0,0,0,0,0,0] #Sets color tuples RED = (255,0,0) BLUE = (0,0,255) TEAL = (0,200,100) BLACK = (0,0,0) #Sets the size of the screen xSize = 640 ySize = 480 done = False #Main while loop bool counter pygame.init() #Initiates pygame screen = pygame.display.set_mode((xSize, ySize), pygame.RESIZABLE) #Creates the pygame window screen.fill(BLACK) #Fills the window black #Initiates the xTempPos and yTempPos values so that the point will remain stationary #if the minimum value is larger than 600 xTempPos = xSize/2 yTempPos = ySize/2 global depth, rgb #Makes the depth and rgb variables global while not done: screen.fill(BLACK) #Makes the pygame window black after each iteration # Get a fresh frame (depth,_) = get_depth() (rgb, _) = get_video() minVal = np.min(depth) #This is the minimum value from the depth image minPos = np.argmin(depth) #This is the raw index of the minimum value above xPos = np.mod(minPos, xSize) #This is the x component of the raw index yPos = minPos//xSize #This is the y component of the raw index #This is the mean filter process """ A mean filter works by collecting values in a cache list and taking the mean of them to determine the final value. 
It works in this case to decrease the amount of volatility the minimum position experiences to get a smoother display with a more consistent value. My computer works smoothly with a 5 bit cache where as a faster computer may need a larger cache and a slower computer may need a smaller cache """ xList.append(xPos) del xList[0] xPos = int(mean(xList)) yList.append(yPos) del yList[0] yPos = int(mean(yList)) """ This if statement says that if the minimum value is below 600 to store the minimum positions in xTempPos and yTempPos and to make the dot color red. Also if the minimum value is larger than 600, xPos and yPos become the last stored minimum and maximum positions. It also changes the color to purple """ if minVal < 600: xTempPos = xPos yTempPos = yPos COLOR = cv.RGB(255,0,0) else: xPos = xTempPos yPos = yTempPos COLOR = cv.RGB(100,0,100) cv.Circle(rgb, (xPos, yPos), 2, COLOR, 40) #draws a circle of a certain color at minimum position cv.ShowImage('Image',rgb) #Shows the image cv.WaitKey(5) #Keyboard interupt """ The if statement below sets up the virtual joystick by basically breaking the pygame window into four parts. A dot representing the minimum position is drawn on the window and the corresponding button based on the position is "pressed". 
The quarter of the window in which the button "pressed" corresponds to turns teal after being "pressed" Top Right : A Bottom Right: B Bottom Left : Y Top Right : X """ if xPos <= xSize/2 and yPos <= ySize/2: command = 'A' rect1 = pygame.Rect((xSize/2,0),(xSize/2,ySize/2)) pygame.draw.rect(screen,TEAL,rect1) elif xPos <= xSize/2 and yPos > ySize/2: command = 'B' rect1 = pygame.Rect((xSize/2,ySize/2),(xSize/2,ySize/2)) pygame.draw.rect(screen,TEAL,rect1) elif xPos > xSize/2 and yPos <= ySize/2: command = 'X' rect1 = pygame.Rect((0,0),(xSize/2,ySize/2)) pygame.draw.rect(screen,TEAL,rect1) else: command = 'Y' rect1 = pygame.Rect((0,ySize/2),(xSize/2,ySize/2)) pygame.draw.rect(screen,TEAL,rect1) pygame.draw.line(screen, BLUE, (xSize/2, ySize/2), (xSize - xPos,yPos)) #Draws a line from the middle to the minimum position pygame.draw.circle(screen, RED, (xSize - xPos,yPos), 10) #Draws the circle on pygame window pygame.display.flip() #Displays the processed pygame window print command, minVal #Prints the "pressed" button and the minimum value for e in pygame.event.get(): #Itertates through current events if e.type is pygame.QUIT: #If the close button is pressed, the while loop ends done = True doloop()
2,279
7,883
// Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma // de Barcelona (UAB). // // This work is licensed under the terms of the MIT license. // For a copy, see <https://opensource.org/licenses/MIT>. #include "carla/client/DebugHelper.h" #include "carla/client/detail/Simulator.h" #include "carla/rpc/DebugShape.h" namespace carla { namespace client { using Shape = rpc::DebugShape; template <typename T> static void DrawShape( detail::EpisodeProxy &episode, const T &primitive, rpc::Color color, float life_time, bool persistent_lines) { const Shape shape{primitive, color, life_time, persistent_lines}; episode.Lock()->DrawDebugShape(shape); } void DebugHelper::DrawPoint( const geom::Location &location, float size, sensor::data::Color color, float life_time, bool persistent_lines) { Shape::Point point{location, size}; DrawShape(_episode, point, color, life_time, persistent_lines); } void DebugHelper::DrawLine( const geom::Location &begin, const geom::Location &end, float thickness, sensor::data::Color color, float life_time, bool persistent_lines) { Shape::Line line{begin, end, thickness}; DrawShape(_episode, line, color, life_time, persistent_lines); } void DebugHelper::DrawArrow( const geom::Location &begin, const geom::Location &end, float thickness, float arrow_size, sensor::data::Color color, float life_time, bool persistent_lines) { Shape::Line line{begin, end, thickness}; Shape::Arrow arrow{line, arrow_size}; DrawShape(_episode, arrow, color, life_time, persistent_lines); } void DebugHelper::DrawBox( const geom::BoundingBox &box, const geom::Rotation &rotation, float thickness, sensor::data::Color color, float life_time, bool persistent_lines) { Shape::Box the_box{box, rotation, thickness}; DrawShape(_episode, the_box, color, life_time, persistent_lines); } void DebugHelper::DrawString( const geom::Location &location, const std::string &text, bool draw_shadow, sensor::data::Color color, float life_time, bool persistent_lines) { 
Shape::String string{location, text, draw_shadow}; DrawShape(_episode, string, color, life_time, persistent_lines); } } // namespace client } // namespace carla
891
890
<reponame>shahzadlone/vireo /* * MIT License * * Copyright (c) 2017 Twitter * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include "fdk-aac/FDK_audio.h" #include "fdk-aac/aacdecoder_lib.h" #include "vireo/base_cpp.h" #include "vireo/constants.h" #include "vireo/common/security.h" #include "vireo/error/error.h" #include "vireo/internal/decode/aac.h" #include "vireo/sound/pcm.h" #include "vireo/util/util.h" namespace vireo { namespace internal{ namespace decode { struct _AAC { // size of 0x10000 = 65536 is based on this comment in fdk-aac/libMpegTPDec/src/tpdec_lib.cpp /* For packet based transport, pass input buffer to bitbuffer without copying the data. Unfortunately we do not know the actual buffer size. And the FDK bit buffer implementation needs a number 2^x. 
So we assume the maximum of 48 channels with 6144 bits per channel and round it up to the next power of 2 => 65536 bytes FDKinitBitStream(hBs, pBuffer, 0x10000, (*pBytesValid)<<3, BS_READER); */ // note that numeric_limits<uint16_t>::max(); is 65535 constexpr static const uint32_t kMaxBufferSize = 0x10000; // 65536 unique_ptr<AAC_DECODER_INSTANCE, function<void(AAC_DECODER_INSTANCE*)>> decoder = { nullptr, [](AAC_DECODER_INSTANCE* p) { if(p != nullptr) { aacDecoder_Close(p); } }}; // This scratch buffer is used to stage the data before it gets passed to the decoder. // It is needed because fdk-aac internal code sometimes reads up to 4 extra bytes past the end of the bitstream. common::Data32 scratch_buffer = { (uint8_t*)calloc(kMaxBufferSize, sizeof(uint8_t)), kMaxBufferSize, [](uint8_t* p) { free(p); } }; common::Sample16 decoded_sample = { (int16_t*)calloc(kMaxBufferSize, sizeof(uint16_t)), kMaxBufferSize, [](int16_t* p) { free(p); } }; settings::Audio audio_settings; functional::Audio<Sample> samples; int64_t last_index = -1; _AAC(const settings::Audio& audio_settings) : audio_settings(audio_settings) { THROW_IF(audio_settings.channels != 1 && audio_settings.channels != 2, Unsupported); THROW_IF(audio_settings.codec != settings::Audio::Codec::AAC_LC && audio_settings.codec != settings::Audio::Codec::AAC_LC_SBR, Unsupported); THROW_IF(find(kSampleRate.begin(), kSampleRate.end(), audio_settings.sample_rate) == kSampleRate.end(), Unsupported); } void init() { decoder.reset(aacDecoder_Open(TT_MP4_RAW, 1)); CHECK(decoder); const auto extradata = audio_settings.as_extradata(settings::Audio::ExtraDataType::aac); // Copy to a padded size buffer to prevent uninitialized reads. 
const uint16_t padded_size = extradata.count() + 4; common::Data16 padded_buffer = { (uint8_t*)calloc(padded_size, sizeof(uint8_t)), padded_size, [](uint8_t* p) { free(p); } }; padded_buffer.copy(extradata); const UINT size = extradata.count(); CHECK(aacDecoder_ConfigRaw(decoder.get(), (UCHAR**)util::get_addr(padded_buffer.data()), &size) == AAC_DEC_OK); CHECK(aacDecoder_SetParam(decoder.get(), AAC_CONCEAL_METHOD, 1) == AAC_DEC_OK); } void reset() { last_index = -1; init(); } }; AAC::AAC(const functional::Audio<Sample>& track) : functional::DirectAudio<AAC, sound::Sound>(track.a(), track.b()), _this(new _AAC(track.settings())) { THROW_IF(track.count() >= security::kMaxSampleCount, Unsafe); _settings = (settings::Audio) { settings::Audio::Codec::Unknown, track.settings().timescale, track.settings().sample_rate, track.settings().channels, // TODO: Fix channels to take SBR into account 0, }; _this->init(); _this->samples = track; } AAC::AAC(const AAC& aac) : functional::DirectAudio<AAC, sound::Sound>(aac.a(), aac.b(), aac.settings()), _this(aac._this) { } auto AAC::operator()(uint32_t index) const -> sound::Sound { THROW_IF(index >= count(), OutOfRange); THROW_IF(index >= _this->samples.count(), OutOfRange); const Sample& sample = _this->samples(index); sound::Sound sound; sound.pts = sample.pts; struct audio_info { uint16_t frame_size; uint8_t channels; }; sound.pcm = [_this = _this, index]() -> sound::PCM { auto decode_sample = [&_this](uint32_t index) -> audio_info { const Sample& sample = _this->samples(index); const auto sample_data = sample.nal(); THROW_IF(sample_data.count() + 4 > _AAC::kMaxBufferSize, Unsafe); _this->scratch_buffer.copy(sample_data); const UINT size = _this->scratch_buffer.count(); UINT valid_bytes_left = size; CHECK(aacDecoder_Fill(_this->decoder.get(), (UCHAR**)util::get_addr(_this->scratch_buffer.data()), &size, &valid_bytes_left) == AAC_DEC_OK); CHECK(valid_bytes_left == 0); THROW_IF(aacDecoder_DecodeFrame(_this->decoder.get(), 
(INT_PCM*)_this->decoded_sample.data(), _this->decoded_sample.capacity(), 0) != AAC_DEC_OK, Invalid); CStreamInfo* stream_info = aacDecoder_GetStreamInfo(_this->decoder.get()); const uint8_t audio_object_type = stream_info->aot; const uint8_t extension_object_type = stream_info->extAot; const uint16_t frame_size = stream_info->frameSize; const uint8_t channels = stream_info->numChannels; const uint32_t sample_rate = stream_info->sampleRate; // MPEG-4 AAC Low Complexity only with optional SBR supported THROW_IF(audio_object_type != 2, Unsupported); if (extension_object_type != 5) { // AAC-LC CHECK(frame_size == AUDIO_FRAME_SIZE); CHECK(channels == _this->audio_settings.channels); CHECK(sample_rate == _this->audio_settings.sample_rate); } else { // AAC-LC SBR CHECK(frame_size == AUDIO_FRAME_SIZE * SBR_FACTOR); // CHECK(channels == _this->audio_settings.channels * SBR_FACTOR); CHECK(sample_rate == _this->audio_settings.sample_rate * SBR_FACTOR); } const uint16_t decoded_size = frame_size * channels; _this->decoded_sample.set_bounds(0, decoded_size); return {frame_size, channels}; }; if (index - _this->last_index != 1) { _this->reset(); if (index > 0) { decode_sample(index - 1); // There is 1 sample dependency between samples } } const auto info = decode_sample(index); common::Sample16 decoded_sample_copy(_this->decoded_sample); _this->last_index = index; sound::PCM pcm(info.frame_size, info.channels, move(decoded_sample_copy)); if (pcm.size() == AUDIO_FRAME_SIZE * SBR_FACTOR) { return pcm.downsample(2); } else { return move(pcm); } }; return sound; } }}}
2,948
1,382
<reponame>cropinghigh/pipewire /* PipeWire * * Copyright © 2018 <NAME> * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef PIPEWIRE_LINK_H #define PIPEWIRE_LINK_H #ifdef __cplusplus extern "C" { #endif #include <spa/utils/defs.h> #include <spa/utils/hook.h> #include <pipewire/proxy.h> /** \defgroup pw_link Link * * A link is the connection between 2 nodes (\ref pw_node). Nodes are * linked together on ports. * * The link is responsible for negotiating the format and buffers for * the nodes. 
* */ /** * \addtogroup pw_link * \{ */ #define PW_TYPE_INTERFACE_Link PW_TYPE_INFO_INTERFACE_BASE "Link" #define PW_VERSION_LINK 3 struct pw_link; /** \enum pw_link_state The different link states */ enum pw_link_state { PW_LINK_STATE_ERROR = -2, /**< the link is in error */ PW_LINK_STATE_UNLINKED = -1, /**< the link is unlinked */ PW_LINK_STATE_INIT = 0, /**< the link is initialized */ PW_LINK_STATE_NEGOTIATING = 1, /**< the link is negotiating formats */ PW_LINK_STATE_ALLOCATING = 2, /**< the link is allocating buffers */ PW_LINK_STATE_PAUSED = 3, /**< the link is paused */ PW_LINK_STATE_ACTIVE = 4, /**< the link is active */ }; /** Convert a \ref pw_link_state to a readable string */ const char * pw_link_state_as_string(enum pw_link_state state); /** The link information. Extra information can be added in later versions */ struct pw_link_info { uint32_t id; /**< id of the global */ uint32_t output_node_id; /**< server side output node id */ uint32_t output_port_id; /**< output port id */ uint32_t input_node_id; /**< server side input node id */ uint32_t input_port_id; /**< input port id */ #define PW_LINK_CHANGE_MASK_STATE (1 << 0) #define PW_LINK_CHANGE_MASK_FORMAT (1 << 1) #define PW_LINK_CHANGE_MASK_PROPS (1 << 2) #define PW_LINK_CHANGE_MASK_ALL ((1 << 3)-1) uint64_t change_mask; /**< bitfield of changed fields since last call */ enum pw_link_state state; /**< the current state of the link */ const char *error; /**< an error reason if \a state is error */ struct spa_pod *format; /**< format over link */ struct spa_dict *props; /**< the properties of the link */ }; struct pw_link_info * pw_link_info_update(struct pw_link_info *info, const struct pw_link_info *update); struct pw_link_info * pw_link_info_merge(struct pw_link_info *info, const struct pw_link_info *update, bool reset); void pw_link_info_free(struct pw_link_info *info); #define PW_LINK_EVENT_INFO 0 #define PW_LINK_EVENT_NUM 1 /** Link events */ struct pw_link_events { #define PW_VERSION_LINK_EVENTS 
0 uint32_t version; /** * Notify link info * * \param info info about the link */ void (*info) (void *object, const struct pw_link_info *info); }; #define PW_LINK_METHOD_ADD_LISTENER 0 #define PW_LINK_METHOD_NUM 1 /** Link methods */ struct pw_link_methods { #define PW_VERSION_LINK_METHODS 0 uint32_t version; int (*add_listener) (void *object, struct spa_hook *listener, const struct pw_link_events *events, void *data); }; #define pw_link_method(o,method,version,...) \ ({ \ int _res = -ENOTSUP; \ spa_interface_call_res((struct spa_interface*)o, \ struct pw_link_methods, _res, \ method, version, ##__VA_ARGS__); \ _res; \ }) #define pw_link_add_listener(c,...) pw_link_method(c,add_listener,0,__VA_ARGS__) /** * \} */ #ifdef __cplusplus } /* extern "C" */ #endif #endif /* PIPEWIRE_LINK_H */
1,663
677
#include "library_loader/lib_manager.h" #include "library_loader/lib_handle.h" namespace elf { ElfLibraryManager::ElfLibraryManager(){} ElfLibraryManager::~ElfLibraryManager() { Clear(); } void ElfLibraryManager::Add(ElfLibHandle* handle) { LibIterator iter = active_libs_.find(handle->GetName()); if (iter == active_libs_.end()) { active_libs_.insert(std::make_pair(handle->GetName(), handle)); } std::vector<ElfLibHandle*>* libs = handle->depend_libs(); if(!libs->empty()) { for(int i = 0; i < libs->size(); ++i) { ElfLibHandle* lib = (*libs)[i]; Add(lib); } } handle->AddRef(); } void ElfLibraryManager::Release(ElfLibHandle* handle) { std::vector<ElfLibHandle*>* libs = handle->depend_libs(); if(!libs->empty()) { for(int i = 0; i < libs->size(); ++i) { ElfLibHandle* lib = (*libs)[i]; Release(lib); } } handle->Release(); } void ElfLibraryManager::Delete(ElfLibHandle* handle) { LibIterator iter = active_libs_.find(handle->GetName()); if (iter == active_libs_.end()) return; active_libs_.erase(iter); } ElfLibHandle* ElfLibraryManager::Get(const std::string& name) { LibIterator iter = active_libs_.find(name); if (iter == active_libs_.end()) return nullptr; return iter->second; } void ElfLibraryManager::Clear() { std::vector<ElfLibHandle*> collector; for(LibIterator iter = active_libs_.begin(); iter != active_libs_.end(); ++iter) { collector.push_back(iter->second); } active_libs_.clear(); for(int i = 0; i < collector.size(); ++i) { delete collector[i]; } } } // namespace elf
605
634
<filename>modules/base/platform-api/src/main/java/consulo/ide/eap/EarlyAccessProgramManager.java /* * Copyright 2013-2016 consulo.io * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package consulo.ide.eap; import com.intellij.openapi.components.PersistentStateComponent; import com.intellij.openapi.components.ServiceManager; import com.intellij.openapi.components.State; import com.intellij.openapi.components.Storage; import consulo.logging.Logger; import jakarta.inject.Inject; import jakarta.inject.Singleton; import org.jdom.Element; import javax.annotation.Nonnull; import javax.annotation.Nullable; import java.util.HashMap; import java.util.LinkedHashMap; import java.util.Map; /** * @author VISTALL * @since 17:11/15.10.13 */ @Singleton @State(name = "EarlyAccessProgramManager", storages = @Storage("eap.xml")) public class EarlyAccessProgramManager implements PersistentStateComponent<Element> { @Nonnull public static EarlyAccessProgramManager getInstance() { return ServiceManager.getService(EarlyAccessProgramManager.class); } public static boolean is(@Nonnull Class<? extends EarlyAccessProgramDescriptor> key) { return getInstance().getState(key); } private static final Logger LOG = Logger.getInstance(EarlyAccessProgramManager.class); private final Map<Class<? extends EarlyAccessProgramDescriptor>, Boolean> myStates = new LinkedHashMap<>(); @Inject public EarlyAccessProgramManager() { } public boolean getState(@Nonnull Class<? 
extends EarlyAccessProgramDescriptor> key) { Boolean value = myStates.get(key); if (value == null) { EarlyAccessProgramDescriptor extension = EarlyAccessProgramDescriptor.EP_NAME.findExtensionOrFail(key); return extension.getDefaultState(); } return value; } public void setState(Class<? extends EarlyAccessProgramDescriptor> key, boolean itemSelected) { EarlyAccessProgramDescriptor extension = EarlyAccessProgramDescriptor.EP_NAME.findExtensionOrFail(key); if(extension.getDefaultState() == itemSelected) { myStates.remove(key); } else { myStates.put(key, itemSelected); } } @Nullable @Override public Element getState() { Element element = new Element("state"); for (Map.Entry<Class<? extends EarlyAccessProgramDescriptor>, Boolean> entry : myStates.entrySet()) { EarlyAccessProgramDescriptor extension = EarlyAccessProgramDescriptor.EP_NAME.findExtension(entry.getKey()); if (extension.getDefaultState() == entry.getValue()) { continue; } Element child = new Element("state"); child.setAttribute("class", entry.getKey().getName()); child.setAttribute("value", String.valueOf(entry.getValue())); element.addContent(child); } return element; } @Override public void loadState(Element state) { myStates.clear(); Map<String, EarlyAccessProgramDescriptor> map = descriptorToMap(); for (Element element : state.getChildren()) { String aClass = element.getAttributeValue("class"); EarlyAccessProgramDescriptor descriptor = map.get(aClass); if (descriptor == null) { continue; } Boolean value = Boolean.parseBoolean(element.getAttributeValue("value")); myStates.put(descriptor.getClass(), value); } } private static Map<String, EarlyAccessProgramDescriptor> descriptorToMap() { Map<String, EarlyAccessProgramDescriptor> map = new HashMap<>(); for (EarlyAccessProgramDescriptor descriptor : EarlyAccessProgramDescriptor.EP_NAME.getExtensionList()) { map.put(descriptor.getClass().getName(), descriptor); } return map; } }
1,328
5,908
package org.testcontainers.containers;

import org.testcontainers.utility.DockerImageName;

/**
 * {@link JdbcDatabaseContainerProvider} implementation that produces MS SQL Server containers,
 * selected when the JDBC URL's database type matches {@link MSSQLServerContainer#NAME}.
 */
public class MSSQLServerContainerProvider extends JdbcDatabaseContainerProvider {

    /** Reports whether this provider handles the given database type token. */
    @Override
    public boolean supports(String databaseType) {
        return databaseType.equals(MSSQLServerContainer.NAME);
    }

    /** Creates a container using the default MS SQL Server image tag. */
    @Override
    public JdbcDatabaseContainer newInstance() {
        return newInstance(MSSQLServerContainer.DEFAULT_TAG);
    }

    /** Creates a container for the requested image tag. */
    @Override
    public JdbcDatabaseContainer newInstance(String tag) {
        DockerImageName imageName = DockerImageName.parse(MSSQLServerContainer.IMAGE).withTag(tag);
        return new MSSQLServerContainer(imageName);
    }
}
226
476
<gh_stars>100-1000
/*
 * Wizer interface for Wasm module to be initialized.
 *
 * This header provides several macros that allow a Wasm module written in C/C++
 * to declare an initializer function, and ensure that global constructors (in
 * C++'s case) are run at initialization time rather than on startup of the
 * pre-initialized module.
 *
 * NOTE(review): the export_name attribute used below is a clang/wasi-sdk
 * extension; this header presumably targets that toolchain only -- confirm
 * before building with another compiler.
 */

#ifndef _WIZER_H_
#define _WIZER_H_

/* Ensure the exported symbols get unmangled C linkage when compiled as C++. */
#ifdef __cplusplus
#define __WIZER_EXTERN_C extern "C"
#else
#define __WIZER_EXTERN_C extern
#endif

/*
 * This macro inserts the exported functions necessary to allow Wizer to
 * pre-initialize a Wasm module.
 *
 * To use, simply invoke the macro in exactly one compilation unit (C/C++ file)
 * that is compiled into the Wasm module to be pre-initialized:
 *
 *     static void my_init_function() { ... }
 *
 *     WIZER_INIT(my_init_function);
 *
 * (The macro refers to the provided init function, so it must have been defined
 * or must have a forward declaration at the point the macro is used.)
 *
 * The resulting module should be processed by Wizer as follows:
 *
 *     $ wizer -r _start=wizer.resume -o out.wasm in.wasm
 *
 * The result of this will be the following behavior:
 *
 * - If the `in.wasm` (the direct compilation output of a program including this
 *   macro invocation) is run directly according to the WASI ABI (i.e., by
 *   invoking `_start`), then nothing changes: global constructors are run,
 *   `main()` is invoked, then global destructors are run. The initialization
 *   function is *not* run in this case.
 *
 * - During pre-initialization (i.e., during this `wizer` invocation), global
 *   constructors will run, and then the provided initialization function will
 *   run. The module's memory and global-variable state is then snapshotted and
 *   saved into `out.wasm`.
 *
 *   All other Wizer restrictions apply (see Wizer documentation for details):
 *   for example, WASI hostcalls may be blocked, depending on options, and
 *   invoking any other imported function will result in an immediate trap
 *   and failure of the Wizer run.
 *
 * - If the resulting `out.wasm` is then run using the WASI ABI, the program's
 *   global constructors are *not* re-run. Instead, execution starts directly at
 *   `main()`, using the heap and global-variable state left by the global
 *   constructor and initialization function execution during the Wizer
 *   invocation.
 *
 * If no initialization function is needed (i.e., only C++ global constructors
 * should be run), use `WIZER_DEFAULT_INIT()` instead.
 */
#define WIZER_INIT(init_func)                                                  \
  __WIZER_EXTERN_C void __wasm_call_ctors();                                   \
  __WIZER_EXTERN_C void __wasm_call_dtors();                                   \
  __WIZER_EXTERN_C int __original_main();                                      \
  /* This function's export name `wizer.initialize` is specially */            \
  /* recognized by Wizer. It is the direct entry point for pre-init. */        \
  __attribute__((export_name("wizer.initialize"))) void                        \
  __wizer_initialize() {                                                       \
    /* `__wasm_call_ctors()` is generated by `wasm-ld` and invokes all */      \
    /* of the global constructors. It is safe (and in fact necessary) */       \
    /* to manually invoke it here because `wizer.initialize` is the */         \
    /* direct entry point, and no libc startup (crt1.o or equivalent) */       \
    /* is executed before this code does. */                                   \
    __wasm_call_ctors();                                                       \
    /* We now invoke the provided init function before returning. */           \
    init_func();                                                               \
  }                                                                            \
  /* This function replaces `_start` (the WASI-specified entry point) in */    \
  /* the pre-initialized Wasm module. */                                       \
  __attribute__((export_name("wizer.resume"))) void __wizer_resume() {         \
    /* `__original_main()` is defined by the WASI SDK toolchain due to */      \
    /* special semantics in C/C++ for the `main()` function, i.e., ito */      \
    /* can either take argc/argv or not. It collects arguments using */        \
    /* the appropriate WASI calls and then invokes the user program's */       \
    /* `main()`. This may change in the future; when it does, we will */       \
    /* coordinate with the WASI-SDK toolchain to implement this entry */       \
    /* point in an alternate way. */                                           \
    __original_main();                                                         \
    /* Because we are replacing `_start()`, we need to manually invoke */      \
    /* destructors as well. */                                                 \
    __wasm_call_dtors();                                                       \
  }

/*
 * This macro is like `WIZER_INIT()`, but takes no initialization function.
 * Instead, the pre-initialization phase only executes C++ global constructors
 * before snapshotting the module state.
 *
 * See documentation for `WIZER_INIT()` for more details and usage instructions.
 */
/* Expands WIZER_INIT with a no-op init function. */
#define WIZER_DEFAULT_INIT()                                                   \
  static void __empty_init() {}                                                \
  WIZER_INIT(__empty_init)

#endif // _WIZER_H_
2,247
5,607
<reponame>kfowler/micronaut-core<filename>http-server-netty/src/main/java/io/micronaut/http/server/netty/handler/accesslog/HttpAccessLogHandler.java
/*
 * Copyright 2017-2020 original authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.micronaut.http.server.netty.handler.accesslog;

import io.micronaut.http.server.netty.handler.accesslog.element.AccessLog;
import io.micronaut.http.server.netty.handler.accesslog.element.AccessLogFormatParser;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufHolder;
import io.netty.channel.ChannelDuplexHandler;
import io.netty.channel.ChannelHandler.Sharable;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelPromise;
import io.netty.channel.socket.SocketChannel;
import io.netty.handler.codec.http.HttpHeaders;
import io.netty.handler.codec.http.HttpRequest;
import io.netty.handler.codec.http.HttpResponse;
import io.netty.handler.codec.http.HttpResponseStatus;
import io.netty.handler.codec.http.LastHttpContent;
import io.netty.handler.codec.http2.Http2Exception;
import io.netty.handler.codec.http2.HttpConversionUtil.ExtensionHeaderNames;
import io.netty.util.Attribute;
import io.netty.util.AttributeKey;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Logging handler for HTTP access logs.
 * Access logs will be logged at info level.
 *
 * <p>Request data is captured in {@link #channelRead}; response data is accumulated in
 * {@link #write} and the log line is emitted once the {@code LastHttpContent} write succeeds.
 * The per-channel {@link AccessLog} accumulator is stored as a channel attribute and reused
 * (reset) across requests on the same connection.</p>
 *
 * @author croudet
 * @since 2.0
 */
@Sharable
public class HttpAccessLogHandler extends ChannelDuplexHandler {
    /**
     * The default logger name.
     */
    public static final String HTTP_ACCESS_LOGGER = "HTTP_ACCESS_LOGGER";

    // Channel attribute key under which the per-connection AccessLog accumulator is kept.
    private static final AttributeKey<AccessLog> ACCESS_LOGGER = AttributeKey.valueOf("ACCESS_LOGGER");
    private static final String H2_PROTOCOL_NAME = "HTTP/2.0";

    private final Logger logger;
    private final AccessLogFormatParser accessLogFormatParser;

    /**
     * Creates a HttpAccessLogHandler.
     * @param loggerName A logger name. Falls back to {@link #HTTP_ACCESS_LOGGER} when null/empty.
     * @param spec The log format specification.
     */
    public HttpAccessLogHandler(String loggerName, String spec) {
        this(loggerName == null || loggerName.isEmpty() ? null : LoggerFactory.getLogger(loggerName), spec);
    }

    /**
     * Creates a HttpAccessLogHandler.
     * @param logger A logger. Will log at info level.
     * @param spec The log format specification.
     */
    public HttpAccessLogHandler(Logger logger, String spec) {
        super();
        this.logger = logger == null ? LoggerFactory.getLogger(HTTP_ACCESS_LOGGER) : logger;
        this.accessLogFormatParser = new AccessLogFormatParser(spec);
    }

    @Override
    public void channelRead(ChannelHandlerContext ctx, Object msg) throws Http2Exception {
        // Only inspect request heads; body chunks pass through untouched.
        if (logger.isInfoEnabled() && msg instanceof HttpRequest) {
            final SocketChannel channel = (SocketChannel) ctx.channel();
            final HttpRequest request = (HttpRequest) msg;
            final HttpHeaders headers = request.headers();
            // Trying to detect http/2: requests translated from HTTP/2 carry the
            // x-http2-* extension headers added by HttpConversionUtil.
            String protocol;
            if (headers.contains(ExtensionHeaderNames.STREAM_ID.text()) || headers.contains(ExtensionHeaderNames.SCHEME.text())) {
                protocol = H2_PROTOCOL_NAME;
            } else {
                protocol = request.protocolVersion().text();
            }
            accessLog(channel).onRequestHeaders(channel, request.method().name(), request.headers(), request.uri(), protocol);
        }
        ctx.fireChannelRead(msg);
    }

    @Override
    public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {
        // When the logger is disabled, stay fully out of the write path.
        if (logger.isInfoEnabled()) {
            processWriteEvent(ctx, msg, promise);
        } else {
            super.write(ctx, msg, promise);
        }
    }

    // Fetches the channel's AccessLog accumulator, creating it lazily; an existing
    // one is reset so state from the previous request on this connection is cleared.
    private AccessLog accessLog(SocketChannel channel) {
        final Attribute<AccessLog> attr = channel.attr(ACCESS_LOGGER);
        AccessLog accessLog = attr.get();
        if (accessLog == null) {
            accessLog = accessLogFormatParser.newAccessLogger();
            attr.set(accessLog);
        } else {
            accessLog.reset();
        }
        return accessLog;
    }

    // Writes the message and emits the access-log line only after the write
    // completes successfully. promise.unvoid() guarantees a listenable future.
    private void log(ChannelHandlerContext ctx, Object msg, ChannelPromise promise, AccessLog accessLog) {
        ctx.write(msg, promise.unvoid()).addListener(future -> {
            if (future.isSuccess()) {
                accessLog.log(logger);
            }
        });
    }

    // Returns true when the response was a 100-Continue and has already been written
    // (interim responses are not logged); otherwise records the response head.
    private static boolean processHttpResponse(HttpResponse response, AccessLog accessLogger, ChannelHandlerContext ctx, ChannelPromise promise) {
        final HttpResponseStatus status = response.status();
        if (status.equals(HttpResponseStatus.CONTINUE)) {
            ctx.write(response, promise);
            return true;
        }
        accessLogger.onResponseHeaders(ctx, response.headers(), status.codeAsText().toString());
        return false;
    }

    // Accumulates response byte counts and triggers the final log on LastHttpContent.
    // NOTE(review): if no AccessLog attribute exists (no request head was seen),
    // the write falls through untouched.
    private void processWriteEvent(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {
        final AccessLog accessLogger = ctx.channel().attr(ACCESS_LOGGER).get();
        if (accessLogger != null) {
            if (msg instanceof HttpResponse && processHttpResponse((HttpResponse) msg, accessLogger, ctx, promise)) {
                return;
            }
            if (msg instanceof LastHttpContent) {
                accessLogger.onLastResponseWrite(((LastHttpContent) msg).content().readableBytes());
                log(ctx, msg, promise, accessLogger);
                return;
            } else if (msg instanceof ByteBufHolder) {
                accessLogger.onResponseWrite(((ByteBufHolder) msg).content().readableBytes());
            } else if (msg instanceof ByteBuf) {
                accessLogger.onResponseWrite(((ByteBuf) msg).readableBytes());
            }
        }
        super.write(ctx, msg, promise);
    }
}
2,403
716
<gh_stars>100-1000 // // Copyright (c) 2015-2020 CNRS INRIA // Copyright (c) 2015 Wandercraft, 86 rue de Paris 91400 Orsay, France. // #include "pinocchio/math/fwd.hpp" #include "pinocchio/multibody/joint/joints.hpp" #include "pinocchio/algorithm/rnea.hpp" #include "pinocchio/algorithm/aba.hpp" #include "pinocchio/algorithm/crba.hpp" #include "pinocchio/algorithm/jacobian.hpp" #include "pinocchio/algorithm/compute-all-terms.hpp" #include <boost/test/unit_test.hpp> #include <iostream> using namespace pinocchio; BOOST_AUTO_TEST_SUITE(JointFreeFlyer) BOOST_AUTO_TEST_CASE(spatial) { Motion v(Motion::Random()); ConstraintIdentityTpl<double,0> constraint; Motion Sv = constraint * v.toVector(); BOOST_CHECK(Sv == v); } BOOST_AUTO_TEST_SUITE_END()
329
391
<gh_stars>100-1000 # # Copyright (C) 2020 IBM. All Rights Reserved. # # See LICENSE.txt file in the root directory # of this source tree for licensing information. # import unittest from clai.server.command_message import State, NOOP_COMMAND from clai.server.plugins.nlc2cmd.nlc2cmd import NLC2CMD class NLC2CMDCloudTest(unittest.TestCase): @classmethod def set_up_class(cls): cls.state = State(user_name='tester', command_id='0', command="show me the list of cloud tags", result_code='0') cls.agent = NLC2CMD() # Checking login command @unittest.skip("Local dev testing only") def test_get_next_action_cloud_login(self): self.state.command = "how do i login" action = self.agent.get_next_action(state=self.state) print("Input: {}".format(self.state.command)) print("---------------------------") print("Explanation: {}".format(action.description)) self.assertEqual(NOOP_COMMAND, action.suggested_command) self.assertEqual("\x1b[95mTry >> ibmcloud login\x1b[0m", action.description) print("===========================") # Checking help command @unittest.skip("Local dev testing only") def test_get_next_action_cloud_help(self): self.state.command = "help me" action = self.agent.get_next_action(state=self.state) print("Input: {}".format(self.state.command)) print("---------------------------") print("Explanation: {}".format(action.description)) self.assertEqual(NOOP_COMMAND, action.suggested_command) self.assertEqual("\x1b[95mTry >> ibmcloud help COMMAND\x1b[0m", action.description) print("===========================") # Checking invite command @unittest.skip("Local dev testing only") def test_get_next_action_cloud_invite(self): self.state.command = "I want to invite someone to my cloud" action = self.agent.get_next_action(state=self.state) print("Input: {}".format(self.state.command)) print("---------------------------") print("Explanation: {}".format(action.description)) self.assertEqual(NOOP_COMMAND, action.suggested_command) self.assertEqual("\x1b[95mTry >> ibmcloud account 
user-invite USER_EMAIL\x1b[0m", action.description) print("===========================")
908
5,169
<filename>Specs/a/4/f/OsamaRabieFirstFrameWork/1.0.15/OsamaRabieFirstFrameWork.podspec.json { "name": "OsamaRabieFirstFrameWork", "version": "1.0.15", "summary": "My First Pod OsamaRabieFirstFrameWork.", "description": "If this worked, I will be doing much more pods to help everyone out.", "homepage": "https://github.com/osamagucCitySoc/OsamaRabieFirstFrameWork", "license": "MIT", "authors": { "<NAME>": "<EMAIL>" }, "platforms": { "ios": "11.0" }, "swift_versions": "4.0", "source": { "git": "https://github.com/osamagucCitySoc/OsamaRabieFirstFrameWork.git", "tag": "1.0.15" }, "source_files": [ "OsamaRabieFirstFrameWork", "OsamaRabieFirstFrameWork/**/*.{h,m,swift}" ], "resource_bundles": { "OsamaRabieFirstFrameWork": [ "OsamaRabieFirstFrameWork/**/*.{storyboard,xib}" ] }, "swift_version": "4.0" }
379
605
// RUN: %clang_cc1 -std=gnu99 -fsyntax-only -pedantic -verify %s
// RUN: %clang_cc1 -std=gnu99 -fsyntax-only -Wgnu -Wc11-extensions -verify %s
// REQUIRES: LP64

// NOTE: This is a clang -verify regression test for C initializer semantics.
// Every `expected-error` / `expected-warning` / `expected-note` comment is
// functional test data matched against the compiler's diagnostics; do not
// edit or reflow those comments, and do not "fix" the intentionally invalid
// code below.

extern int foof() = 1; // expected-error{{illegal initializer (only variables can be initialized)}}
static int x, y, z;

static int ary[] = { x, y, z }; // expected-error{{initializer element is not a compile-time constant}}
int ary2[] = { x, y, z }; // expected-error{{initializer element is not a compile-time constant}}

extern int fileScopeExtern[3] = { 1, 3, 5 }; // expected-warning{{'extern' variable has an initializer}}

static long ary3[] = { 1, "abc", 3, 4 }; // expected-warning{{incompatible pointer to integer conversion initializing 'long' with an expression of type 'char[4]'}}

void func() {
  int x = 1;

  typedef int TInt = 1; // expected-error{{illegal initializer (only variables can be initialized)}}

  int xComputeSize[] = { 1, 3, 5 };

  int x3[x] = { 1, 2 }; // expected-error{{variable-sized object may not be initialized}}

  int x4 = { 1, 2 }; // expected-warning{{excess elements in scalar initializer}}

  int y[4][3] = {
    { 1, 3, 5 },
    { 2, 4, 6 },
    { 3, 5, 7 },
  };

  int y2[4][3] = {
    1, 3, 5, 2, 4, 6, 3, 5, 7
  };

  int y3[4][3] = {
    { 1, 3, 5 },
    { 2, 4, 6 },
    { 3, 5, 7 },
    { 4, 6, 8 },
    { 5 }, // expected-warning{{excess elements in array initializer}}
  };

  struct threeElements {
    int a,b,c;
  } z = { 1 };

  struct threeElements *p = 7; // expected-warning{{incompatible integer to pointer conversion initializing 'struct threeElements *' with an expression of type 'int'}}

  extern int blockScopeExtern[3] = { 1, 3, 5 }; // expected-error{{'extern' variable cannot have an initializer}}

  static long x2[3] = { 1.0, "abc", // expected-warning{{incompatible pointer to integer conversion initializing 'long' with an expression of type 'char[4]'}}
                        5.8 }; // expected-warning {{implicit conversion from 'double' to 'long' changes value from 5.8 to 5}}
}

void test() {
  int y1[3] = {
    { 1, 2, 3 } // expected-warning{{excess elements in scalar initializer}}
  };
  int y3[4][3] = {
    { 1, 3, 5 },
    { 2, 4, 6 },
    { 3, 5, 7 },
    { 4, 6, 8 },
    { }, // expected-warning{{use of GNU empty initializer extension}} expected-warning{{excess elements in array initializer}}
  };
  int y4[4][3] = {
    { 1, 3, 5, 2 }, // expected-warning{{excess elements in array initializer}}
    { 4, 6 },
    { 3, 5, 7 },
    { 4, 6, 8 },
  };
}

void allLegalAndSynonymous() {
  short q[4][3][2] = {
    { 1 },
    { 2, 3 },
    { 4, 5, 6 }
  };
  short q2[4][3][2] = {
    { 1, 0, 0, 0, 0, 0 },
    { 2, 3, 0, 0, 0, 0 },
    { 4, 5, 6 }
  };
  short q3[4][3][2] = {
    {
      { 1 },
    },
    {
      { 2, 3 },
    },
    {
      { 4, 5 },
      { 6 },
    },
  };
}

void legal() {
  short q[][3][2] = {
    { 1 },
    { 2, 3 },
    { 4, 5, 6 }
  };
  int q_sizecheck[(sizeof(q) / sizeof(short [3][2])) == 3? 1 : -1];
}

unsigned char asso_values[] = { 34 };
int legal2() {
  return asso_values[0];
}

void illegal() {
  short q2[4][][2] = { // expected-error{{array has incomplete element type 'short[][2]'}}
    { 1, 0, 0, 0, 0, 0 },
    { 2, 3, 0, 0, 0, 0 },
    { 4, 5, 6 }
  };
  short q3[4][3][] = { // expected-error{{array has incomplete element type 'short[]'}}
    {
      { 1 },
    },
    {
      { 2, 3 },
    },
    {
      { 4, 5 },
      { 6 },
    },
  };
  int a[][] = { 1, 2 }; // expected-error{{array has incomplete element type 'int[]'}}
}

typedef int AryT[];

void testTypedef() {
  AryT a = { 1, 2 }, b = { 3, 4, 5 };
  int a_sizecheck[(sizeof(a) / sizeof(int)) == 2? 1 : -1];
  int b_sizecheck[(sizeof(b) / sizeof(int)) == 3? 1 : -1];
}

static char const xx[] = "test";
int xx_sizecheck[(sizeof(xx) / sizeof(char)) == 5? 1 : -1];
static char const yy[5] = "test";
static char const zz[3] = "test"; // expected-warning{{initializer-string for char array is too long}}
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wexcess-initializers"
static char const zz_quiet[3] = "test";
#pragma clang diagnostic pop

void charArrays() {
  static char const test[] = "test";
  int test_sizecheck[(sizeof(test) / sizeof(char)) == 5? 1 : -1];
  static char const test2[] = { "weird stuff" };
  static char const test3[] = { "test", "excess stuff" }; // expected-warning{{excess elements in char array initializer}}
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wexcess-initializers"
  static char const test3_quiet[] = {"test", "excess stuff"};
#pragma clang diagnostic pop

  char* cp[] = { "Hello" };

  char c[] = { "Hello" };
  int l[sizeof(c) == 6 ? 1 : -1];

  int i[] = { "Hello "}; // expected-warning{{incompatible pointer to integer conversion initializing 'int' with an expression of type 'char[7]'}}

  char c2[] = { "Hello", "Good bye" }; //expected-warning{{excess elements in char array initializer}}

  int i2[1] = { "Hello" }; //expected-warning{{incompatible pointer to integer conversion initializing 'int' with an expression of type 'char[6]'}}
  char c3[5] = { "Hello" };
  char c4[4] = { "Hello" }; //expected-warning{{initializer-string for char array is too long}}

  int i3[] = {}; //expected-warning{{zero size arrays are an extension}} expected-warning{{use of GNU empty initializer extension}}
}

void variableArrayInit() {
  int a = 4;
  char strlit[a] = "foo"; //expected-error{{variable-sized object may not be initialized}}

  int b[a] = { 1, 2, 4 }; //expected-error{{variable-sized object may not be initialized}}
}

// Pure array tests
float r1[10] = {{7}}; //expected-warning{{braces around scalar initializer}}
float r2[] = {{8}}; //expected-warning{{braces around scalar initializer}}
char r3[][5] = {1,2,3,4,5,6};
int r3_sizecheck[(sizeof(r3) / sizeof(char[5])) == 2? 1 : -1];
char r3_2[sizeof r3 == 10 ? 1 : -1];
float r4[1][2] = {1,{2},3,4}; //expected-warning{{braces around scalar initializer}} expected-warning{{excess elements in array initializer}}
char r5[][5] = {"aa", "bbb", "ccccc"};
char r6[sizeof r5 == 15 ? 1 : -1];
const char r7[] = "zxcv";
char r8[5] = "5char";
char r9[5] = "6chars"; //expected-warning{{initializer-string for char array is too long}}

unsigned char r10[] = __extension__ (_Generic(0, int: (__extension__ "foo" )));

int r11[0] = {}; //expected-warning{{zero size arrays are an extension}} expected-warning{{use of GNU empty initializer extension}}

// Some struct tests
void autoStructTest() {
  struct s1 {char a; char b;} t1;
  struct s2 {struct s1 c;} t2 = { t1 };
  // The following is a less than great diagnostic (though it's on par with EDG).
  struct s1 t3[] = {t1, t1, "abc", 0}; //expected-warning{{incompatible pointer to integer conversion initializing 'char' with an expression of type 'char[4]'}}
  int t4[sizeof t3 == 6 ? 1 : -1];
}
struct foo { int z; } w;
int bar (void) {
  struct foo z = { w }; //expected-error{{initializing 'int' with an expression of incompatible type 'struct foo'}}
  return z.z;
}
struct s3 {void (*a)(void);} t5 = {autoStructTest};
struct {int a; int b[];} t6 = {1, {1, 2, 3}}; // expected-warning{{flexible array initialization is a GNU extension}} \
// expected-note{{initialized flexible array member 'b' is here}}
union {char a; int b;} t7[] = {1, 2, 3};
int t8[sizeof t7 == (3*sizeof(int)) ? 1 : -1];

struct bittest{int : 31, a, :21, :12, b;};
struct bittest bittestvar = {1, 2, 3, 4}; //expected-warning{{excess elements in struct initializer}}

// Not completely sure what should happen here...
int u1 = {}; //expected-warning{{use of GNU empty initializer extension}} expected-error{{scalar initializer cannot be empty}}
int u2 = {{3}}; //expected-warning{{too many braces around scalar initializer}}

// PR2362
void varArray() {
  int c[][x] = { 0 }; //expected-error{{variable-sized object may not be initialized}}
}

// PR2151
void emptyInit() {struct {} x[] = {6};} //expected-warning{{empty struct is a GNU extension}} \
// expected-error{{initializer for aggregate with no elements}}

void noNamedInit() {
  struct {int:5;} x[] = {6}; //expected-error{{initializer for aggregate with no elements}} \
// expected-warning {{struct without named members is a GNU extension}}
}
struct {int a; int:5;} noNamedImplicit[] = {1,2,3};
int noNamedImplicitCheck[sizeof(noNamedImplicit) == 3 * sizeof(*noNamedImplicit) ? 1 : -1];


// ptrs are constant
struct soft_segment_descriptor {
  long ssd_base;
};
static int dblfault_tss;

union uniao { int ola; } xpto[1];

struct soft_segment_descriptor gdt_segs[] = {
  {(long) &dblfault_tss},
  { (long)xpto},
};

static void sppp_ipv6cp_up();
const struct {} ipcp = { sppp_ipv6cp_up }; //expected-warning{{empty struct is a GNU extension}} \
// expected-warning{{excess elements in struct initializer}}

struct _Matrix {
  union {
    float m[4][4];
  };
}; //expected-warning{{anonymous unions are a C11 extension}}
typedef struct _Matrix Matrix;
void test_matrix() {
  const Matrix mat1 = {
    { { 1.0f, 2.0f, 3.0f, 4.0f,
        5.0f, 6.0f, 7.0f, 8.0f,
        9.0f, 10.0f, 11.0f, 12.0f,
        13.0f, 14.0f, 15.0f, 16.0f } }
  };

  const Matrix mat2 = {
    1.0f, 2.0f, 3.0f, 4.0f,
    5.0f, 6.0f, 7.0f, 8.0f,
    9.0f, 10.0f, 11.0f, 12.0f,
    13.0f, 14.0f, 15.0f, 16.0f
  };
}

char badchararray[1] = { badchararray[0], "asdf" }; // expected-warning {{excess elements in array initializer}} expected-error {{initializer element is not a compile-time constant}}

// Test the GNU extension for initializing an array from an array
// compound literal. PR9261.
typedef int int5[5];
int a1[5] = (int[]){1, 2, 3, 4, 5}; // expected-warning{{initialization of an array of type 'int[5]' from a compound literal of type 'int[5]' is a GNU extension}}
int a2[5] = (int[5]){1, 2, 3, 4, 5}; // expected-warning{{initialization of an array of type 'int[5]' from a compound literal of type 'int[5]' is a GNU extension}}
int a3[] = ((int[]){1, 2, 3, 4, 5}); // expected-warning{{initialization of an array of type 'int[]' from a compound literal of type 'int[5]' is a GNU extension}}
int a4[] = (int[5]){1, 2, 3, 4, 5}; // expected-warning{{initialization of an array of type 'int[]' from a compound literal of type 'int[5]' is a GNU extension}}
int a5[] = (int5){1, 2, 3, 4, 5}; // expected-warning{{initialization of an array of type 'int[]' from a compound literal of type 'int5' (aka 'int[5]') is a GNU extension}}

int a6[5] = (int[]){1, 2, 3}; // expected-error{{cannot initialize array of type 'int[5]' with array of type 'int[3]'}}

int nonconst_value();
int a7[5] = (int[5]){ 1,
                      2,
                      3,
                      4,
                      nonconst_value() // expected-error{{initializer element is not a compile-time constant}}
};

// <rdar://problem/10636946>
__attribute__((weak)) const unsigned int test10_bound = 10;
char test10_global[test10_bound]; // expected-error {{variable length array declaration not allowed at file scope}}
void test10() {
  char test10_local[test10_bound] = "help"; // expected-error {{variable-sized object may not be initialized}}
}
10,608
{"default": {"description": "The Hindi Discourse Analysis dataset is a corpus for analyzing discourse modes present in its sentences. \nIt contains sentences from stories written by 11 famous authors from the 20th Century. \n4-5 stories by each author have been selected which were available in the public domain resulting \nin a collection of 53 stories. Most of these short stories were originally written in Hindi \nbut some of them were written in other Indian languages and later translated to Hindi.\n", "citation": "@inproceedings{swapnil2020,\n title={An Annotated Dataset of Discourse Modes in Hindi Stories},\n author={<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>},\n booktitle={Proceedings of the 12th Conference on Language Resources and Evaluation (LREC 2020)},\n volume={12},\n pages={1191\u20131196},\n year={2020}\n", "homepage": "https://github.com/midas-research/hindi-discourse", "license": "", "features": {"Story_no": {"dtype": "int32", "id": null, "_type": "Value"}, "Sentence": {"dtype": "string", "id": null, "_type": "Value"}, "Discourse Mode": {"num_classes": 6, "names": ["Argumentative", "Descriptive", "Dialogue", "Informative", "Narrative", "Other"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "hindi_discourse", "config_name": "default", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1998930, "num_examples": 9968, "dataset_name": "hindi_discourse"}}, "download_checksums": {"https://raw.githubusercontent.com/midas-research/hindi-discourse/master/discourse_dataset.json": {"num_bytes": 4176677, "checksum": "d27b447e383686213f9936467ec9fbc9e44fa0aebd3f8000865f605a5b3d4ab0"}}, "download_size": 4176677, "post_processing_size": null, "dataset_size": 1998930, "size_in_bytes": 6175607}}
634
575
<gh_stars>100-1000
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef NET_REPORTING_REPORTING_ENDPOINT_H_
#define NET_REPORTING_REPORTING_ENDPOINT_H_

#include <string>
#include <vector>

#include "base/macros.h"
#include "base/time/time.h"
#include "net/base/net_export.h"
#include "net/base/network_isolation_key.h"
#include "url/gurl.h"
#include "url/origin.h"

namespace net {

// Identifies an endpoint group.
// Value type: copyable, movable, and ordered so it can be used as a map key.
struct NET_EXPORT ReportingEndpointGroupKey {
  ReportingEndpointGroupKey();

  ReportingEndpointGroupKey(const NetworkIsolationKey& network_isolation_key,
                            const url::Origin& origin,
                            const std::string& group_name);

  ReportingEndpointGroupKey(const ReportingEndpointGroupKey& other);
  ReportingEndpointGroupKey(ReportingEndpointGroupKey&& other);

  ReportingEndpointGroupKey& operator=(const ReportingEndpointGroupKey&);
  ReportingEndpointGroupKey& operator=(ReportingEndpointGroupKey&&);

  ~ReportingEndpointGroupKey();

  // Human-readable debug representation of the key.
  std::string ToString() const;

  // The NetworkIsolationKey the group is scoped to. Needed to prevent leaking
  // third party contexts across sites.
  NetworkIsolationKey network_isolation_key;

  // Origin that configured this endpoint group.
  url::Origin origin;

  // Name of the endpoint group (defaults to "default" during header parsing).
  std::string group_name;
};

// Full set of comparison operators so the key works with ordered containers
// and in tests.
NET_EXPORT bool operator==(const ReportingEndpointGroupKey& lhs,
                           const ReportingEndpointGroupKey& rhs);
NET_EXPORT bool operator!=(const ReportingEndpointGroupKey& lhs,
                           const ReportingEndpointGroupKey& rhs);
NET_EXPORT bool operator<(const ReportingEndpointGroupKey& lhs,
                          const ReportingEndpointGroupKey& rhs);
NET_EXPORT bool operator>(const ReportingEndpointGroupKey& lhs,
                          const ReportingEndpointGroupKey& rhs);

// The configuration by an origin to use an endpoint for report delivery.
// TODO(crbug.com/912622): Track endpoint failures for garbage collection.
struct NET_EXPORT ReportingEndpoint {
  struct NET_EXPORT EndpointInfo {
    static const int kDefaultPriority;
    static const int kDefaultWeight;

    // The endpoint to which reports may be delivered. (Origins may configure
    // many.)
    GURL url;

    // Priority when multiple endpoints are configured for an origin; endpoints
    // with numerically lower priorities are used first.
    int priority = kDefaultPriority;

    // Weight when multiple endpoints are configured for an origin with the same
    // priority; among those with the same priority, each endpoint has a chance
    // of being chosen that is proportional to its weight.
    int weight = kDefaultWeight;
  };

  // Delivery/upload counters for an endpoint; not persisted across restarts.
  struct Statistics {
    // The number of attempted uploads that we've made for this endpoint.
    int attempted_uploads = 0;
    // The number of uploads that have succeeded for this endpoint.
    int successful_uploads = 0;
    // The number of individual reports that we've attempted to upload for this
    // endpoint. (Failed uploads will cause a report to be counted multiple
    // times, once for each attempt.)
    int attempted_reports = 0;
    // The number of individual reports that we've successfully uploaded for
    // this endpoint.
    int successful_reports = 0;
  };

  // Constructs an invalid ReportingEndpoint.
  ReportingEndpoint();

  ReportingEndpoint(const ReportingEndpointGroupKey& group,
                    const EndpointInfo& info);

  ReportingEndpoint(const ReportingEndpoint& other);
  ReportingEndpoint(ReportingEndpoint&& other);

  ReportingEndpoint& operator=(const ReportingEndpoint&);
  ReportingEndpoint& operator=(ReportingEndpoint&&);

  ~ReportingEndpoint();

  // True when this endpoint was constructed with a group key and info (i.e.
  // not via the default constructor).
  bool is_valid() const;
  explicit operator bool() const { return is_valid(); }

  // Identifies the endpoint group to which this endpoint belongs.
  ReportingEndpointGroupKey group_key;

  // URL, priority, and weight of the endpoint.
  EndpointInfo info;

  // Information about the number of deliveries that we've attempted for this
  // endpoint. Not persisted across restarts.
  Statistics stats;
};

// Marks whether a given endpoint group is configured to include its origin's
// subdomains.
enum class OriginSubdomains { EXCLUDE, INCLUDE, DEFAULT = EXCLUDE };

// Represents an endpoint group set by an origin via Report-To header.
struct NET_EXPORT ReportingEndpointGroup {
  ReportingEndpointGroup();

  ReportingEndpointGroup(const ReportingEndpointGroup& other);

  ~ReportingEndpointGroup();

  ReportingEndpointGroupKey group_key;

  // Whether this group applies to subdomains of its origin.
  OriginSubdomains include_subdomains = OriginSubdomains::DEFAULT;

  // Time for which the endpoint group remains valid after it is set.
  base::TimeDelta ttl;

  // Endpoints in this group.
  std::vector<ReportingEndpoint::EndpointInfo> endpoints;
};

// Representation of an endpoint group used for in-memory and persistent
// storage.
struct NET_EXPORT CachedReportingEndpointGroup {
  CachedReportingEndpointGroup(const ReportingEndpointGroupKey& group_key,
                               OriginSubdomains include_subdomains,
                               base::Time expires,
                               base::Time last_used);

  // |now| is the time at which the header was processed.
  CachedReportingEndpointGroup(const ReportingEndpointGroup& endpoint_group,
                               base::Time now);

  // Origin and group name.
  ReportingEndpointGroupKey group_key;

  // Whether this group applies to subdomains of |group_key.origin|.
  OriginSubdomains include_subdomains = OriginSubdomains::DEFAULT;

  // When this group's max_age expires.
  // (base::Time is used here instead of base::TimeTicks for ease of
  // serialization for persistent storage, and because it is more appropriate
  // for expiration times, as per //base/time/time.h.)
  base::Time expires;

  // Last time that this group was accessed for a delivery or updated via a
  // new header.
  base::Time last_used;
};

}  // namespace net

#endif  // NET_REPORTING_REPORTING_ENDPOINT_H_
1,955
3,384
// // Generated by class-dump 3.5 (64 bit). // // class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2013 by <NAME>. // #pragma mark - // // File: /Applications/Xcode.app/Contents/Developer/Platforms/iPhoneSimulator.platform/Developer/SDKs/iPhoneSimulator.sdk/System/Library/PrivateFrameworks/CoreTime.framework/CoreTime // UUID: A6D14BEF-AE54-37D4-B56D-B5FA9ABE3761 // // Arch: x86_64 // Current version: 126.1.0 // Compatibility version: 1.0.0 // Source version: 126.1.0.0.0 // Minimum iOS version: 8.0.0 // SDK version: 8.0.0 // // Objective-C Garbage Collection: Unsupported // @interface _TMBlockDebugger : NSObject { NSString *_name; } - (void)dealloc; - (oneway void)release; - (id)retain; - (void)use; - (id)initWithName:(id)arg1; - (id)init; @end
408
593
<reponame>SkUrRiEr/TTGO_TWatch_Library #pragma once #include "bma423.h" #include "../i2c/i2c_bus.h" enum { DIRECTION_TOP_EDGE = 0, DIRECTION_BOTTOM_EDGE = 1, DIRECTION_LEFT_EDGE = 2, DIRECTION_RIGHT_EDGE = 3, DIRECTION_DISP_UP = 4, DIRECTION_DISP_DOWN = 5 } ; typedef struct bma4_dev Bma; typedef struct bma4_accel Accel; typedef struct bma4_accel_config Acfg; class BMA { public: BMA(I2CBus &bus); ~BMA(); bool begin(); void reset(); uint8_t direction(); float temperature(); bool disableAccel(); bool enableAccel(bool en = true); bool disalbeIrq(uint16_t int_map = BMA423_STEP_CNTR_INT); bool enableIrq(uint16_t int_map = BMA423_STEP_CNTR_INT); void attachInterrupt(); uint32_t getCounter(); bool isStepCounter(); bool isDoubleClick(); bool readInterrupt(); bool isTilt(); bool isActivity(); bool isAnyNoMotion(); bool getAccel(Accel &acc); uint8_t getIrqStatus(); const char *getActivity(); bool resetStepCounter(); bool enableFeature(uint8_t feature, uint8_t enable ); bool accelConfig(Acfg &cfg); bool set_remap_axes(struct bma423_axes_remap *remap_data); bool enableStepCountInterrupt(bool en = true); bool enableTiltInterrupt(bool en = true); bool enableWakeupInterrupt(bool en = true); bool enableAnyNoMotionInterrupt(bool en = true); bool enableActivityInterrupt(bool en = true); private: static uint16_t read(uint8_t dev_addr, uint8_t reg_addr, uint8_t *read_data, uint16_t len); static uint16_t write(uint8_t dev_addr, uint8_t reg_addr, uint8_t *read_data, uint16_t len); uint16_t config(); Bma _dev; static I2CBus *_bus; uint16_t _irqStatus; bool _init; };
859
634
<filename>modules/base/lang-impl/src/main/java/com/intellij/psi/stubs/ByteArrayInterner.java<gh_stars>100-1000 // Copyright 2000-2019 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file. package com.intellij.psi.stubs; import com.intellij.openapi.util.io.BufferExposingByteArrayOutputStream; import consulo.util.collection.HashingStrategy; import consulo.util.collection.primitive.objects.ObjectIntMap; import consulo.util.collection.primitive.objects.ObjectMaps; import javax.annotation.Nonnull; import java.util.Arrays; class ByteArrayInterner { private static final HashingStrategy<byte[]> BYTE_ARRAY_STRATEGY = new HashingStrategy<byte[]>() { @Override public int hashCode(byte[] object) { return Arrays.hashCode(object); } @Override public boolean equals(byte[] o1, byte[] o2) { return Arrays.equals(o1, o2); } }; private final ObjectIntMap<byte[]> arrayToStart = ObjectMaps.newObjectIntHashMap(BYTE_ARRAY_STRATEGY); final BufferExposingByteArrayOutputStream joinedBuffer = new BufferExposingByteArrayOutputStream(); int internBytes(@Nonnull byte[] bytes) { if (bytes.length == 0) return 0; int start = arrayToStart.getInt(bytes); if (start == 0) { start = joinedBuffer.size() + 1; // should be positive arrayToStart.putInt(bytes, start); joinedBuffer.write(bytes, 0, bytes.length); } return start; } }
511
765
<gh_stars>100-1000 /* * Copyright (c) 2019-2021 Advanced Micro Devices, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #ifndef __GPU_COMPUTE_OPERAND_INFO_HH__ #define __GPU_COMPUTE_OPERAND_INFO_HH__ #include "arch/gpu_registers.hh" #include "base/flags.hh" #include "config/the_gpu_isa.hh" namespace gem5 { class OperandInfo { public: OperandInfo() = delete; OperandInfo(int opSelectorVal, int size, bool src, bool scalar_reg, bool vector_reg, bool imm) : _opSelectorVal(opSelectorVal), _size(size), _numDWords(size <= 4 ? 
1 : size / 4) { if (src) flags.set(SRC); if (scalar_reg) flags.set(SCALAR_REG); if (vector_reg) flags.set(VECTOR_REG); if (imm) flags.set(IMMEDIATE); if (TheGpuISA::isVccReg(opSelectorVal)) flags.set(VCC); if (TheGpuISA::isExecMask(opSelectorVal)) flags.set(EXEC); if (TheGpuISA::isFlatScratchReg(opSelectorVal)) flags.set(FLAT); if (TheGpuISA::isLiteral(opSelectorVal)) flags.set(LITERAL); if (TheGpuISA::isConstVal(opSelectorVal)) flags.set(CONSTANT); if (TheGpuISA::isPosConstVal(opSelectorVal)) flags.set(POS_CONST); } int numRegisters() const { return _numDWords / TheGpuISA::RegSizeDWords; } int sizeInDWords() const { return _numDWords; } int size() const { return _size; } // Certain opIdx's get changed in calls to opSelectorToRegIdx // This avoids that by returning the exact value int rawRegisterIndex() const { return _opSelectorVal; } int registerIndex(int numScalarRegs) const { // Some regs (i.e. VSRC, VDST) are explicitly declared as vectors // as opposed to checking if it's a vector through a function call, so // they don't have an offset applied and can be returned immediately if (isVectorReg() && _opSelectorVal < TheGpuISA::REG_VGPR_MIN) return _opSelectorVal; return TheGpuISA::opSelectorToRegIdx(_opSelectorVal, numScalarRegs); } bool isSrc() const { return flags.isSet(SRC); } bool isDst() const { return !flags.isSet(SRC); } bool isImm() const { return flags.isSet(IMMEDIATE); } bool isScalarReg() const { return flags.isSet(SCALAR_REG); } bool isVectorReg() const { return flags.isSet(VECTOR_REG); } bool isVcc() const { return flags.isSet(VCC); } bool isExec() const { return flags.isSet(EXEC); } bool isFlatScratch() const { return flags.isSet(FLAT); } void setVirtToPhysMapping(std::vector<int> v, std::vector<int> p) { _virtIndices = v; _physIndices = p; assert(_virtIndices.size() == _physIndices.size()); assert(_numDWords == _physIndices.size()); } /** * We typically only need the first virtual register for the operand * regardless of its size. 
*/ int virtIdx(int reg_num=0) const { return _virtIndices.at(reg_num); } int physIdx(int reg_num=0) const { return _physIndices.at(reg_num); } const std::vector<int>& virtIndices() const { return _virtIndices; } const std::vector<int>& physIndices() const { return _physIndices; } std::vector<int>& bankReadCounts() const { return _bankReadCounts; } typedef uint32_t FlagsType; typedef gem5::Flags<FlagsType> Flags; private: enum : FlagsType { // If the operand is a src or not SRC = 0x00000001, // If the operand is a scalar or not SCALAR_REG = 0x00000002, // If the operand is a vector or not VECTOR_REG = 0x00000004, // If the operand is an immediate or not IMMEDIATE = 0x00000008, // If the operand is a VCC register VCC = 0x00000010, // If the operand is an EXEC register EXEC = 0x00000020, // If the operand is a FLAT/SCRATCH register FLAT = 0x00000040, // If the operand is a literal LITERAL = 0x00000080, // If the operand is a constant value CONSTANT = 0x00000100, // If the constant is positive or negative POS_CONST = 0x00000200 }; Flags flags; /** * Value of the operand as used in registers.cc functions */ const int _opSelectorVal; /** * Size of the operand in bytes */ const int _size; /** * Size of operand in DWords */ const int _numDWords; std::vector<int> _virtIndices; std::vector<int> _physIndices; /** * The number of reads this operand will make to each bank. */ mutable std::vector<int> _bankReadCounts; }; } // namespace gem5 #endif // __GPU_COMPUTE_OPERAND_INFO_H__
2,629
348
<gh_stars>100-1000 {"nom":"Maisons","circ":"3ème circonscription","dpt":"Aude","inscrits":40,"abs":14,"votants":26,"blancs":3,"nuls":1,"exp":22,"res":[{"nuance":"REM","nom":"<NAME>","voix":12},{"nuance":"SOC","nom":"M. <NAME>","voix":10}]}
99
2,816
//===----------------------------------------------------------------------===// // DuckDB // // duckdb/main/relation/view_relation.hpp // // //===----------------------------------------------------------------------===// #pragma once #include "duckdb/main/relation.hpp" namespace duckdb { class ViewRelation : public Relation { public: ViewRelation(ClientContext &context, string schema_name, string view_name); string schema_name; string view_name; vector<ColumnDefinition> columns; public: unique_ptr<QueryNode> GetQueryNode() override; unique_ptr<TableRef> GetTableRef() override; const vector<ColumnDefinition> &Columns() override; string ToString(idx_t depth) override; string GetAlias() override; }; } // namespace duckdb
230
825
<reponame>Liangrj5/tf-encrypted<gh_stars>100-1000 """Extracting tf.data processing pipelines from full model GraphDefs.""" import os import tensorflow as tf # tf.graph_util.extract_sub_graph will be removed in future tf version try: from tensorflow.compat.v1.graph_util import extract_sub_graph except ImportError: from tensorflow.graph_util import extract_sub_graph def data_prep_from_saved_model( graph_def, data_filenames, batch_size, data_prep_start_node="serialized_example:0", data_prep_end_node="DatasetToSingleElement:0" ): """Main function to extract data processing pipelines.""" # Trim graph to keep only the nodes related to data pre-processing data_prep_end_node_name = data_prep_end_node.split(":")[0] gdef_trimmed = extract_sub_graph( graph_def, dest_nodes=[data_prep_end_node_name], ) # Load TFRecord files then generate a Dataset of batch dataset = tf.data.TFRecordDataset(data_filenames) dataset = dataset.batch(batch_size) iterator = dataset.make_one_shot_iterator() dataset_b = iterator.get_next() # Preprocess data data_out, = tf.import_graph_def( gdef_trimmed, input_map={data_prep_start_node: dataset_b}, return_elements=[data_prep_end_node], ) # TFE expects tensors with fully defined shape fixed_shape = [batch_size] + data_out.get_shape().as_list()[1:] data_out = tf.reshape(data_out, fixed_shape) return data_out def list_files_from_dir(directory): file_names_list = tf.io.gfile.listdir(directory) path_files_list = [os.path.join(directory, f) for f in file_names_list] return path_files_list
589
559
/** * Copyright (c) 2016-2017 Netflix, Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <tokens/ClientTokenFactory.h> #include <crypto/Key.h> #include <entityauth/EntityAuthenticationData.h> #include <io/MslObject.h> #include <MslError.h> #include <MslInternalException.h> #include <tokens/MasterToken.h> #include <tokens/MslUser.h> #include <tokens/UserIdToken.h> #include <util/MslContext.h> #include <memory> #include <string> using namespace std; using namespace netflix::msl; using namespace netflix::msl::crypto; using namespace netflix::msl::entityauth; using namespace netflix::msl::io; using namespace netflix::msl::util; namespace netflix { namespace msl { namespace tokens { MslError ClientTokenFactory::isMasterTokenRevoked(shared_ptr<MslContext> /*ctx*/, shared_ptr<MasterToken> /*masterToken*/) { return MslError::NONE; // Note: java code returns java null here; } MslError ClientTokenFactory::acceptNonReplayableId(shared_ptr<MslContext> /*ctx*/, shared_ptr<MasterToken> /*masterToken*/, int64_t /*nonReplayableId*/) { return MslError::NONE; // Note: java code returns java null here; } shared_ptr<MasterToken> ClientTokenFactory::createMasterToken( shared_ptr<MslContext> /*ctx*/, shared_ptr<EntityAuthenticationData> /*entityAuthData*/, const SecretKey& /*encryptionKey*/, const SecretKey& /*hmacKey*/, shared_ptr<MslObject> /*issuerData*/) { throw MslInternalException("Creating master tokens is unsupported by the token factory."); } MslError 
ClientTokenFactory::isMasterTokenRenewable(shared_ptr<MslContext> /*ctx*/, shared_ptr<MasterToken> /*masterToken*/) { return MslError::NONE; // Note: java code returns java null here; } shared_ptr<MasterToken> ClientTokenFactory::renewMasterToken( shared_ptr<MslContext> /*ctx*/, shared_ptr<MasterToken> /*masterToken*/, const SecretKey& /*encryptionKey*/, const SecretKey& /*hmacKey*/, shared_ptr<MslObject> /*issuerData*/) { throw MslInternalException("Renewing master tokens is unsupported by the token factory."); } MslError ClientTokenFactory::isUserIdTokenRevoked( shared_ptr<MslContext> /*ctx*/, shared_ptr<MasterToken> /*masterToken*/, shared_ptr<UserIdToken> /*userIdToken*/) { return MslError::NONE; // Note: java code returns java null here; } shared_ptr<UserIdToken> ClientTokenFactory::createUserIdToken(shared_ptr<MslContext> /*ctx*/, shared_ptr<MslUser> /*user*/, shared_ptr<MasterToken> /*masterToken*/) { throw MslInternalException("Creating user ID tokens is unsupported by the token factory."); } shared_ptr<UserIdToken> ClientTokenFactory::renewUserIdToken(shared_ptr<MslContext> /*ctx*/, shared_ptr<UserIdToken> /*userIdToken*/, shared_ptr<MasterToken> /*masterToken*/) { throw MslInternalException("Renewing master tokens is unsupported by the token factory."); } shared_ptr<MslUser> ClientTokenFactory::createUser(shared_ptr<MslContext> /*ctx*/, const string& /*userdata*/) { throw MslInternalException("Creating users is unsupported by the token factory."); } }}} // namespace netflix::msl::tokens
1,262
332
{ "name": "txtfilereader", "class": "com.alibaba.datax.plugin.reader.txtfilereader.TxtFileReader", "description": "useScene: test. mechanism: use datax framework to transport data from txt file. warn: The more you know about the data, the less problems you encounter.", "developer": "alibaba" }
91
452
<reponame>the-real-mrcs/firebase-admin-java /* * Copyright 2017 Google Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.firebase.database.future; import com.google.firebase.database.DataSnapshot; import com.google.firebase.database.DatabaseError; import com.google.firebase.database.EventRecord; import com.google.firebase.database.Query; import com.google.firebase.database.TestFailure; import com.google.firebase.database.ValueEventListener; import com.google.firebase.database.core.view.Event; import com.google.firebase.testing.TestUtils; import java.util.ArrayList; import java.util.List; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; public class ReadFuture implements Future<List<EventRecord>> { private final ValueEventListener valueEventListener; private List<EventRecord> events = new ArrayList<>(); private Semaphore semaphore; private boolean wasCancelled = false; private boolean done = false; private Exception exception; public ReadFuture(final Query ref, final CompletionCondition condition) { this(ref, condition, false); } private ReadFuture( final Query ref, final CompletionCondition condition, final boolean ignoreFirstNull) { semaphore = new Semaphore(0); this.valueEventListener = new ValueEventListener() { @Override public void onDataChange(DataSnapshot snapshot) { if (ignoreFirstNull && 
events.size() == 0 && snapshot.getValue() == null) { return; } events.add(new EventRecord(snapshot, Event.EventType.VALUE, null)); try { if (condition.isComplete(events)) { ref.removeEventListener(valueEventListener); finish(); } } catch (Exception e) { exception = e; finish(); } } @Override public void onCancelled(DatabaseError error) { wasCancelled = true; finish(); } }; ref.addValueEventListener(this.valueEventListener); } // Completes on the first fired event public ReadFuture(final Query ref) { this( ref, new CompletionCondition() { @Override public boolean isComplete(List<EventRecord> events) { return true; } }); } // Factory helper - completes on first non-null value returned. public static ReadFuture untilNonNull(Query ref) { return new ReadFuture( ref, new CompletionCondition() { @Override public boolean isComplete(List<EventRecord> events) { return events.get(events.size() - 1).getSnapshot().getValue() != null; } }); } // Factory helper - completes when equal to provided value. public static ReadFuture untilEquals(Query ref, final Object value) { return untilEquals(ref, value, false); } public static ReadFuture untilEquals(Query ref, final Object value, boolean ignoreFirstNull) { return new ReadFuture( ref, new CompletionCondition() { @Override public boolean isComplete(List<EventRecord> events) { Object eventValue = events.get(events.size() - 1).getSnapshot().getValue(); return eventValue != null && eventValue.equals(value); } }, ignoreFirstNull); } // Factory helper - completes after count events, ignoring the first null public static ReadFuture untilCountAfterNull(Query ref, final int count) { return new ReadFuture( ref, new CompletionCondition() { @Override public boolean isComplete(List<EventRecord> events) { return events.size() == count; } }, true); } // Factory helper - completes after count events. 
public static ReadFuture untilCount(Query ref, final int count) { return new ReadFuture( ref, new CompletionCondition() { @Override public boolean isComplete(List<EventRecord> events) { return events.size() == count; } }); } @Override public boolean cancel(boolean mayInterruptIfRunning) { // Can't cancel this return false; } @Override public boolean isCancelled() { return wasCancelled; } @Override public boolean isDone() { return done; //To change body of implemented methods use File | Settings | File Templates. } public Object lastValue() { return events.get(events.size() - 1).getSnapshot().getValue(); } @Override public List<EventRecord> get() throws InterruptedException, ExecutionException { semaphore.acquire(1); return events; } @Override public List<EventRecord> get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException { return null; //To change body of implemented methods use File | Settings | File Templates. } public Object waitForLastValue() throws InterruptedException, TimeoutException, TestFailure { timedWait(); return lastValue(); } public List<EventRecord> timedGet() throws InterruptedException, TimeoutException, TestFailure { return timedGet(TestUtils.TEST_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS); } public List<EventRecord> timedGet(long timeout, TimeUnit timeoutUnit) throws InterruptedException, TimeoutException, TestFailure { timedWait(timeout, timeoutUnit); return events; } public void timedWait() throws InterruptedException, TimeoutException, TestFailure { timedWait(TestUtils.TEST_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS); } public void timedWait(long timeout, TimeUnit timeoutUnit) throws InterruptedException, TimeoutException, TestFailure { if (!semaphore.tryAcquire(1, timeout, timeoutUnit)) { throw new TimeoutException(); } if (exception != null) { throw new TestFailure(exception); } } private void finish() { done = true; semaphore.release(1); } public interface CompletionCondition { boolean 
isComplete(List<EventRecord> events); } }
2,399
3,269
<reponame>Akhil-Kashyap/LeetCode-Solutions // Time: O(n) // Space: O(1) class Solution { public: int diagonalSum(vector<vector<int>>& mat) { const int n = mat.size(); int result = 0; for (int i = 0; i < n; ++i) { result += mat[i][i] + mat[n - 1 - i][i]; } if (n % 2) { result -= mat[n / 2][n / 2]; } return result; } };
222
1,847
// Copyright (c) 2021 The Orbit Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "WindowsTracing/Tracer.h" #include "TracerImpl.h" namespace orbit_windows_tracing { std::unique_ptr<Tracer> Tracer::Create(orbit_grpc_protos::CaptureOptions capture_options, TracerListener* listener) { return std::make_unique<TracerImpl>(capture_options, listener); } } // namespace orbit_windows_tracing
189
459
<filename>nuparu/include/tbb/internal/_tbb_windef.h /* Copyright 2005-2015 Intel Corporation. All Rights Reserved. This file is part of Threading Building Blocks. Threading Building Blocks is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation. Threading Building Blocks is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Threading Building Blocks; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA As a special exception, you may use this file as part of a free software library without restriction. Specifically, if other files instantiate templates or use macros or inline functions from this file, or you compile this file and link it with other files to produce an executable, this file does not by itself cause the resulting executable to be covered by the GNU General Public License. This exception does not however invalidate any other reasons why the executable file might be covered by the GNU General Public License. */ #ifndef __TBB_tbb_windef_H #error Do not #include this internal file directly; use public TBB headers instead. #endif /* __TBB_tbb_windef_H */ // Check that the target Windows version has all API calls requried for TBB. // Do not increase the version in condition beyond 0x0500 without prior discussion! #if defined(_WIN32_WINNT) && _WIN32_WINNT<0x0501 #error TBB is unable to run on old Windows versions; _WIN32_WINNT must be 0x0501 or greater. #endif #if !defined(_MT) #error TBB requires linkage with multithreaded C/C++ runtime library. \ Choose multithreaded DLL runtime in project settings, or use /MD[d] compiler switch. 
#endif // Workaround for the problem with MVSC headers failing to define namespace std namespace std { using ::size_t; using ::ptrdiff_t; } #define __TBB_STRING_AUX(x) #x #define __TBB_STRING(x) __TBB_STRING_AUX(x) // Default setting of TBB_USE_DEBUG #ifdef TBB_USE_DEBUG # if TBB_USE_DEBUG # if !defined(_DEBUG) # pragma message(__FILE__ "(" __TBB_STRING(__LINE__) ") : Warning: Recommend using /MDd if compiling with TBB_USE_DEBUG!=0") # endif # else # if defined(_DEBUG) # pragma message(__FILE__ "(" __TBB_STRING(__LINE__) ") : Warning: Recommend using /MD if compiling with TBB_USE_DEBUG==0") # endif # endif #endif #if (__TBB_BUILD || __TBBMALLOC_BUILD) && !defined(__TBB_NO_IMPLICIT_LINKAGE) #define __TBB_NO_IMPLICIT_LINKAGE 1 #endif #if _MSC_VER #if !__TBB_NO_IMPLICIT_LINKAGE #ifdef __TBB_LIB_NAME #pragma comment(lib, __TBB_STRING(__TBB_LIB_NAME)) #else #ifdef _DEBUG #pragma comment(lib, "tbb_debug.lib") #else #pragma comment(lib, "tbb.lib") #endif #endif #endif #endif
1,089
530
#include "frame_cpu.h" #include "../../../feature/feature_depthMap.h" #include "../../../commDefs.h" namespace VGUGV { namespace Common { template<class T_FeatureType, class T_FeatureDescriptorType> Frame_CPU<T_FeatureType, T_FeatureDescriptorType>::~Frame_CPU() { for(int i = 0; i < mnPyramidLevels; i++) { if(mpPyramidImages != NULL) { delete [] mpPyramidImages[i]; mpPyramidImages[i] = NULL; } if(mpPyramidImageGradientMag != NULL) { delete [] mpPyramidImageGradientMag[i]; mpPyramidImageGradientMag[i] = NULL; } if(mpPyramidImageGradientVec != NULL) { delete [] mpPyramidImageGradientVec[i]; mpPyramidImageGradientVec[i] = NULL; } } delete [] mpPyramidImages; mpPyramidImages = NULL; delete [] mpPyramidImageGradientMag; mpPyramidImageGradientMag = NULL; delete [] mpPyramidImageGradientVec; mpPyramidImageGradientVec = NULL; } template<class T_FeatureType, class T_FeatureDescriptorType> void Frame_CPU<T_FeatureType, T_FeatureDescriptorType>::computeImagePyramids(int nTotalLevels) { if(nTotalLevels < 1) return; if(mpPyramidImages != NULL) return; mnPyramidLevels = nTotalLevels; std::cout << "Computing image pyramids for " << nTotalLevels << " levels..." 
<< std::endl;   // tail of a log statement whose start lies before this chunk

    // dynamically allocate memory space for mpPyramidImages
    // (one 8-bit gray buffer per pyramid level; level 0 is full resolution)
    mpPyramidImages = new unsigned char*[nTotalLevels];
    // copy first level directly from the frame's gray image
    mpPyramidImages[0] = new unsigned char[mnRows * mnCols];
    memcpy(mpPyramidImages[0], mpGrayImageData_CPU, mnRows * mnCols);
    // Build each coarser level by averaging a sparse 2x2 neighbourhood of the
    // previous (finer) level; every level halves both image dimensions.
    for (int i = 1; i < nTotalLevels; i++)
    {
        printf("Computing pyramid level %d/%d (1 pre-copied)...\n", (i + 1), nTotalLevels);
        int scale = 1 << i;          // downsampling factor w.r.t. level 0
        // NOTE(review): integer division — assumes mnRows/mnCols stay divisible
        // by scale for every requested level; confirm with callers.
        int nRows = mnRows / scale;
        int nCols = mnCols / scale;
        int nSize = nRows * nCols;
        mpPyramidImages[i] = new unsigned char[nSize];
        unsigned char* p_dst = mpPyramidImages[i];
        unsigned char* p_src = mpPyramidImages[i-1];   // finer level, (2*nRows) x (2*nCols)
        for (int r = 0; r < nRows; r++)
        {
            for (int c = 0; c < nCols; c++, p_dst++)
            {
                // centre of the source neighbourhood in the previous level
                int rowIndexInPreviousImage = 2 * r + 1;
                int colIndexInPreviousImage = 2 * c + 1;
                if (c == nCols - 1 || r == nRows - 1) // on boundary: no full neighbourhood, copy centre pixel
                {
                    // previous-level row stride is nCols * 2
                    int index = rowIndexInPreviousImage * nCols * 2 + colIndexInPreviousImage;
                    p_dst[0] = (p_src + index)[0];
                    continue;
                }
                // four diagonal neighbours around the centre pixel
                int P00_index = (rowIndexInPreviousImage - 1) * nCols * 2 + (colIndexInPreviousImage - 1); // top-left
                int P01_index = P00_index + 2;          // top-right (two source columns over)
                int P10_index = P00_index + nCols * 4;  // bottom-left (two source rows down)
                int P11_index = P10_index + 2;          // bottom-right
                // widen to 16 bits before summing so four 8-bit values cannot overflow
                unsigned short sum = static_cast<unsigned short>((p_src + P00_index)[0])
                        + static_cast<unsigned short>((p_src + P01_index)[0])
                        + static_cast<unsigned short>((p_src + P10_index)[0])
                        + static_cast<unsigned short>((p_src + P11_index)[0]);
                p_dst[0] = static_cast<unsigned char>(sum / 4);
            }
        }
        {
            // cursors only; the buffers remain owned by mpPyramidImages
            p_dst = NULL;
            p_src = NULL;
        }
    }
}

// Computes per-level central-difference gradients and their magnitudes for
// every pyramid level. Requires computeImagePyramids to have populated
// mpPyramidImages first; no-ops when gradients already exist or
// nTotalLevels is 0.
template<class T_FeatureType, class T_FeatureDescriptorType>
void Frame_CPU<T_FeatureType, T_FeatureDescriptorType>::computeImagePyramidsGradients(int nTotalLevels)
{
    if (nTotalLevels == 0)
        return;
    if (mpPyramidImageGradientMag != NULL)
        return; // already computed
    // NOTE(review): mnPyramidLevels is updated before the pyramid-data check
    // below, so it can change even when the function bails out — confirm
    // whether that is intended.
    mnPyramidLevels = nTotalLevels;
    if (mpPyramidImages == NULL)
    {
        printf("Frame_CPU::computeImagePyramidsGradients failed due to no pyramid image data ...\n");
        return;
    }
    printf("Pyramid data present. Will compute gradients and their magnitudes...\n");
    mpPyramidImageGradientMag = new float*[nTotalLevels];
    mpPyramidImageGradientVec = new Eigen::Vector2f*[nTotalLevels];
    for (int i = 0; i < nTotalLevels; i++)
    {
        printf("Computing gradients for level %d.\n", i+1);
        int scale = 1 << i;
        int nRows = mnRows / scale;
        int nCols = mnCols / scale;
        mpPyramidImageGradientMag[i] = new float[nRows * nCols];
        mpPyramidImageGradientVec[i] = new Eigen::Vector2f[nRows * nCols];
        unsigned char* pImageSrc = mpPyramidImages[i];
        float* pGradientMag = mpPyramidImageGradientMag[i];
        Eigen::Vector2f* pGradientVec = mpPyramidImageGradientVec[i];
        for (int r = 0; r < nRows; r++)
        {
            for (int c = 0; c < nCols; c++)
            {
                int index = r * nCols + c;
                // image border: central differences undefined, emit zero gradient
                if (r == 0 || r == nRows - 1 || c == 0 || c == nCols - 1)
                {
                    pGradientMag[index] = 0.0f;
                    pGradientVec[index] = Eigen::Vector2f(0.0f, 0.0f);
                    continue;
                }
                // coordinates of this pixel in the full-resolution image;
                // currently unused while the masking check below is disabled
                int rInTopLevel = scale * r + scale - 1;
                int cInTopLevel = scale * c + scale - 1;
                // TODO(andrei): re-enable if we want masking support in DynSLAM.
                // if (!pixelLieOutsideImageMask(rInTopLevel, cInTopLevel)) // check whether it is being masked or not...
                // {
                //     pGradientMag[index] = 0.0f;
                //     pGradientVec[index] = Eigen::Vector2f(0.0f, 0.0f);
                //     continue;
                // }
                // central differences: dx along columns, dy along rows
                int indexRght = index + 1;
                int indexLeft = index - 1;
                int indexUp = index - nCols;
                int indexBot = index + nCols;
                float indensityRght = pImageSrc[indexRght];
                float indensityLeft = pImageSrc[indexLeft];
                float indensityUp = pImageSrc[indexUp];
                float indensityBot = pImageSrc[indexBot];
                float dx = (indensityRght - indensityLeft)*0.5f;
                float dy = (indensityBot - indensityUp)*0.5f;
                pGradientMag[index] = sqrt(dx * dx + dy * dy);
                pGradientVec[index] = Eigen::Vector2f(dx, dy);
            }
        } // end for r c
        // cursors only; buffers remain owned by the member arrays
        pImageSrc = NULL;
        pGradientMag = NULL;
        pGradientVec = NULL;
    } // end for i
}

// Returns true when pixel (r, c) lies in the valid region of the optional
// image mask (mask value >= 100); false when no mask is present.
// NOTE(review): `index` is checked against < 0 only — there is no upper-bound
// check against mnRows * mnCols; confirm callers stay in range.
template<class T_FeatureType, class T_FeatureDescriptorType>
bool Frame_CPU<T_FeatureType, T_FeatureDescriptorType>::pixelLieOutsideImageMask(int r, int c)
{
    unsigned char* pMask = mpImageMaskData_CPU;
    if(pMask == NULL)
        return false;
    int index = r*mnCols + c;
    if(index < 0 || pMask[index] < 100)
        return false;
    return true;
}

// Accessor for the CPU-resident gray image; the device argument is ignored
// in this CPU-only implementation.
template<class T_FeatureType, class T_FeatureDescriptorType>
unsigned char* Frame_CPU<T_FeatureType, T_FeatureDescriptorType>::getGrayImage(DEVICE_TYPE device)
{
    return mpGrayImageData_CPU;
}

// Pyramid image accessor; NULL when pyramids were never computed.
// NOTE(review): nLevel is not range-checked against mnPyramidLevels.
template<class T_FeatureType, class T_FeatureDescriptorType>
unsigned char* Frame_CPU<T_FeatureType, T_FeatureDescriptorType>::getPyramidImage(int nLevel, DEVICE_TYPE type)
{
    if(mpPyramidImages == NULL)
        return NULL;
    return mpPyramidImages[nLevel];
}

// Gradient-magnitude accessor for one pyramid level; NULL when not computed.
template<class T_FeatureType, class T_FeatureDescriptorType>
float* Frame_CPU<T_FeatureType, T_FeatureDescriptorType>::getPyramidImageGradientMag(int nLevel, DEVICE_TYPE type)
{
    if(mpPyramidImageGradientMag == NULL)
        return NULL;
    return mpPyramidImageGradientMag[nLevel];
}

// Gradient-vector accessor for one pyramid level; NULL when not computed.
template<class T_FeatureType, class T_FeatureDescriptorType>
Eigen::Vector2f* Frame_CPU<T_FeatureType, T_FeatureDescriptorType>::getPyramidImageGradientVec(int nLevel, DEVICE_TYPE device)
{
    if(mpPyramidImageGradientVec == NULL)
        return NULL;
    return mpPyramidImageGradientVec[nLevel];
}

// Explicit instantiation for the depth-map feature configuration used by
// this project.
template class Frame_CPU<Feature_depthMap<DepthHypothesis_GMM>, DepthHypothesis_GMM>;
}
}
3,118
1,491
#include <stdlib.h>
#include <string.h>

#include "cli.h"

#include "fio.h"
#include "fio_cli.h"
#include "http.h"
#include "redis_engine.h"

/* fio state callback (registered for FIO_CALL_ON_FINISH): destroys the Redis
 * pub/sub engine and restores the built-in cluster engine as the default. */
static void redis_cleanup(void *e_) {
  redis_engine_destroy(e_);
  FIO_LOG_DEBUG("Cleaned up redis engine object.");
  FIO_PUBSUB_DEFAULT = FIO_PUBSUB_CLUSTER;
}

/* Parses command-line arguments (with environment-variable fallbacks for
 * address, port, public folder and Redis URL) and, when a Redis URL is
 * present, installs a Redis pub/sub engine as the process default. */
void initialize_cli(int argc, char const *argv[]) {
  /*     ****  Command line arguments ****     */
  fio_cli_start(
      argc, argv, 0, 0, NULL,
      FIO_CLI_PRINT_HEADER("Address binding:"),
      FIO_CLI_INT("-port -p port number to listen to. defaults port 3000"),
      FIO_CLI_STRING("-bind -b address to listen to. defaults any available."),
      FIO_CLI_PRINT_HEADER("Concurrency:"),
      FIO_CLI_INT("-workers -w number of processes to use."),
      FIO_CLI_INT("-threads -t number of threads per process."),
      FIO_CLI_PRINT_HEADER("HTTP Server:"),
      FIO_CLI_STRING("-public -www public folder, for static file service."),
      FIO_CLI_INT(
          "-keep-alive -k HTTP keep-alive timeout (0..255). default: ~5s"),
      FIO_CLI_INT("-max-body -maxbd HTTP upload limit. default: ~50Mb"),
      FIO_CLI_BOOL("-log -v request verbosity (logging)."),
      FIO_CLI_PRINT_HEADER("WebSocket Server:"),
      FIO_CLI_INT("-ping websocket ping interval (0..255). default: ~40s"),
      FIO_CLI_INT("-max-msg -maxms incoming websocket message size limit. "
                  "default: ~250Kb"),
      FIO_CLI_PRINT_HEADER("Redis support:"),
      FIO_CLI_STRING("-redis -r an optional Redis URL server address."),
      FIO_CLI_PRINT("\t\ti.e.: redis://user:password@localhost:6379/"));

  /* Test and set any default options */
  /* Bind address: fall back to the ADDRESS environment variable. Aliases are
   * not linked automatically when set programmatically, so both -b and -bind
   * are assigned explicitly. */
  if (!fio_cli_get("-b")) {
    char *tmp = getenv("ADDRESS");
    if (tmp) {
      fio_cli_set("-b", tmp);
      fio_cli_set("-bind", tmp);
    }
  }
  /* Port: fall back to PORT, or 3000 when no explicit address was bound. */
  if (!fio_cli_get("-p")) {
    /* Test environment as well and make sure address is missing */
    char *tmp = getenv("PORT");
    if (!tmp && !fio_cli_get("-b"))
      tmp = "3000";
    /* Set default (unlike cmd line arguments, aliases are manually set) */
    fio_cli_set("-p", tmp);
    fio_cli_set("-port", tmp);
  }
  /* Static file folder: fall back to HTTP_PUBLIC_FOLDER. */
  if (!fio_cli_get("-public")) {
    char *tmp = getenv("HTTP_PUBLIC_FOLDER");
    if (tmp) {
      fio_cli_set("-public", tmp);
      fio_cli_set("-www", tmp);
    }
  }
  /* Redis URL: fall back to REDIS_URL. */
  if (!fio_cli_get("-redis")) {
    char *tmp = getenv("REDIS_URL");
    if (tmp) {
      fio_cli_set("-redis", tmp);
      fio_cli_set("-r", tmp);
    }
  }
  /* When a non-empty Redis URL is available, parse it and create the engine.
   * On success the engine becomes the process-wide pub/sub default and is
   * scheduled for destruction at shutdown via redis_cleanup. */
  if (fio_cli_get("-redis") && strlen(fio_cli_get("-redis"))) {
    FIO_LOG_INFO("* Initializing Redis connection to %s\n",
                 fio_cli_get("-redis"));
    http_url_s info =
        http_url_parse(fio_cli_get("-redis"), strlen(fio_cli_get("-redis")));
    fio_pubsub_engine_s *e = redis_engine_create(.address = info.host,
                                                 .port = info.port,
                                                 .auth = info.password);
    if (e) {
      fio_state_callback_add(FIO_CALL_ON_FINISH, redis_cleanup, e);
      FIO_PUBSUB_DEFAULT = e;
    } else {
      FIO_LOG_ERROR("Failed to create redis engine object.");
    }
  }
}

/* Releases the CLI parser's resources; call once at shutdown. */
void free_cli(void) { fio_cli_end(); }
1,431
6,541
#include <stdio.h> #include <string.h> #include <pthread.h> #include <GLES2/gl2.h> #include <math.h> #include <assert.h> #include <emscripten/emscripten.h> #include <emscripten/html5.h> #include <emscripten/threading.h> #include <bits/errno.h> #include <stdlib.h> int main() { if (!emscripten_supports_offscreencanvas()) { printf("Current browser does not support OffscreenCanvas. Skipping this test.\n"); #ifdef REPORT_RESULT REPORT_RESULT(1); #endif return 0; } EmscriptenWebGLContextAttributes attr; emscripten_webgl_init_context_attributes(&attr); EMSCRIPTEN_WEBGL_CONTEXT_HANDLE ctx = emscripten_webgl_create_context("#canvas", &attr); printf("Created context with handle %u\n", (unsigned int)ctx); emscripten_webgl_make_context_current(ctx); printf("You should see the canvas fade from black to red.\n"); double color = 0; for(int i = 0; i < 100; ++i) { color += 0.01; glClearColor(color, 0, 0, 1); glClear(GL_COLOR_BUFFER_BIT); #if ASYNCIFY emscripten_sleep(16); #else double now = emscripten_get_now(); while(emscripten_get_now() - now < 16) /*no-op*/; #endif } emscripten_webgl_make_context_current(0); emscripten_webgl_destroy_context(ctx); printf("Thread quit\n"); #ifdef REPORT_RESULT REPORT_RESULT(1); #endif }
521
4,002
<reponame>cornos/Cornos<filename>src/main/java/me/zeroX150/cornos/etc/config/MConfMultiOption.java
package me.zeroX150.cornos.etc.config;

import me.zeroX150.cornos.etc.helper.STL;

/**
 * A configuration key that is restricted to a fixed set of string values.
 * Assigning a value outside {@link #possibleValues} is rejected with a user
 * notification and leaves both the stored value and {@link #current}
 * unchanged.
 */
public class MConfMultiOption extends MConf.ConfigKey {
    /** Every value this key is allowed to take. */
    public String[] possibleValues;
    /** Index into {@link #possibleValues} of the currently selected value. */
    public int current = 0;

    public MConfMultiOption(String k, String initialValue, String[] possible) {
        super(k, initialValue, "No description");
        this.possibleValues = possible;
    }

    public MConfMultiOption(String k, String initialValue, String[] possible, String description) {
        this(k, initialValue, possible);
        this.description = description;
    }

    @Override
    public void setValue(String newV) {
        // Look the requested value up in the allowed set.
        int matchIndex = -1;
        for (int i = 0; i < possibleValues.length; i++) {
            if (possibleValues[i].equals(newV)) {
                matchIndex = i;
                break;
            }
        }
        if (matchIndex < 0) {
            // Not an allowed value: tell the user and bail without changing state.
            STL.notifyUser("[Config] You tried to set a value that is not possible. The values you can set are "
                    + String.join(", ", possibleValues));
            return;
        }
        current = matchIndex;
        super.setValue(newV);
    }
}
543
357
<reponame>sgravrock/cedar
#import <Foundation/Foundation.h>
#import "Cedar.h"
#import "CDRNullabilityCompat.h"

NS_ASSUME_NONNULL_BEGIN

/// A CDRExampleReporter test double exposing read-only lists of started and
/// finished examples / example groups — presumably populated by the reporter
/// protocol callbacks (implementation not visible in this header; confirm
/// against the .m file).
@interface TestReporter : NSObject <CDRExampleReporter>

/// Examples whose execution has started.
@property (nonatomic, readonly) NSArray *startedExamples;

/// Examples whose execution has finished.
@property (nonatomic, readonly) NSArray *finishedExamples;

/// Example groups whose execution has started.
@property (nonatomic, readonly) NSArray *startedExampleGroups;

/// Example groups whose execution has finished.
@property (nonatomic, readonly) NSArray *finishedExampleGroups;

@end

NS_ASSUME_NONNULL_END
162
5,079
<reponame>kokosing/hue<gh_stars>1000+ class Snapshot(object): """ This class is used by the histogram meter """ MEDIAN = 0.5 P75_Q = 0.75 P95_Q = 0.95 P99_Q = 0.99 P999_Q = 0.999 def __init__(self, values): super(Snapshot, self).__init__() self.values = sorted(values) def get_size(self): "get current size" return len(self.values) def get_median(self): "get current median" return self.get_percentile(Snapshot.MEDIAN) def get_75th_percentile(self): "get current 75th percentile" return self.get_percentile(Snapshot.P75_Q) def get_95th_percentile(self): "get current 95th percentile" return self.get_percentile(Snapshot.P95_Q) def get_99th_percentile(self): "get current 99th percentile" return self.get_percentile(Snapshot.P99_Q) def get_999th_percentile(self): "get current 999th percentile" return self.get_percentile(Snapshot.P999_Q) def get_percentile(self, percentile): """ get custom percentile :param percentile: float value between 0 and 1 """ if percentile < 0 or percentile > 1: raise ValueError("{0} is not in [0..1]".format(percentile)) length = len(self.values) if length == 0: return 0 pos = percentile * (length + 1) if pos < 1: return self.values[0] if pos >= length: return self.values[-1] lower = self.values[int(pos) - 1] upper = self.values[int(pos)] return lower + (pos - int(pos)) * (upper - lower)
764