<reponame>gokay/cert-issuer import unittest import copy from cert_issuer.models import verify_presentation presentation_example = { "@context": [ "https://www.w3.org/2018/credentials/v1" ], "type": [ "VerifiablePresentation" ], "verifiableCredential": [ { "@context": [ "https://www.w3.org/2018/credentials/v1", "https://www.blockcerts.org/schema/3.0-alpha/context.json", "https://www.w3.org/2018/credentials/examples/v1", { "metadataJson": { "@id": "https://schemas.learningmachine.com/2017/blockcerts/metadata", "@type": "https://schemas.learningmachine.com/2017/types/text/json" }, "displayHtml": { "@id": "https://schemas.learningmachine.com/2017/blockcerts/displayHtml", "@type": "https://schemas.learningmachine.com/2017/types/text/html" }, "nonce": { "@id": "https://schemas.learningmachine.com/2017/blockcerts/nonce", "@type": "https://schema.org/Text" }, "universalIdentifier": { "@id": "https://schemas.learningmachine.com/2017/blockcerts/identifier", "@type": "https://schema.org/Text" } } ], "id": "urn:uuid:bbba8553-8ec1-445f-82c9-a57251dd731c", "metadataJson": "{\"schema\":{\"$schema\":\"http://json-schema.org/draft-04/schema#\",\"type\":\"object\",\"properties\":{\"displayOrder\":{\"type\":\"array\",\"items\":{\"type\":\"string\"}},\"certificate\":{\"order\":[],\"type\":\"object\",\"properties\":{\"issuingInstitution\":{\"title\":\"Issuing Institution\",\"type\":\"string\",\"default\":\"Learning Machine Technologies, Inc.\"}}},\"recipient\":{}}},\"certificate\":{\"issuingInstitution\":\"Learning Machine Technologies, Inc.\"},\"recipient\":{},\"displayOrder\":[\"certificate.issuingInstitution\"]}", "displayHtml": "<b>hello world</b>", "nonce": "814ce340-12f3-414b-af91-a0f9489e5dbc", "universalIdentifier": "ab569127-34bb-5784-bced-00b7e0e82ac9", "type": [ "VerifiableCredential", "BlockcertsCredential" ], "issuer": "https://raw.githubusercontent.com/AnthonyRonning/https-github.com-labnol-files/master/issuer-eth.json", "issuanceDate": "2010-01-01T19:33:24Z", "credentialSubject": { "id": "did:key:<KEY>", "alumniOf": { "id": "did:example:c276e12ec21ebfeb1f712ebc6f1" } }, "proof": { "type": "MerkleProof2019", "created": "2020-03-23T15:38:11.804838", "proofValue": "<KEY>XH<KEY>", "proofPurpose": "assertionMethod", "verificationMethod": "ecdsa-koblitz-pubkey:0x7e30a37763e6Ba1fFeDE1750bBeFB4c60b17a1B3" } } ] } class UnitValidationV3 (unittest.TestCase): def test_verify_presentation_invalid_credential (self): candidate = copy.deepcopy(presentation_example) del candidate['verifiableCredential'][0]['credentialSubject'] try: verify_presentation(candidate) except: assert True return assert False def test_verify_presentation_valid_credential (self): candidate = copy.deepcopy(presentation_example) try: verify_presentation(candidate) except: assert False return assert True if __name__ == '__main__': unittest.main()
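The two tests above emulate exception assertions with manual try/except bookkeeping. For reference, a minimal sketch of the same negative check written with unittest's context manager (presentation_example and verify_presentation are the objects defined in the module above):

    import copy
    import unittest

    class UnitValidationV3Alt(unittest.TestCase):
        # Same check as test_verify_presentation_invalid_credential, but
        # letting unittest handle the exception bookkeeping.
        def test_missing_credential_subject_is_rejected(self):
            candidate = copy.deepcopy(presentation_example)
            del candidate['verifiableCredential'][0]['credentialSubject']
            with self.assertRaises(Exception):
                verify_presentation(candidate)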
<filename>STM32F1/libraries/MapleCoOS116/utility/OsError.h
/**
 *******************************************************************************
 * @file      OsError.h
 * @version   V1.1.6
 * @date      2014.05.23
 * @brief     Error handling header file
 * @details   This file raises compile-time errors for invalid settings in the
 *            OS configuration (OsConfig.h).
 *******************************************************************************
 * @copy
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *  * Neither the name of the <ORGANIZATION> nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * <h2><center>&copy; COPYRIGHT 2014 CooCox </center></h2>
 *******************************************************************************
 */

#ifndef _ERROR_H
#define _ERROR_H

#if (CFG_SYSTICK_FREQ > 1000) || (CFG_SYSTICK_FREQ < 1)
#error " OsConfig.h, System Tick time must be between 1ms and 1s! "
#endif

#if CFG_MAX_USER_TASKS > 253
#error " OsConfig.h, CFG_MAX_USER_TASKS must be <= 253! "
#endif

#if CFG_LOWEST_PRIO > 254
#error " OsConfig.h, CFG_LOWEST_PRIO must be <= 254! "
#endif

#if CFG_IDLE_STACK_SIZE < 25
#error " OsConfig.h, CFG_IDLE_STACK_SIZE must be >= 25! "
#endif

#if CFG_ROBIN_EN > 0
#if CFG_TIME_SLICE > 4095
#error " OsConfig.h, CFG_TIME_SLICE must be <= 4095! "
#endif
#endif

#if CFG_TMR_EN > 0
#if CFG_MAX_TMR > 32
#error " OsConfig.h, CFG_MAX_TMR must be <= 32! "
#endif
#endif

#if CFG_MM_EN > 0
#if CFG_MAX_MM > 32
#error " OsConfig.h, CFG_MAX_MM must be <= 32! "
#endif
#endif

#if CFG_KHEAP_EN > 0
#if KHEAP_SIZE < 0x20
#error " OsConfig.h, KHEAP_SIZE must be >= 0x20! "
#endif
#endif

#if CFG_MUTEX_EN > 0
#if CFG_MAX_MUTEX > 254
#error " OsConfig.h, CFG_MAX_MUTEX must be <= 254! "
#endif
#endif

#if CFG_EVENT_EN > 0
#if (CFG_MAX_EVENT > 254 || CFG_MAX_EVENT <= 0)
#error " OsConfig.h, CFG_MAX_EVENT must be <= 254 && > 0! "
#endif

#if CFG_QUEUE_EN > 0
#if CFG_MAX_QUEUE > CFG_MAX_EVENT
#error " OsConfig.h, CFG_MAX_QUEUE must be <= CFG_MAX_EVENT! "
#endif
#endif
#endif  /* CFG_EVENT_EN */

#endif  /* _ERROR_H */
<gh_stars>100-1000 package org.loon.framework.android.game.action; import org.loon.framework.android.game.action.sprite.ISprite; /** * Copyright 2008 - 2011 * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. * * @project loonframework * @author chenpeng * @email:<EMAIL> * @version 0.1 */ public class FadeTo extends ActionEvent { public int time; public int currentFrame; public int type; private int opacity; public FadeTo(int type, int speed) { this.type = type; this.setSpeed(speed); } public int getType() { return type; } public void setType(int type) { this.type = type; } void setOpacity(int opacity) { this.opacity = opacity; } public int getOpacity() { return opacity; } public boolean isComplete() { return isComplete; } public float getSpeed() { return time; } public void setSpeed(int delay) { this.time = delay; if (type == ISprite.TYPE_FADE_IN) { this.currentFrame = this.time; } else { this.currentFrame = 0; } } public void onLoad() { } public void update(long elapsedTime) { if (type == ISprite.TYPE_FADE_IN) { currentFrame--; if (currentFrame == 0) { setOpacity(0); isComplete = true; } } else { currentFrame++; if (currentFrame == time) { setOpacity(0); isComplete = true; } } double op = ((double) currentFrame / (double) time) * 255; setOpacity((int) op); if (opacity > 0) { original.setAlpha(((float) opacity / 255)); } } }
import wifi
import socketpool

TIMEOUT = None

print("Connecting to Wifi")
wifi.radio.connect("mySSID", "myPASS")

pool = socketpool.SocketPool(wifi.radio)

print("Finding IP address")
print(wifi.radio.ipv4_address)
HOST = str(wifi.radio.ipv4_address)
PORT = 80  # Port to listen on

print("Creating socket")
sock = pool.socket(pool.AF_INET, pool.SOCK_STREAM)
sock.bind((HOST, PORT))
sock.listen(1)

print("Accepting connections")
conn, addr = sock.accept()
with conn:
    print("Connected by", addr)
    buff = bytearray(128)
    print("Receiving")
    # recvfrom_into returns a (bytes_received, sender_address) tuple.
    numbytes = conn.recvfrom_into(buff)
    print(buff[: numbytes[0]])
    if numbytes[0]:  # a tuple is always truthy; test the byte count instead
        print("Sending")
        conn.send(buff[: numbytes[0]])
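A quick way to exercise the echo server above from a desktop machine on the same network is a plain CPython client; SERVER_IP is a placeholder for whatever address the board prints at startup:

    import socket

    SERVER_IP = "192.168.1.42"  # placeholder: use the address printed by the board
    with socket.create_connection((SERVER_IP, 80)) as s:
        s.sendall(b"hello")
        print(s.recv(128))  # the server echoes the received bytes back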
import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; import java.security.SecureRandom; import java.util.Base64; public class HashWithoutSalt { // BAD - Hash without a salt. public String getSHA256Hash(String password) throws NoSuchAlgorithmException { MessageDigest md = MessageDigest.getInstance("SHA-256"); byte[] messageDigest = md.digest(password.getBytes()); return Base64.getEncoder().encodeToString(messageDigest); } // GOOD - Hash with a salt. public String getSHA256Hash(String password, byte[] salt) throws NoSuchAlgorithmException { MessageDigest md = MessageDigest.getInstance("SHA-256"); md.update(salt); byte[] messageDigest = md.digest(password.getBytes()); return Base64.getEncoder().encodeToString(messageDigest); } // BAD - Hash without a salt. public String getSHA256Hash2(String password) throws NoSuchAlgorithmException { MessageDigest md = MessageDigest.getInstance("SHA-256"); md.update(password.getBytes()); byte[] messageDigest = md.digest(); return Base64.getEncoder().encodeToString(messageDigest); } // GOOD - Hash with a salt. public String getSHA256Hash2(String password, byte[] salt) throws NoSuchAlgorithmException { MessageDigest md = MessageDigest.getInstance("SHA-256"); md.update(salt); md.update(password.getBytes()); byte[] messageDigest = md.digest(); return Base64.getEncoder().encodeToString(messageDigest); } // GOOD - Hash with a salt concatenated with the password. public String getSHA256Hash3(String password, byte[] salt) throws NoSuchAlgorithmException { MessageDigest md = MessageDigest.getInstance("SHA-256"); byte[] passBytes = password.getBytes(); byte[] allBytes = new byte[passBytes.length + salt.length]; System.arraycopy(passBytes, 0, allBytes, 0, passBytes.length); System.arraycopy(salt, 0, allBytes, passBytes.length, salt.length); byte[] messageDigest = md.digest(allBytes); byte[] cipherBytes = new byte[32 + salt.length]; // SHA-256 is 32 bytes long System.arraycopy(messageDigest, 0, cipherBytes, 0, 32); System.arraycopy(salt, 0, cipherBytes, 32, salt.length); return Base64.getEncoder().encodeToString(cipherBytes); } // GOOD - Hash with a given salt stored somewhere else. public String getSHA256Hash(String password, String salt) throws NoSuchAlgorithmException { MessageDigest alg = MessageDigest.getInstance("SHA-256"); String payload = password+":"+salt; return Base64.getEncoder().encodeToString(alg.digest(payload.getBytes(java.nio.charset.StandardCharsets.UTF_8))); } // GOOD - Hash with a given salt stored somewhere else. public String getSHA256Hash2(String password, String salt, boolean useSalt) throws NoSuchAlgorithmException { MessageDigest alg = MessageDigest.getInstance("SHA-256"); String payload = useSalt?password+":"+salt:password; return Base64.getEncoder().encodeToString(alg.digest(payload.getBytes(java.nio.charset.StandardCharsets.UTF_8))); } // GOOD - Hash with a salt for a variable named passwordHash, whose value is a hash used as an input for a hashing function. public String getSHA256Hash3(String passwordHash) throws NoSuchAlgorithmException { MessageDigest md = MessageDigest.getInstance("SHA-256"); byte[] messageDigest = md.digest(passwordHash.getBytes()); return Base64.getEncoder().encodeToString(messageDigest); } public void update(SHA256 sha256, byte[] foo, int start, int len) throws NoSuchAlgorithmException { sha256.update(foo, start, len); } // GOOD - Invoking a wrapper implementation through qualifier with a salt. 
public String getWrapperSHA256Hash(String password) throws NoSuchAlgorithmException, ClassNotFoundException, IllegalAccessException, InstantiationException { SHA256 sha256 = new SHA256(); byte[] salt = getSalt(); byte[] passBytes = password.getBytes(); sha256.update(passBytes, 0, passBytes.length); sha256.update(salt, 0, salt.length); return Base64.getEncoder().encodeToString(sha256.digest()); } // BAD - Invoking a wrapper implementation through qualifier without a salt. public String getWrapperSHA256Hash2(String password) throws NoSuchAlgorithmException, ClassNotFoundException, IllegalAccessException, InstantiationException { SHA256 sha256 = new SHA256(); byte[] passBytes = password.getBytes(); sha256.update(passBytes, 0, passBytes.length); return Base64.getEncoder().encodeToString(sha256.digest()); } // GOOD - Invoking a wrapper implementation through qualifier and argument with a salt. public String getWrapperSHA256Hash3(String password) throws NoSuchAlgorithmException { SHA256 sha256 = new SHA256(); byte[] salt = getSalt(); byte[] passBytes = password.getBytes(); sha256.update(passBytes, 0, passBytes.length); update(sha256, salt, 0, salt.length); return Base64.getEncoder().encodeToString(sha256.digest()); } // BAD - Invoking a wrapper implementation through argument without a salt. public String getWrapperSHA256Hash4(String password) throws NoSuchAlgorithmException { SHA256 sha256 = new SHA256(); byte[] passBytes = password.getBytes(); update(sha256, passBytes, 0, passBytes.length); return Base64.getEncoder().encodeToString(sha256.digest()); } // GOOD - Invoking a wrapper implementation through argument with a salt. public String getWrapperSHA256Hash5(String password) throws NoSuchAlgorithmException { SHA256 sha256 = new SHA256(); byte[] salt = getSalt(); byte[] passBytes = password.getBytes(); update(sha256, passBytes, 0, passBytes.length); update(sha256, salt, 0, salt.length); return Base64.getEncoder().encodeToString(sha256.digest()); } // BAD - Invoke a wrapper implementation with a salt, which is not detected with an interface type variable. public String getSHA512Hash8(byte[] passphrase) throws NoSuchAlgorithmException, ClassNotFoundException, IllegalAccessException, InstantiationException { Class c = Class.forName("SHA512"); HASH sha512 = (HASH) (c.newInstance()); byte[] tmp = new byte[4]; byte[] key = new byte[32 * 2]; for (int i = 0; i < 2; i++) { sha512.init(); tmp[3] = (byte) i; sha512.update(passphrase, 0, passphrase.length); System.arraycopy(sha512.digest(), 0, key, i * 32, 32); } return Base64.getEncoder().encodeToString(key); } public static byte[] getSalt() throws NoSuchAlgorithmException { SecureRandom sr = SecureRandom.getInstance("SHA1PRNG"); byte[] salt = new byte[16]; sr.nextBytes(salt); return salt; } }
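For comparison, the salt-then-hash pattern that the GOOD cases above follow can be sketched in Python with the standard library. This is a minimal illustration of the pattern only; real password storage should normally use a dedicated KDF such as PBKDF2 or Argon2:

    import base64
    import hashlib
    import os

    def sha256_with_salt(password, salt=None):
        # Generate a random 16-byte salt when none is supplied, then hash
        # salt || password, mirroring md.update(salt); md.digest(password).
        salt = salt if salt is not None else os.urandom(16)
        digest = hashlib.sha256(salt + password.encode("utf-8")).digest()
        return base64.b64encode(digest).decode("ascii"), salt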
/* * jit-cache.cpp - Translation cache management * * Kheperix (C) 2003-2005 <NAME> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "sysdeps.h" #include "vm_alloc.h" #include "cpu/jit/jit-cache.hpp" #define DEBUG 0 #include "debug.h" // Default cache size in KB #if defined(__alpha__) const int JIT_CACHE_SIZE = 2 * 1024; #elif defined(__powerpc__) || defined(__ppc__) const int JIT_CACHE_SIZE = 4 * 1024; #else const int JIT_CACHE_SIZE = 8 * 1024; #endif const int JIT_CACHE_SIZE_GUARD = 4096; basic_jit_cache::basic_jit_cache() : cache_size(0), tcode_start(NULL), code_start(NULL), code_p(NULL), code_end(NULL), data(NULL) { } basic_jit_cache::~basic_jit_cache() { kill_translation_cache(); // Release data pool data_chunk_t *p = data; while (p) { data_chunk_t *d = p; p = p->next; D(bug("basic_jit_cache: Release data pool %p (%d KB)\n", d, d->size / 1024)); vm_release(d, d->size); } } bool basic_jit_cache::init_translation_cache(uint32 size) { size *= 1024; // Round up translation cache size to 16 KB boundaries const uint32 roundup = 16 * 1024; cache_size = (size + JIT_CACHE_SIZE_GUARD + roundup - 1) & -roundup; assert(cache_size > 0); tcode_start = (uint8 *)vm_acquire(cache_size, VM_MAP_PRIVATE | VM_MAP_32BIT); if (tcode_start == VM_MAP_FAILED) { tcode_start = NULL; return false; } if (vm_protect(tcode_start, cache_size, VM_PAGE_READ | VM_PAGE_WRITE | VM_PAGE_EXECUTE) < 0) { vm_release(tcode_start, cache_size); tcode_start = NULL; return false; } D(bug("basic_jit_cache: Translation cache: %d KB at %p\n", cache_size / 1024, tcode_start)); code_start = tcode_start; code_p = code_start; code_end = code_p + size; return true; } void basic_jit_cache::kill_translation_cache() { if (tcode_start) { D(bug("basic_jit_cache: Release translation cache\n")); vm_release(tcode_start, cache_size); cache_size = 0; tcode_start = NULL; } } bool basic_jit_cache::initialize(void) { if (cache_size == 0) set_cache_size(JIT_CACHE_SIZE); return tcode_start && cache_size; } void basic_jit_cache::set_cache_size(uint32 size) { kill_translation_cache(); if (size) init_translation_cache(size); } uint8 * basic_jit_cache::copy_data(const uint8 *block, uint32 size) { const int ALIGN = 16; uint8 *ptr; if (data && (data->offs + size) < data->size) ptr = (uint8 *)data + data->offs; else { // No free space left, allocate a new chunk uint32 to_alloc = sizeof(*data) + size + ALIGN; uint32 page_size = vm_get_page_size(); to_alloc = (to_alloc + page_size - 1) & -page_size; D(bug("basic_jit_cache: Allocate data pool (%d KB)\n", to_alloc / 1024)); ptr = (uint8 *)vm_acquire(to_alloc, VM_MAP_PRIVATE | VM_MAP_32BIT); if (ptr == VM_MAP_FAILED) { fprintf(stderr, "FATAL: Could not allocate data pool!\n"); abort(); } data_chunk_t *dcp = (data_chunk_t *)ptr; dcp->size = to_alloc; dcp->offs = (sizeof(*data) + ALIGN - 1) & -ALIGN; dcp->next = data; data = dcp; ptr += dcp->offs; } memcpy(ptr, block, 
size); data->offs += (size + ALIGN - 1) & -ALIGN; D(bug("basic_jit_cache: DATA %p, %d bytes [data=%p, offs=%u]\n", ptr, size, data, data->offs)); return ptr; }
<reponame>holtenko/iotdb
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.iotdb.pulsar;

import org.apache.iotdb.rpc.IoTDBConnectionException;
import org.apache.iotdb.rpc.StatementExecutionException;
import org.apache.iotdb.session.pool.SessionPool;
import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;

import org.apache.pulsar.client.api.Consumer;
import org.apache.pulsar.client.api.Message;
import org.apache.pulsar.client.api.Messages;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class PulsarConsumerThread implements Runnable {
  private static final Logger logger = LoggerFactory.getLogger(PulsarConsumerThread.class);
  private final Consumer<?> consumer;
  private final SessionPool pool;

  public PulsarConsumerThread(Consumer<?> consumer, SessionPool pool)
      throws ClassNotFoundException {
    this.consumer = consumer;
    this.pool = pool;
    Class.forName("org.apache.iotdb.jdbc.IoTDBDriver");
  }

  /** Parses the colon-separated value list according to the given data types. */
  private static List<Object> parseValues(List<TSDataType> types, String[] valuesStr) {
    List<Object> values = new ArrayList<>();
    for (int i = 0; i < valuesStr.length; i++) {
      switch (types.get(i)) {
        case INT64:
          values.add(Long.parseLong(valuesStr[i]));
          break;
        case DOUBLE:
          values.add(Double.parseDouble(valuesStr[i]));
          break;
        case INT32:
          values.add(Integer.parseInt(valuesStr[i]));
          break;
        case TEXT:
          values.add(valuesStr[i]);
          break;
        case FLOAT:
          values.add(Float.parseFloat(valuesStr[i]));
          break;
        case BOOLEAN:
          values.add(Boolean.parseBoolean(valuesStr[i]));
          break;
      }
    }
    return values;
  }

  /** Inserts a single record into IoTDB. */
  private void insert(String data) throws IoTDBConnectionException, StatementExecutionException {
    String[] dataArray = data.split(",");
    String device = dataArray[0];
    long time = Long.parseLong(dataArray[1]);
    List<String> measurements = Arrays.asList(dataArray[2].split(":"));
    List<TSDataType> types = new ArrayList<>();
    for (String type : dataArray[3].split(":")) {
      types.add(TSDataType.valueOf(type));
    }
    List<Object> values = parseValues(types, dataArray[4].split(":"));
    pool.insertRecord(device, time, measurements, types, values);
  }

  /** Batch-inserts records into IoTDB. */
  private void insertDatas(List<String> datas)
      throws IoTDBConnectionException, StatementExecutionException {
    int size = datas.size();
    List<String> deviceIds = new ArrayList<>(size);
    List<Long> times = new ArrayList<>(size);
    List<List<String>> measurementsList = new ArrayList<>(size);
    List<List<TSDataType>> typesList = new ArrayList<>(size);
    List<List<Object>> valuesList = new ArrayList<>(size);
    for (String data : datas) {
      String[] dataArray = data.split(",");
      List<TSDataType> types = new ArrayList<>();
      for (String type : dataArray[3].split(":")) {
        types.add(TSDataType.valueOf(type));
      }
      deviceIds.add(dataArray[0]);
      times.add(Long.parseLong(dataArray[1]));
      measurementsList.add(Arrays.asList(dataArray[2].split(":")));
      typesList.add(types);
      valuesList.add(parseValues(types, dataArray[4].split(":")));
    }
    pool.insertRecords(deviceIds, times, measurementsList, typesList, valuesList);
  }

  @SuppressWarnings("squid:S2068")
  @Override
  public void run() {
    try {
      do {
        Messages<?> messages = consumer.batchReceive();
        List<String> datas = new ArrayList<>(messages.size());
        for (Message<?> message : messages) {
          datas.add(new String(message.getData()));
        }
        insertDatas(datas);
        consumer.acknowledge(messages);
      } while (true);
    } catch (Exception e) {
      logger.error(e.getMessage());
    }
  }
}
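The consumer above expects each Pulsar message payload in the form device,timestamp,m1:m2,TYPE1:TYPE2,v1:v2. A producer-side sketch of that encoding (the field layout is read off the split(",")/split(":") calls in insert/insertDatas; the helper name is hypothetical):

    def encode_record(device, timestamp, measurements, types, values):
        # Inverse of the parsing done in PulsarConsumerThread: comma-separated
        # fields, with colon-separated lists inside each field.
        return ",".join([
            device,
            str(timestamp),
            ":".join(measurements),
            ":".join(types),
            ":".join(str(v) for v in values),
        ])

    # encode_record("root.sg.d1", 1613298242000, ["s1", "s2"], ["INT64", "DOUBLE"], [7, 3.14])
    # -> "root.sg.d1,1613298242000,s1:s2,INT64:DOUBLE,7:3.14"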
/* * Copyright 2016-2017 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.github.blindpirate.gogradle.core.dependency.lock; import com.github.blindpirate.gogradle.core.dependency.GolangDependency; import com.github.blindpirate.gogradle.core.dependency.ResolvedDependency; import com.github.blindpirate.gogradle.core.dependency.produce.ExternalDependencyFactory; import com.github.blindpirate.gogradle.util.DataExchange; import com.github.blindpirate.gogradle.util.IOUtils; import org.gradle.api.Project; import javax.inject.Inject; import javax.inject.Singleton; import java.io.File; import java.util.Collection; import java.util.Comparator; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.stream.Collectors; import static com.github.blindpirate.gogradle.core.GolangConfiguration.BUILD; import static com.github.blindpirate.gogradle.core.GolangConfiguration.TEST; import static com.github.blindpirate.gogradle.util.DataExchange.parseYaml; @Singleton public class DefaultLockedDependencyManager extends ExternalDependencyFactory implements LockedDependencyManager { public static final String WARNING = "# This file is generated by gogradle automatically, " + "you should NEVER modify it manually.\n"; @Inject private Project project; private static final String LOCK_FILE = "gogradle.lock"; @Override public void lock(Collection<? extends ResolvedDependency> flatBuildDependencies, Collection<? extends ResolvedDependency> flatTestDependencies) { List<Map<String, Object>> buildNotations = toNotations(flatBuildDependencies); List<Map<String, Object>> testNotations = toNotations(flatTestDependencies); GogradleLockModel model = GogradleLockModel.of(buildNotations, testNotations); String content = DataExchange.toYaml(model); content = insertWarning(content); IOUtils.write(project.getProjectDir(), LOCK_FILE, content); } private String insertWarning(String content) { return WARNING + content; } private List<Map<String, Object>> toNotations(Collection<? extends ResolvedDependency> flatDependencies) { List<Map<String, Object>> ret = flatDependencies.stream() .sorted(Comparator.comparing(GolangDependency::getName)) // to have a deterministic order .map(ResolvedDependency::toLockedNotation) .filter(Objects::nonNull) .collect(Collectors.toList()); ret.forEach(this::deactivateTransitive); return ret; } private void deactivateTransitive(Map<String, Object> map) { map.put("transitive", false); } @Override public String identityFileName() { return LOCK_FILE; } @Override protected List<Map<String, Object>> adapt(File file) { GogradleLockModel model = parseYaml(file, GogradleLockModel.class); return model.getDependencies(BUILD); } @Override protected List<Map<String, Object>> adaptTest(File file) { GogradleLockModel model = parseYaml(file, GogradleLockModel.class); return model.getDependencies(TEST); } }
<filename>euler/client/rpc_manager.cc
/* Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "euler/client/rpc_manager.h"

#include "euler/common/logging.h"
#include "euler/client/impl_register.h"
#include "euler/client/grpc_manager.h"

namespace euler {

bool RpcManager::Initialize(std::shared_ptr<ServerMonitor> monitor,
                            size_t shard_index, const GraphConfig &config) {
  if (monitor_) {
    return true;
  }

  config.Get("num_channels_per_host", &num_channels_per_host_);
  int value_int;
  if (config.Get("bad_host_cleanup_interval", &value_int)) {
    bad_host_cleanup_interval_ = Duration(value_int);
  }
  if (config.Get("bad_host_timeout", &value_int)) {
    bad_host_timeout_ = Duration(value_int);
  }

  bool success = monitor->SetShardCallback(shard_index, &shard_callback_);
  if (success) {
    monitor_ = monitor;
    shard_index_ = shard_index;
  } else {
    EULER_LOG(ERROR) << "Failed to listen on ServerMonitor.";
  }
  return success;
}

RpcManager::~RpcManager() {
  shutdown_ = true;
  bad_hosts_cleaner_.join();
  if (monitor_) {
    monitor_->UnsetShardCallback(shard_index_, &shard_callback_);
  }
}

std::shared_ptr<RpcChannel> RpcManager::GetChannel() {
  std::unique_lock<std::mutex> lock(mu_);
  cv_.wait(lock, [this] { return !channels_.empty(); });
  return channels_[next_replica_index_++ % channels_.size()];
}

void RpcManager::MoveToBadHost(const std::string &host_port) {
  std::lock_guard<std::mutex> lock(mu_);
  DoRemoveChannel(host_port);
  // MoveToBadHost may be called many times for the same host.
  if (std::find_if(bad_hosts_.begin(), bad_hosts_.end(),
                   [host_port](const BadHost &bad_host) {
                     return bad_host.first == host_port;
                   }) == bad_hosts_.end()) {
    bad_hosts_.emplace_back(host_port, std::chrono::system_clock::now());
  }
}

void RpcManager::AddChannel(const std::string &host_port) {
  {
    std::lock_guard<std::mutex> lock(mu_);
#if 0
    for (auto& channel : channels_) {
      if (channel->host_port() == host_port) {
        return;
      }
    }
#endif
    DoAddChannel(host_port);
  }
  cv_.notify_all();
}

void RpcManager::RemoveChannel(const std::string &host_port) {
  std::lock_guard<std::mutex> lock(mu_);
  DoRemoveChannel(host_port);
  bad_hosts_.erase(
      std::remove_if(bad_hosts_.begin(), bad_hosts_.end(),
                     [host_port](const BadHost &bad_host) {
                       return bad_host.first == host_port;
                     }),
      bad_hosts_.end());
}

void RpcManager::CleanupBadHosts() {
  while (!shutdown_) {
    std::this_thread::sleep_for(bad_host_cleanup_interval_);
    TimePoint now = std::chrono::system_clock::now();
    {
      std::lock_guard<std::mutex> lock(mu_);
      DoCleanupBadHosts(now);
    }
    cv_.notify_all();
  }
}

void RpcManager::DoAddChannel(const std::string &host_port) {
  for (int tag = 0; tag < num_channels_per_host_; ++tag) {
    channels_.emplace_back(CreateChannel(host_port, tag));
  }
}

void RpcManager::DoRemoveChannel(const std::string &host_port) {
  channels_.erase(
      std::remove_if(channels_.begin(), channels_.end(),
                     [host_port](const std::shared_ptr<RpcChannel> &channel) {
                       return channel->host_port() == host_port;
                     }),
      channels_.end());
}

void RpcManager::DoCleanupBadHosts(TimePoint now) {
  auto iter = std::partition(
      bad_hosts_.begin(), bad_hosts_.end(),
      [now, this](const BadHost &bad_host) {
        return now - bad_host.second < bad_host_timeout_;
      });
  std::for_each(iter, bad_hosts_.end(),
                [this](const BadHost &bad_host) { DoAddChannel(bad_host.first); });
  bad_hosts_.erase(iter, bad_hosts_.end());
}

REGISTER_IMPL(RpcManager, GrpcManager);

}  // namespace euler
<reponame>weltam/dremio-oss<filename>sabot/kernel/src/test/resources/deltalake/testDataset/_delta_log/00000000000000000012.json<gh_stars>1000+ {"commitInfo":{"timestamp":1613298242407,"operation":"WRITE","operationParameters":{"mode":"Append","partitionBy":"[]"},"readVersion":11,"isBlindAppend":true,"operationMetrics":{"numFiles":"1","numOutputBytes":"2884","numOutputRows":"25"}}} {"add":{"path":"part-00000-aa1a0925-e7d1-49fd-9240-81e3ab24d288-c000.snappy.parquet","partitionValues":{},"size":2884,"modificationTime":1613298242000,"dataChange":true}}
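Each _delta_log/NNN.json commit file, like the one above, is newline-delimited JSON with one action object per line (commitInfo, add, remove, ...). A minimal reader sketch:

    import json

    with open("00000000000000000012.json") as f:
        for line in f:
            action = json.loads(line)
            if "commitInfo" in action:
                print("operation:", action["commitInfo"]["operation"])
            elif "add" in action:
                print("added:", action["add"]["path"], action["add"]["size"], "bytes")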
<reponame>tb-soft/databus
package com.linkedin.databus.client.pub;
/*
 *
 * Copyright 2013 LinkedIn Corp. All rights reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 *
 */

import java.net.InetSocketAddress;

public class DatabusServerCoordinates implements Comparable<DatabusServerCoordinates>
{
  public enum StateId
  {
    UNKNOWN, ERROR, OFFLINE, ONLINE
  }

  /** A user-friendly name to identify the relay/bootstrap-server */
  private final String _name;

  /** The IP address and port number */
  private final InetSocketAddress _address;

  /**
   * State of the relay/bootstrap-server (as reported by Helix).
   * States are Online, Offline, Error.
   */
  private StateId _state;

  /**
   * Base constructor
   *
   * @param name
   * @param address
   * @param state
   */
  public DatabusServerCoordinates(String name, InetSocketAddress address, String state)
  {
    _name = name;
    _address = address;
    try
    {
      _state = StateId.valueOf(state);
    }
    catch (Exception ex)
    {
      _state = StateId.UNKNOWN;
    }
  }

  public DatabusServerCoordinates(String name, InetSocketAddress address, StateId state)
  {
    _name = name;
    _address = address;
    _state = state;
  }

  /**
   * Typically called from DatabusHttpV3ClientImpl
   *
   * @param name
   * @param address
   */
  public DatabusServerCoordinates(String name, InetSocketAddress address)
  {
    this(name, address, "OFFLINE");
  }

  /**
   * Typically called for constructing RelayCoordinates based on the client's external view
   *
   * @param address
   * @param state
   */
  public DatabusServerCoordinates(InetSocketAddress address, String state)
  {
    this("default", address, state);
  }

  public String getName()
  {
    return _name;
  }

  public InetSocketAddress getAddress()
  {
    return _address;
  }

  public void setState(StateId state)
  {
    _state = state;
  }

  public StateId getState()
  {
    return _state;
  }

  @Override
  public boolean equals(Object o)
  {
    if (this == o) return true;
    if (o == null) return false;
    if (getClass() != o.getClass()) return false;
    final DatabusServerCoordinates castedObj = (DatabusServerCoordinates) o;
    return _address.equals(castedObj.getAddress()) && _state.equals(castedObj.getState());
  }

  @Override
  public int hashCode()
  {
    return (getAddress().hashCode() << 16) + getState().hashCode();
  }

  @Override
  public int compareTo(DatabusServerCoordinates o)
  {
    InetSocketAddress addr1 = getAddress();
    InetSocketAddress addr2 = o.getAddress();
    String addrStr1 = (addr1 != null ? addr1.toString() + getState() : "");
    // Compare against the other object's own state, not this object's.
    String addrStr2 = (addr2 != null ? addr2.toString() + o.getState() : "");
    return addrStr1.compareTo(addrStr2);
  }

  /**
   * Checks whether the server represented by this object is in the ONLINE state.
   * @return true if the state is ONLINE
   */
  public boolean isOnlineState()
  {
    return _state == StateId.ONLINE;
  }

  @Override
  public String toString()
  {
    return "DatabusServerCoordinates [_name=" + _name + ", _address=" + _address
        + ", _state=" + _state + "]";
  }
}
@@@@base_visitor @@root_base@@Visitor #### @@@@body@struct_name void visit(@@struct_name@@& @@parameter_name@@) override; #### @@@@main #pragma once // Generated File // DO NOT EDIT @@extra_inc@@ #include "@@base_visitor@@.h" namespace @@namespace@@ { class @@visitor_name@@ : public @@base_visitor@@ { public: @@body@@ @@ctor_and_member@@ }; } // namespace @@namespace@@ ####
/** * \class ResizeDotWidget * \brief This widget represents a shape which is used in FormView as * the resizing grip of a FW during resize operation. * \author <NAME> - GIOWISYS Software * \date 04/05/2012 */ #ifndef RESIZEDOTWIDGET_H #define RESIZEDOTWIDGET_H //----------------------------------------------------------------------------- // Headers //----------------------------------------------------------------------------- #include <QWidget> //----------------------------------------------------------------------------- // Forward declarations //----------------------------------------------------------------------------- class QLabel; class QVBoxLayout; //----------------------------------------------------------------------------- // ResizeDotWidget //----------------------------------------------------------------------------- class ResizeDotWidget : public QWidget { Q_OBJECT public: explicit ResizeDotWidget(QWidget *parent = nullptr); bool isResizing; /**< A boolean indicating if the grip is currently in a drag resize operation */ private: QLabel *label; QVBoxLayout *layout; }; #endif // RESIZEDOTWIDGET_H
<filename>addons/zotero/models.py # -*- coding: utf-8 -*- from addons.base.models import BaseCitationsNodeSettings, BaseOAuthUserSettings from addons.base import exceptions from django.db import models from framework import sentry from framework.exceptions import HTTPError from pyzotero import zotero, zotero_errors from addons.zotero import \ settings # TODO: Move `settings` to `apps.py` when deleting from addons.zotero.serializer import ZoteroSerializer from website.citations.providers import CitationsOauthProvider # TODO: Don't cap at 200 responses. We can only fetch 100 citations at a time. With lots # of citations, requesting the citations may take longer than the UWSGI harakiri time. # For now, we load 200 citations max and show a message to the user. MAX_CITATION_LOAD = 200 class Zotero(CitationsOauthProvider): name = 'Zotero' short_name = 'zotero' _oauth_version = 1 client_id = settings.ZOTERO_CLIENT_ID client_secret = settings.ZOTERO_CLIENT_SECRET auth_url_base = 'https://www.zotero.org/oauth/authorize' callback_url = 'https://www.zotero.org/oauth/access' request_token_url = 'https://www.zotero.org/oauth/request' default_scopes = ['all'] serializer = ZoteroSerializer _library_client = None def handle_callback(self, response): return { 'display_name': response['username'], 'provider_id': response['userID'], 'profile_url': 'https://zotero.org/users/{}/'.format( response['userID'] ), } def get_list(self, list_id=None, library_id=None): """Get a single CitationList :param str list_id: ID for a folder. Optional. :param str list_id: ID for library. Optional. :return CitationList: CitationList for the folder, or for all documents """ if not list_id or list_id == 'ROOT': return self._citations_for_user(library_id) return self._citations_for_folder(list_id, library_id) def _get_folders(self, library_id=None, folder_id=None): """ Get a list of a user's folders, either from their personal library, or a group library, if specified. If folder_id is specified, will return the folders just within that folder. """ client = self._get_library(library_id) # Note: Pagination is the only way to ensure all of the collections # are retrieved. 100 is the limit per request. This applies # to Mendeley too, though that limit is 500. if folder_id: if folder_id == library_id: # Returns library's top-level folders return client.collections_top(limit=100) # Returns subfolders within a specific folder return client.collections_sub(folder_id) else: # No folder is specified, so all folders are returned underneath the library return client.collections(limit=100) def _get_library(self, library_id): """ If library id specified, fetch the group library from Zotero. Otherwise, use the user's personal library. 
""" if library_id and library_id != 'personal': if not self._library_client: self._library_client = zotero.Zotero(str(library_id), 'group', self.account.oauth_key) return self._library_client else: return self.client def _get_client(self): return zotero.Zotero(self.account.provider_id, 'user', self.account.oauth_key) def _verify_client_validity(self): # Check if Zotero can be accessed with current credentials try: self._client.collections() except zotero_errors.PyZoteroError as err: self._client = None if isinstance(err, zotero_errors.UserNotAuthorised): raise HTTPError(403) else: raise err def _fetch_libraries(self, limit=None, start=None): """ Retrieves the Zotero library data to which the current library_id and api_key has access """ total_libraries = self.client._totals('/users/{u}/groups') libraries = self.client.groups(limit=limit, start=start, sort='title') libraries.append(total_libraries) return libraries def _folder_metadata(self, folder_id, library_id=None): client = self._get_library(library_id) return client.collection(folder_id) def _library_metadata(self, library_id): for library in self.client.groups(): if str(library['id']) == library_id: return library return None def _citations_for_folder(self, list_id, library_id=None): """Get all the citations in a specified collection :param str list_id: ID for a Zotero collection. :return list of csljson objects representing documents. """ client = self._get_library(library_id) citations = [] more = True offset = 0 while more and len(citations) <= MAX_CITATION_LOAD: page = client.collection_items_top(list_id, content='csljson', limit=100, start=offset) citations = citations + page if len(page) == 0 or len(page) < 100: more = False else: offset = offset + len(page) return citations def _citations_for_user(self, library_id=None): """Get all the citations from the user """ citations = [] more = True offset = 0 client = self._get_library(library_id) while more and len(citations) <= MAX_CITATION_LOAD: page = client.top(content='csljson', limit=100, start=offset) citations = citations + page if len(page) == 0 or len(page) < 100: more = False else: offset = offset + len(page) # Loops through all items in the library and extracts the keys for unfiled items unfiled_keys = [citation['data']['key'] for citation in client.top() if not citation['data']['collections']] # Return only unfiled items in csljson format return [cite for cite in citations if cite['id'].split('/')[1] in unfiled_keys] @property def auth_url(self): """ Add all_groups query param so Zotero API key will have permissions to user's groups """ url = super(Zotero, self).auth_url return url + '&all_groups=read' class UserSettings(BaseOAuthUserSettings): oauth_provider = Zotero serializer = ZoteroSerializer class NodeSettings(BaseCitationsNodeSettings): provider_name = 'zotero' oauth_provider = Zotero serializer = ZoteroSerializer user_settings = models.ForeignKey(UserSettings, null=True, blank=True, on_delete=models.CASCADE) list_id = models.TextField(blank=True, null=True) library_id = models.TextField(blank=True, null=True) _api = None @property def complete(self): """ Boolean indication of addon completeness Requires that both library_id and list_id have been defined. 
""" return bool(self.has_auth and self.list_id and self.library_id and self.user_settings.verify_oauth_access( node=self.owner, external_account=self.external_account, metadata={'folder': self.list_id, 'library': self.library_id}, )) @property def fetch_library_name(self): """Returns a displayable library name""" if self.library_id is None: return '' else: if self.library_id == 'personal': return 'My library' library = self.api._library_metadata(self.library_id) return library['data'].get('name') if library else 'My library' @property def _fetch_folder_name(self): folder = self.api._folder_metadata(self.list_id, self.library_id) return folder['data'].get('name') def clear_settings(self): """Clears selected folder and selected library configuration""" self.list_id = None self.library_id = None def serialize_folder(self, kind, id, name, path, parent=None, provider_list_id=None): return { 'addon': 'zotero', 'kind': kind, 'id': id, 'name': name, 'path': path, 'parent_list_id': parent, 'provider_list_id': provider_list_id or id } def get_folders(self, path=None, folder_id=None, **kwargs): """ Returns Zotero folders at any level. Top level are group/personal libraries. Secondary levels are folders within those libraries. If path (also known as library id) is specified, then folders are returned from that specific library. If no path(library) specified, then all libraries are returned. """ if self.has_auth: if path: return self.get_sub_folders(path, folder_id) else: return self.get_top_level_folders(**kwargs) else: raise exceptions.InvalidAuthError() def get_top_level_folders(self, **kwargs): """ Returns serialized group libraries - your personal library along with any group libraries. This is the top-tier of "folders" in Zotero. You can use kwargs to refine what data is returned - how to limit the number of group libraries, whether to return the personal library alongside group_libraries, or append the total library count. """ # These kwargs are passed in from ZoteroViews > library_list limit = kwargs.get('limit', None) start = kwargs.get('start', None) return_count = kwargs.get('return_count', False) append_personal = kwargs.get('append_personal', True) try: # Fetch group libraries libraries = self.api._fetch_libraries(limit=limit, start=start) except zotero_errors.ResourceNotFound: raise HTTPError(404) except zotero_errors.UserNotAuthorised: raise HTTPError(403) except zotero_errors.HTTPError: sentry.log_exception() sentry.log_message('Unexpected Zotero Error when fetching group libraries.') raise HTTPError(500) # Serialize libraries serialized = [] for library in libraries[:-1]: data = library['data'] serialized.append(self.serialize_folder('library', data['id'], data['name'], str(data['id']))) if return_count: # Return total number of libraries as last item in list serialized.append(libraries[-1]) if append_personal: # Append personal library as option alongside group libraries serialized.insert(0, self.serialize_folder('library', 'personal', 'My Library', 'personal')) return serialized def get_sub_folders(self, library_id, folder_id=None, **kwargs): """ Returns serialized folders underneath a specific library/group - these are the lower tiers of folders in Zotero. If no folder_id is specified, all folders in a flat manner are returned for the group library. If a folder_id is specified, only the subfolders within that folder are returned. 
""" try: sub_folders = self.api._get_folders(library_id=library_id, folder_id=folder_id) except zotero_errors.ResourceNotFound: raise HTTPError(404) except zotero_errors.UserNotAuthorised: raise HTTPError(403) except zotero_errors.HTTPError: sentry.log_exception() sentry.log_message('Unexpected Zotero Error when fetching folders.') raise HTTPError(500) serialized = [] for folder in sub_folders: data = folder['data'] path = folder['library']['id'] if folder['library']['type'] == 'group' else 'personal' serialized.append(self.serialize_folder('folder', data['key'], data['name'], path, data['parentCollection'])) if folder_id: return serialized else: all_documents = self.serialize_folder('folder', 'ROOT', 'All Documents', library_id, '__', None) return [all_documents] + serialized
<filename>headers/build/os/interface/Rect.h #include <../os/interface/Rect.h>
# -*- coding: utf-8 -*- # Generated by Django 1.11.28 on 2020-05-25 18:14 from __future__ import unicode_literals from django.conf import settings import django.contrib.postgres.fields from django.db import migrations, models import django.db.models.deletion import django.utils.timezone from corehq.util.django_migrations import skip_on_fresh_install @skip_on_fresh_install def populate_api_keys(apps, schema_editor): TastyPieApiKey = apps.get_model('tastypie', 'ApiKey') ApiKeySettings = apps.get_model('hqwebapp', 'ApiKeySettings') HQApiKey = apps.get_model('users', 'HQApiKey') for api_key in TastyPieApiKey.objects.all(): try: ip_allowlist = ApiKeySettings.objects.get(api_key=api_key).ip_whitelist except ApiKeySettings.DoesNotExist: ip_allowlist = [] HQApiKey.objects.create( key=api_key.key, created=api_key.created, user=api_key.user, ip_allowlist=ip_allowlist, ) class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('users', '0015_domainpermissionsmirror'), ] operations = [ migrations.CreateModel( name='HQApiKey', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('key', models.CharField(blank=True, db_index=True, default='', max_length=128)), ('name', models.CharField(blank=True, default='', max_length=255)), ('created', models.DateTimeField(default=django.utils.timezone.now)), ('ip_allowlist', django.contrib.postgres.fields.ArrayField(base_field=models.GenericIPAddressField(), default=list, size=None)), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='api_keys', to=settings.AUTH_USER_MODEL)), ], ), migrations.RunPython(populate_api_keys, migrations.RunPython.noop) ]
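After this migration, keys live on the users.HQApiKey model; a hedged ORM sketch of creating one (the import path is an assumption based on the app label above, and the migration itself does not add key generation, so `key` defaults to ""):

    from django.contrib.auth import get_user_model
    from corehq.apps.users.models import HQApiKey  # assumed import path

    user = get_user_model().objects.get(username="admin@example.com")
    api_key = HQApiKey.objects.create(user=user, name="ci-key", ip_allowlist=["10.0.0.1"])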
<gh_stars>100-1000 /*Header-MicMac-eLiSe-25/06/2007 MicMac : Multi Image Correspondances par Methodes Automatiques de Correlation eLiSe : ELements of an Image Software Environnement www.micmac.ign.fr Copyright : Institut Geographique National Author : <NAME> Contributors : <NAME>, <NAME>. [1] <NAME>, <NAME>. "A multiresolution and optimization-based image matching approach: An application to surface reconstruction from SPOT5-HRS stereo imagery." In IAPRS vol XXXVI-1/W41 in ISPRS Workshop On Topographic Mapping From Space (With Special Emphasis on Small Satellites), Ankara, Turquie, 02-2006. [2] <NAME>, "MicMac, un lociel de mise en correspondance d'images, adapte au contexte geograhique" to appears in Bulletin d'information de l'Institut Geographique National, 2007. Francais : MicMac est un logiciel de mise en correspondance d'image adapte au contexte de recherche en information geographique. Il s'appuie sur la bibliotheque de manipulation d'image eLiSe. Il est distibue sous la licences Cecill-B. Voir en bas de fichier et http://www.cecill.info. English : MicMac is an open source software specialized in image matching for research in geographic information. MicMac is built on the eLiSe image library. MicMac is governed by the "Cecill-B licence". See below and http://www.cecill.info. Header-MicMac-eLiSe-25/06/2007*/ #include "StdAfx.h" typedef struct tagGg { double X0, YR , C , R2 , SS ,LC0 ; } Gg ; /* Lamb_geo */ typedef struct tagPof { double X0, Sino, Sink, LC0, R0, YR ; } Pof ; /* Geo_Lamb */ /********************************************************/ /* Lamb_geo.c Calcule la longitude Alon et la latitude Alat en grades d'un point de coordonnees X,Y donnees en metres ( avec le chiffre indicateur de la zone Lambert pour les y) dans la Projection Lambert de numero Nlamb. 
On rappelle que la constante origine des Y vaut: 1200000.00 Metres pour la zone Lambert I 2200000.00 " Lambert II 3200000.00 " Lambert III 4185861.37 " Lambert IV Les 4 zones Lambert-France sont caracterisees par: Lambert I : Alat>=53.5 Gr (Parallele origine:55 Gr) Lambert II : 50.5 Gr<=Alat<=53.5 Gr (Parallele origine:52 Gr) Lambert III: Alat<=50.5 Gr et Alon<=6.2 Gr (Parallele origine:49 Gr) Lambert IV : Alat<=48 Gr et Alon>=6.4 Gr (Parallele origine:46,85 Gr) */ /*******************************************************/ void Lamb_geo (double xx, double yy, int N_lamb, double *Alon, double *Alat ) { double U, V, P, T, S, S2, A, E; int Nl ; Gg Eve[4] ; /* Constantes des projections Lambert-France */ Eve[0].X0= 600000; Eve[1].X0= 600000; Eve[2].X0= 600000; Eve[3].X0= 234.358; Eve[0].YR= 6657616.674 ; Eve[1].YR= 8199695.768 ; Eve[2].YR= 9791905.085 ; Eve[3].YR= 11239161.542 ; Eve[0].C= 83.721038652; Eve[1].C= 87.331573464; Eve[2].C= 91.479819811; Eve[3].C= 94.838400858; Eve[0].R2 = 2.978557976E13; Eve[1].R2 = 3.5996349309E13; Eve[2].R2 = 4.345321265E13; Eve[3].R2 = 4.974904333E13; Eve[0].SS = 1.5208119312; Eve[1].SS = 1.4579372548; Eve[2].SS = 1.3918255932; Eve[3].SS = 1.3425358644; Eve[0].LC0= 0.9919966654; Eve[1].LC0= 0.9215573613; Eve[2].LC0= 0.8545910977; Eve[3].LC0= 0.8084757728; if ( yy < (double)(N_lamb * 1000000) ) { yy = yy + (double)(N_lamb * 1000000) ; } Nl = N_lamb - 1 ; U = xx - (double) ( Eve[Nl].X0 ) ; V = (double) ( Eve[Nl].YR ) - yy ; *Alon = ( atan( U / V ) ) * Eve[Nl].C ; P = ( U * U + V * V ) / Eve[Nl].R2 ; E = exp( log(P)/Eve[Nl].SS - Eve[Nl].LC0 ) ; T = ( 1 - E ) / ( 1 + E ) ; S = sin( 2 * atan(T) ) ; S2 = S * S ; A = ( ( 1.38E-7 * S2 - 1.5707E-5 ) * S2 + 3.425046E-3 ) * S ; *Alat = atan( (A + T) / (1 + A * T ) ) * 127.32395447 ; /* Le calcul est fait en grades : en degres en sortie */ *Alon = *Alon * 0.9 ; *Alat = *Alat * 0.9 ; } /******************************************************************/ /* Geo_lamb.c Calcule les coordonnees X ,Y en metres,dans la projection Lambert de numero N_amb d'un point de longitude Alon et de latitude Alat donnees en grades. 
On rappelle que la constante origine des Y vaut: 1200000.00 Metres pour la zone Lambert I 2200000.00 " Lambert II 3200000.00 " Lambert III 4185861.37 " Lambert IV Les 4 zones Lambert-France sont caracterisees par: Lambert I : Alat>=53.5 Gr (Parallele origine:55 Gr) Lambert II : 50.5 Gr<=Alat<=53.5 Gr (Parallele origine:52 Gr) Lambert III: Alat<=50.5 Gr et Alon<=6.2 Gr (Parallele origine:49 Gr) Lambert IV : Alat<=48 Gr et Alon>=6.4 Gr (Parallele origine:46,85 Gr) */ /******************************************************************/ void Geo_lamb( double Alon, double Alat, int N_lamb, double *x, double *y ) { double T, Lc , R , C ; double gAlon , gAlat ; /* en grades */ Pof Pif[4] ; int Nl ; /* Constantes des projections Lambert-France */ Pif[0].X0=600000; Pif[1].X0=600000; Pif[2].X0=600000; Pif[3].X0=234.358; Pif[0].Sino=0.7604059656; Pif[1].Sino=0.7289686274; Pif[2].Sino=0.6959127966; Pif[3].Sino=0.6712679322; Pif[0].Sink=1.194442898E-2; Pif[1].Sink=1.145061242E-2; Pif[2].Sink=1.093137265E-2; Pif[3].Sink=1.054425202E-2; Pif[0].LC0=0.9919966654; Pif[1].LC0=0.9215573613; Pif[2].LC0=0.8545910977; Pif[3].LC0=0.8084757728; Pif[0].R0=5457616.674; Pif[1].R0=5999695.768; Pif[2].R0=6591905.085; Pif[3].R0=7053300.173; Pif[0].YR=6657616.674; Pif[1].YR=8199695.768; Pif[2].YR=9791905.085; Pif[3].YR=11239161.542; Nl = N_lamb -1 ; gAlon = Alon * 200.0 / 180.0 ; gAlat = Alat * 200.0 / 180.0 ; T = 8.24832568E-2 * sin( gAlat * 1.5707963268E-2 ) ; Lc = log( tan( 7.8539816340E-3 * ( gAlat + 100) ) ) - 4.12416284E-2 * log( (1+T) / (1-T) ) ; /* latitude croissante */ R=Pif[Nl].R0 * exp(Pif[Nl].Sino*(Pif[Nl].LC0-Lc)); C=Pif[Nl].Sink * gAlon ; *x = Pif[Nl].X0 + R * sin(C); *y = Pif[Nl].YR - R * cos(C); /* * Formulation abandonnee : * *y = *y - (double)(N_lamb * 1000000) ; * on adopte desormais une convention pour laquelle * Y porte les "millions" du Lambert */ } /*Footer-MicMac-eLiSe-25/06/2007 Ce logiciel est un programme informatique servant à la mise en correspondances d'images pour la reconstruction du relief. Ce logiciel est régi par la licence CeCILL-B soumise au droit français et respectant les principes de diffusion des logiciels libres. Vous pouvez utiliser, modifier et/ou redistribuer ce programme sous les conditions de la licence CeCILL-B telle que diffusée par le CEA, le CNRS et l'INRIA sur le site "http://www.cecill.info". En contrepartie de l'accessibilité au code source et des droits de copie, de modification et de redistribution accordés par cette licence, il n'est offert aux utilisateurs qu'une garantie limitée. Pour les mêmes raisons, seule une responsabilité restreinte pèse sur l'auteur du programme, le titulaire des droits patrimoniaux et les concédants successifs. A cet égard l'attention de l'utilisateur est attirée sur les risques associés au chargement, à l'utilisation, à la modification et/ou au développement et à la reproduction du logiciel par l'utilisateur étant donné sa spécificité de logiciel libre, qui peut le rendre complexe à manipuler et qui le réserve donc à des développeurs et des professionnels avertis possédant des connaissances informatiques approfondies. Les utilisateurs sont donc invités à charger et tester l'adéquation du logiciel à leurs besoins dans des conditions permettant d'assurer la sécurité de leurs systèmes et ou de leurs données et, plus généralement, à l'utiliser et l'exploiter dans les mêmes conditions de sécurité. 
Le fait que vous puissiez accéder à cet en-tête signifie que vous avez pris connaissance de la licence CeCILL-B, et que vous en avez accepté les termes. Footer-MicMac-eLiSe-25/06/2007*/
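Both routines above hand-roll the NTF Lambert zone conversions (grads internally, degrees at the API boundary, with Y carrying the per-zone "millions" offset). As a sanity check, equivalent transforms are available through pyproj. This is a hedged sketch, assuming EPSG:27572 ("NTF (Paris) / Lambert zone II", whose 2,200,000 m false northing matches the convention used above) and EPSG:4275 for geographic NTF:

    from pyproj import Transformer

    to_lambert2 = Transformer.from_crs("EPSG:4275", "EPSG:27572", always_xy=True)
    x, y = to_lambert2.transform(2.3522, 48.8566)            # lon, lat in degrees (Paris)
    lon, lat = to_lambert2.transform(x, y, direction="INVERSE")  # round-trip back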
{ "name": "DZBadgeView", "version": "0.1.0", "summary": "DZBadgeView is a badge style view", "description": "badge style view, it will used for remind. have fun!", "homepage": "https://github.com/yishuiliunian/DZBadgeView", "license": "MIT", "authors": { "yishuiliunian": "<EMAIL>" }, "source": { "git": "https://github.com/yishuiliunian/DZBadgeView.git", "tag": "0.1.0" }, "platforms": { "ios": "7.0" }, "requires_arc": true, "source_files": "Pod/Classes/**/*", "resource_bundles": { "DZBadgeView": [ "Pod/Assets/*.png" ] }, "dependencies": { "DZGeometryTools": [ ] } }
<reponame>JojOatXGME/Grammar-Kit<gh_stars>100-1000 // ---- ConsumeMethods.java ----------------- // This is a generated file. Not intended for manual editing. package ; import com.intellij.lang.PsiBuilder; import com.intellij.lang.PsiBuilder.Marker; import static generated.GeneratedTypes.*; import static com.intellij.lang.parser.GeneratedParserUtilBase.*; import com.intellij.psi.tree.IElementType; import com.intellij.lang.ASTNode; import com.intellij.psi.tree.TokenSet; import com.intellij.lang.PsiParser; import com.intellij.lang.LightPsiParser; @SuppressWarnings({"SimplifiableIfStatement", "UnusedAssignment"}) public class ConsumeMethods implements PsiParser, LightPsiParser { public ASTNode parse(IElementType root_, PsiBuilder builder_) { parseLight(root_, builder_); return builder_.getTreeBuilt(); } public void parseLight(IElementType root_, PsiBuilder builder_) { boolean result_; builder_ = adapt_builder_(root_, builder_, this, null); Marker marker_ = enter_section_(builder_, 0, _COLLAPSE_, null); result_ = parse_root_(root_, builder_); exit_section_(builder_, 0, marker_, root_, result_, true, TRUE_CONDITION); } protected boolean parse_root_(IElementType root_, PsiBuilder builder_) { return parse_root_(root_, builder_, 0); } static boolean parse_root_(IElementType root_, PsiBuilder builder_, int level_) { return root(builder_, level_ + 1); } /* ********************************************************** */ // &token_fast token_regular public static boolean fast_predicate_vs_regular(PsiBuilder builder_, int level_) { if (!recursion_guard_(builder_, level_, "fast_predicate_vs_regular")) return false; if (!nextTokenIs(builder_, SAME_TOKEN)) return false; boolean result_; Marker marker_ = enter_section_(builder_); result_ = fast_predicate_vs_regular_0(builder_, level_ + 1); result_ = result_ && token_regular(builder_, level_ + 1); exit_section_(builder_, marker_, FAST_PREDICATE_VS_REGULAR, result_); return result_; } // &token_fast private static boolean fast_predicate_vs_regular_0(PsiBuilder builder_, int level_) { if (!recursion_guard_(builder_, level_, "fast_predicate_vs_regular_0")) return false; boolean result_; Marker marker_ = enter_section_(builder_, level_, _AND_); result_ = token_fast(builder_, level_ + 1); exit_section_(builder_, level_, marker_, result_, false, null); return result_; } /* ********************************************************** */ // &token_fast token_smart public static boolean fast_predicate_vs_smart(PsiBuilder builder_, int level_) { if (!recursion_guard_(builder_, level_, "fast_predicate_vs_smart")) return false; if (!nextTokenIsSmart(builder_, SAME_TOKEN)) return false; boolean result_; Marker marker_ = enter_section_(builder_); result_ = fast_predicate_vs_smart_0(builder_, level_ + 1); result_ = result_ && token_smart(builder_, level_ + 1); exit_section_(builder_, marker_, FAST_PREDICATE_VS_SMART, result_); return result_; } // &token_fast private static boolean fast_predicate_vs_smart_0(PsiBuilder builder_, int level_) { if (!recursion_guard_(builder_, level_, "fast_predicate_vs_smart_0")) return false; boolean result_; Marker marker_ = enter_section_(builder_, level_, _AND_); result_ = token_fast(builder_, level_ + 1); exit_section_(builder_, level_, marker_, result_, false, null); return result_; } /* ********************************************************** */ // FAST_TOKEN 3 5 public static boolean fast_rule(PsiBuilder builder_, int level_) { if (!recursion_guard_(builder_, level_, "fast_rule")) return false; if (!nextTokenIsFast(builder_, 
FAST_TOKEN)) return false; boolean result_; Marker marker_ = enter_section_(builder_); result_ = consumeTokenFast(builder_, FAST_TOKEN); result_ = result_ && consumeToken(builder_, "3"); result_ = result_ && consumeToken(builder_, "5"); exit_section_(builder_, marker_, FAST_RULE, result_); return result_; } /* ********************************************************** */ // token_fast | token_regular public static boolean fast_vs_regular(PsiBuilder builder_, int level_) { if (!recursion_guard_(builder_, level_, "fast_vs_regular")) return false; if (!nextTokenIs(builder_, SAME_TOKEN)) return false; boolean result_; Marker marker_ = enter_section_(builder_); result_ = token_fast(builder_, level_ + 1); if (!result_) result_ = token_regular(builder_, level_ + 1); exit_section_(builder_, marker_, FAST_VS_REGULAR, result_); return result_; } /* ********************************************************** */ // token_fast | token_regular public static boolean fast_vs_regular_in_fast(PsiBuilder builder_, int level_) { if (!recursion_guard_(builder_, level_, "fast_vs_regular_in_fast")) return false; if (!nextTokenIsFast(builder_, SAME_TOKEN)) return false; boolean result_; Marker marker_ = enter_section_(builder_); result_ = token_fast(builder_, level_ + 1); if (!result_) result_ = token_regular(builder_, level_ + 1); exit_section_(builder_, marker_, FAST_VS_REGULAR_IN_FAST, result_); return result_; } /* ********************************************************** */ // token_fast | token_regular public static boolean fast_vs_regular_in_smart(PsiBuilder builder_, int level_) { if (!recursion_guard_(builder_, level_, "fast_vs_regular_in_smart")) return false; if (!nextTokenIsSmart(builder_, SAME_TOKEN)) return false; boolean result_; Marker marker_ = enter_section_(builder_); result_ = token_fast(builder_, level_ + 1); if (!result_) result_ = token_regular(builder_, level_ + 1); exit_section_(builder_, marker_, FAST_VS_REGULAR_IN_SMART, result_); return result_; } /* ********************************************************** */ // token_fast | token_smart public static boolean fast_vs_smart(PsiBuilder builder_, int level_) { if (!recursion_guard_(builder_, level_, "fast_vs_smart")) return false; if (!nextTokenIsSmart(builder_, SAME_TOKEN)) return false; boolean result_; Marker marker_ = enter_section_(builder_); result_ = token_fast(builder_, level_ + 1); if (!result_) result_ = token_smart(builder_, level_ + 1); exit_section_(builder_, marker_, FAST_VS_SMART, result_); return result_; } /* ********************************************************** */ // token_fast | token_smart public static boolean fast_vs_smart_in_fast(PsiBuilder builder_, int level_) { if (!recursion_guard_(builder_, level_, "fast_vs_smart_in_fast")) return false; if (!nextTokenIsFast(builder_, SAME_TOKEN)) return false; boolean result_; Marker marker_ = enter_section_(builder_); result_ = token_fast(builder_, level_ + 1); if (!result_) result_ = token_smart(builder_, level_ + 1); exit_section_(builder_, marker_, FAST_VS_SMART_IN_FAST, result_); return result_; } /* ********************************************************** */ // token_fast | token_smart public static boolean fast_vs_smart_in_smart(PsiBuilder builder_, int level_) { if (!recursion_guard_(builder_, level_, "fast_vs_smart_in_smart")) return false; if (!nextTokenIsSmart(builder_, SAME_TOKEN)) return false; boolean result_; Marker marker_ = enter_section_(builder_); result_ = token_fast(builder_, level_ + 1); if (!result_) result_ = token_smart(builder_, level_ + 1); 
exit_section_(builder_, marker_, FAST_VS_SMART_IN_SMART, result_); return result_; } /* ********************************************************** */ // regular_rule | smart_rule | fast_rule public static boolean parent_fast(PsiBuilder builder_, int level_) { if (!recursion_guard_(builder_, level_, "parent_fast")) return false; if (!nextTokenIsFast(builder_, FAST_TOKEN, REGULAR_TOKEN, SMART_TOKEN)) return false; boolean result_; Marker marker_ = enter_section_(builder_, level_, _NONE_, PARENT_FAST, "<parent fast>"); result_ = regular_rule(builder_, level_ + 1); if (!result_) result_ = smart_rule(builder_, level_ + 1); if (!result_) result_ = fast_rule(builder_, level_ + 1); exit_section_(builder_, level_, marker_, result_, false, null); return result_; } /* ********************************************************** */ // regular_rule | smart_rule | fast_rule public static boolean parent_regular(PsiBuilder builder_, int level_) { if (!recursion_guard_(builder_, level_, "parent_regular")) return false; if (!nextTokenIsFast(builder_, FAST_TOKEN) && !nextTokenIsSmart(builder_, SMART_TOKEN) && !nextTokenIs(builder_, "<parent regular>", REGULAR_TOKEN)) return false; boolean result_; Marker marker_ = enter_section_(builder_, level_, _NONE_, PARENT_REGULAR, "<parent regular>"); result_ = regular_rule(builder_, level_ + 1); if (!result_) result_ = smart_rule(builder_, level_ + 1); if (!result_) result_ = fast_rule(builder_, level_ + 1); exit_section_(builder_, level_, marker_, result_, false, null); return result_; } /* ********************************************************** */ // regular_rule | smart_rule | fast_rule public static boolean parent_smart(PsiBuilder builder_, int level_) { if (!recursion_guard_(builder_, level_, "parent_smart")) return false; if (!nextTokenIsFast(builder_, FAST_TOKEN) && !nextTokenIsSmart(builder_, REGULAR_TOKEN, SMART_TOKEN)) return false; boolean result_; Marker marker_ = enter_section_(builder_, level_, _NONE_, PARENT_SMART, "<parent smart>"); result_ = regular_rule(builder_, level_ + 1); if (!result_) result_ = smart_rule(builder_, level_ + 1); if (!result_) result_ = fast_rule(builder_, level_ + 1); exit_section_(builder_, level_, marker_, result_, false, null); return result_; } /* ********************************************************** */ // &token_regular token_fast public static boolean regular_predicate_vs_fast(PsiBuilder builder_, int level_) { if (!recursion_guard_(builder_, level_, "regular_predicate_vs_fast")) return false; if (!nextTokenIs(builder_, SAME_TOKEN)) return false; boolean result_; Marker marker_ = enter_section_(builder_); result_ = regular_predicate_vs_fast_0(builder_, level_ + 1); result_ = result_ && token_fast(builder_, level_ + 1); exit_section_(builder_, marker_, REGULAR_PREDICATE_VS_FAST, result_); return result_; } // &token_regular private static boolean regular_predicate_vs_fast_0(PsiBuilder builder_, int level_) { if (!recursion_guard_(builder_, level_, "regular_predicate_vs_fast_0")) return false; boolean result_; Marker marker_ = enter_section_(builder_, level_, _AND_); result_ = token_regular(builder_, level_ + 1); exit_section_(builder_, level_, marker_, result_, false, null); return result_; } /* ********************************************************** */ // &token_regular token_smart public static boolean regular_predicate_vs_smart(PsiBuilder builder_, int level_) { if (!recursion_guard_(builder_, level_, "regular_predicate_vs_smart")) return false; if (!nextTokenIs(builder_, SAME_TOKEN)) return false; boolean 
result_; Marker marker_ = enter_section_(builder_); result_ = regular_predicate_vs_smart_0(builder_, level_ + 1); result_ = result_ && token_smart(builder_, level_ + 1); exit_section_(builder_, marker_, REGULAR_PREDICATE_VS_SMART, result_); return result_; } // &token_regular private static boolean regular_predicate_vs_smart_0(PsiBuilder builder_, int level_) { if (!recursion_guard_(builder_, level_, "regular_predicate_vs_smart_0")) return false; boolean result_; Marker marker_ = enter_section_(builder_, level_, _AND_); result_ = token_regular(builder_, level_ + 1); exit_section_(builder_, level_, marker_, result_, false, null); return result_; } /* ********************************************************** */ // REGULAR_TOKEN 1 2 3 public static boolean regular_rule(PsiBuilder builder_, int level_) { if (!recursion_guard_(builder_, level_, "regular_rule")) return false; if (!nextTokenIs(builder_, REGULAR_TOKEN)) return false; boolean result_; Marker marker_ = enter_section_(builder_); result_ = consumeToken(builder_, REGULAR_TOKEN); result_ = result_ && consumeToken(builder_, "1"); result_ = result_ && consumeToken(builder_, "2"); result_ = result_ && consumeToken(builder_, "3"); exit_section_(builder_, marker_, REGULAR_RULE, result_); return result_; } /* ********************************************************** */ // token_regular | token_fast public static boolean regular_vs_fast(PsiBuilder builder_, int level_) { if (!recursion_guard_(builder_, level_, "regular_vs_fast")) return false; if (!nextTokenIs(builder_, SAME_TOKEN)) return false; boolean result_; Marker marker_ = enter_section_(builder_); result_ = token_regular(builder_, level_ + 1); if (!result_) result_ = token_fast(builder_, level_ + 1); exit_section_(builder_, marker_, REGULAR_VS_FAST, result_); return result_; } /* ********************************************************** */ // token_regular | token_fast public static boolean regular_vs_fast_in_fast(PsiBuilder builder_, int level_) { if (!recursion_guard_(builder_, level_, "regular_vs_fast_in_fast")) return false; if (!nextTokenIsFast(builder_, SAME_TOKEN)) return false; boolean result_; Marker marker_ = enter_section_(builder_); result_ = token_regular(builder_, level_ + 1); if (!result_) result_ = token_fast(builder_, level_ + 1); exit_section_(builder_, marker_, REGULAR_VS_FAST_IN_FAST, result_); return result_; } /* ********************************************************** */ // token_regular | token_fast public static boolean regular_vs_fast_in_smart(PsiBuilder builder_, int level_) { if (!recursion_guard_(builder_, level_, "regular_vs_fast_in_smart")) return false; if (!nextTokenIsSmart(builder_, SAME_TOKEN)) return false; boolean result_; Marker marker_ = enter_section_(builder_); result_ = token_regular(builder_, level_ + 1); if (!result_) result_ = token_fast(builder_, level_ + 1); exit_section_(builder_, marker_, REGULAR_VS_FAST_IN_SMART, result_); return result_; } /* ********************************************************** */ // token_regular | token_smart public static boolean regular_vs_smart(PsiBuilder builder_, int level_) { if (!recursion_guard_(builder_, level_, "regular_vs_smart")) return false; if (!nextTokenIs(builder_, SAME_TOKEN)) return false; boolean result_; Marker marker_ = enter_section_(builder_); result_ = token_regular(builder_, level_ + 1); if (!result_) result_ = token_smart(builder_, level_ + 1); exit_section_(builder_, marker_, REGULAR_VS_SMART, result_); return result_; } /* 
********************************************************** */ // token_regular | token_smart public static boolean regular_vs_smart_in_fast(PsiBuilder builder_, int level_) { if (!recursion_guard_(builder_, level_, "regular_vs_smart_in_fast")) return false; if (!nextTokenIsFast(builder_, SAME_TOKEN)) return false; boolean result_; Marker marker_ = enter_section_(builder_); result_ = token_regular(builder_, level_ + 1); if (!result_) result_ = token_smart(builder_, level_ + 1); exit_section_(builder_, marker_, REGULAR_VS_SMART_IN_FAST, result_); return result_; } /* ********************************************************** */ // token_regular | token_smart public static boolean regular_vs_smart_in_smart(PsiBuilder builder_, int level_) { if (!recursion_guard_(builder_, level_, "regular_vs_smart_in_smart")) return false; if (!nextTokenIsSmart(builder_, SAME_TOKEN)) return false; boolean result_; Marker marker_ = enter_section_(builder_); result_ = token_regular(builder_, level_ + 1); if (!result_) result_ = token_smart(builder_, level_ + 1); exit_section_(builder_, marker_, REGULAR_VS_SMART_IN_SMART, result_); return result_; } /* ********************************************************** */ static boolean root(PsiBuilder builder_, int level_) { return true; } /* ********************************************************** */ // &token_smart token_fast public static boolean smart_predicate_vs_fast(PsiBuilder builder_, int level_) { if (!recursion_guard_(builder_, level_, "smart_predicate_vs_fast")) return false; if (!nextTokenIsSmart(builder_, SAME_TOKEN)) return false; boolean result_; Marker marker_ = enter_section_(builder_); result_ = smart_predicate_vs_fast_0(builder_, level_ + 1); result_ = result_ && token_fast(builder_, level_ + 1); exit_section_(builder_, marker_, SMART_PREDICATE_VS_FAST, result_); return result_; } // &token_smart private static boolean smart_predicate_vs_fast_0(PsiBuilder builder_, int level_) { if (!recursion_guard_(builder_, level_, "smart_predicate_vs_fast_0")) return false; boolean result_; Marker marker_ = enter_section_(builder_, level_, _AND_); result_ = token_smart(builder_, level_ + 1); exit_section_(builder_, level_, marker_, result_, false, null); return result_; } /* ********************************************************** */ // &token_smart token_regular public static boolean smart_predicate_vs_regular(PsiBuilder builder_, int level_) { if (!recursion_guard_(builder_, level_, "smart_predicate_vs_regular")) return false; if (!nextTokenIs(builder_, SAME_TOKEN)) return false; boolean result_; Marker marker_ = enter_section_(builder_); result_ = smart_predicate_vs_regular_0(builder_, level_ + 1); result_ = result_ && token_regular(builder_, level_ + 1); exit_section_(builder_, marker_, SMART_PREDICATE_VS_REGULAR, result_); return result_; } // &token_smart private static boolean smart_predicate_vs_regular_0(PsiBuilder builder_, int level_) { if (!recursion_guard_(builder_, level_, "smart_predicate_vs_regular_0")) return false; boolean result_; Marker marker_ = enter_section_(builder_, level_, _AND_); result_ = token_smart(builder_, level_ + 1); exit_section_(builder_, level_, marker_, result_, false, null); return result_; } /* ********************************************************** */ // SMART_TOKEN 2 4 public static boolean smart_rule(PsiBuilder builder_, int level_) { if (!recursion_guard_(builder_, level_, "smart_rule")) return false; if (!nextTokenIsSmart(builder_, SMART_TOKEN)) return false; boolean result_; Marker marker_ = 
enter_section_(builder_); result_ = consumeTokenSmart(builder_, SMART_TOKEN); result_ = result_ && consumeToken(builder_, "2"); result_ = result_ && consumeToken(builder_, "4"); exit_section_(builder_, marker_, SMART_RULE, result_); return result_; } /* ********************************************************** */ // token_smart | token_fast public static boolean smart_vs_fast(PsiBuilder builder_, int level_) { if (!recursion_guard_(builder_, level_, "smart_vs_fast")) return false; if (!nextTokenIsSmart(builder_, SAME_TOKEN)) return false; boolean result_; Marker marker_ = enter_section_(builder_); result_ = token_smart(builder_, level_ + 1); if (!result_) result_ = token_fast(builder_, level_ + 1); exit_section_(builder_, marker_, SMART_VS_FAST, result_); return result_; } /* ********************************************************** */ // token_smart | token_fast public static boolean smart_vs_fast_in_fast(PsiBuilder builder_, int level_) { if (!recursion_guard_(builder_, level_, "smart_vs_fast_in_fast")) return false; if (!nextTokenIsFast(builder_, SAME_TOKEN)) return false; boolean result_; Marker marker_ = enter_section_(builder_); result_ = token_smart(builder_, level_ + 1); if (!result_) result_ = token_fast(builder_, level_ + 1); exit_section_(builder_, marker_, SMART_VS_FAST_IN_FAST, result_); return result_; } /* ********************************************************** */ // token_smart | token_fast public static boolean smart_vs_fast_in_smart(PsiBuilder builder_, int level_) { if (!recursion_guard_(builder_, level_, "smart_vs_fast_in_smart")) return false; if (!nextTokenIsSmart(builder_, SAME_TOKEN)) return false; boolean result_; Marker marker_ = enter_section_(builder_); result_ = token_smart(builder_, level_ + 1); if (!result_) result_ = token_fast(builder_, level_ + 1); exit_section_(builder_, marker_, SMART_VS_FAST_IN_SMART, result_); return result_; } /* ********************************************************** */ // token_smart | token_regular public static boolean smart_vs_regular(PsiBuilder builder_, int level_) { if (!recursion_guard_(builder_, level_, "smart_vs_regular")) return false; if (!nextTokenIs(builder_, SAME_TOKEN)) return false; boolean result_; Marker marker_ = enter_section_(builder_); result_ = token_smart(builder_, level_ + 1); if (!result_) result_ = token_regular(builder_, level_ + 1); exit_section_(builder_, marker_, SMART_VS_REGULAR, result_); return result_; } /* ********************************************************** */ // token_smart | token_regular public static boolean smart_vs_regular_in_fast(PsiBuilder builder_, int level_) { if (!recursion_guard_(builder_, level_, "smart_vs_regular_in_fast")) return false; if (!nextTokenIsFast(builder_, SAME_TOKEN)) return false; boolean result_; Marker marker_ = enter_section_(builder_); result_ = token_smart(builder_, level_ + 1); if (!result_) result_ = token_regular(builder_, level_ + 1); exit_section_(builder_, marker_, SMART_VS_REGULAR_IN_FAST, result_); return result_; } /* ********************************************************** */ // token_smart | token_regular public static boolean smart_vs_regular_in_smart(PsiBuilder builder_, int level_) { if (!recursion_guard_(builder_, level_, "smart_vs_regular_in_smart")) return false; if (!nextTokenIsSmart(builder_, SAME_TOKEN)) return false; boolean result_; Marker marker_ = enter_section_(builder_); result_ = token_smart(builder_, level_ + 1); if (!result_) result_ = token_regular(builder_, level_ + 1); exit_section_(builder_, marker_, 
SMART_VS_REGULAR_IN_SMART, result_); return result_; } /* ********************************************************** */ // SAME_TOKEN 5 6 public static boolean token_fast(PsiBuilder builder_, int level_) { if (!recursion_guard_(builder_, level_, "token_fast")) return false; if (!nextTokenIsFast(builder_, SAME_TOKEN)) return false; boolean result_; Marker marker_ = enter_section_(builder_); result_ = consumeTokenFast(builder_, SAME_TOKEN); result_ = result_ && consumeToken(builder_, "5"); result_ = result_ && consumeToken(builder_, "6"); exit_section_(builder_, marker_, TOKEN_FAST, result_); return result_; } /* ********************************************************** */ // SAME_TOKEN 3 4 public static boolean token_regular(PsiBuilder builder_, int level_) { if (!recursion_guard_(builder_, level_, "token_regular")) return false; if (!nextTokenIs(builder_, SAME_TOKEN)) return false; boolean result_; Marker marker_ = enter_section_(builder_); result_ = consumeToken(builder_, SAME_TOKEN); result_ = result_ && consumeToken(builder_, "3"); result_ = result_ && consumeToken(builder_, "4"); exit_section_(builder_, marker_, TOKEN_REGULAR, result_); return result_; } /* ********************************************************** */ // SAME_TOKEN 4 5 public static boolean token_smart(PsiBuilder builder_, int level_) { if (!recursion_guard_(builder_, level_, "token_smart")) return false; if (!nextTokenIsSmart(builder_, SAME_TOKEN)) return false; boolean result_; Marker marker_ = enter_section_(builder_); result_ = consumeTokenSmart(builder_, SAME_TOKEN); result_ = result_ && consumeToken(builder_, "4"); result_ = result_ && consumeToken(builder_, "5"); exit_section_(builder_, marker_, TOKEN_SMART, result_); return result_; } }
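// --- Illustration (not part of the generated parser) ----------------------
// A hedged sketch of the Grammar-Kit BNF that could produce the rules above,
// reconstructed from the per-rule comments (e.g. "SAME_TOKEN 5 6"); the
// original grammar file is not shown, so the attribute spelling and exact
// rule set are assumptions.
//
//   regular_rule  ::= REGULAR_TOKEN 1 2 3
//   smart_rule    ::= SMART_TOKEN 2 4   {consumeTokenMethod="consumeTokenSmart"}
//   fast_rule     ::= FAST_TOKEN 3 5    {consumeTokenMethod="consumeTokenFast"}
//   token_regular ::= SAME_TOKEN 3 4
//   token_smart   ::= SAME_TOKEN 4 5    {consumeTokenMethod="consumeTokenSmart"}
//   token_fast    ::= SAME_TOKEN 5 6    {consumeTokenMethod="consumeTokenFast"}
//   fast_vs_regular ::= token_fast | token_regular
//   fast_predicate_vs_regular ::= &token_fast token_regular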
8,334
324
<reponame>morsvolia/mongo<filename>src/mongo/db/txn_complete_hooks.cpp<gh_stars>100-1000
/**
 *    Copyright (C) 2013 Tokutek Inc.
 *
 *    This program is free software: you can redistribute it and/or modify
 *    it under the terms of the GNU Affero General Public License, version 3,
 *    as published by the Free Software Foundation.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU Affero General Public License for more details.
 *
 *    You should have received a copy of the GNU Affero General Public License
 *    along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "mongo/pch.h"

#include "mongo/db/client.h"
#include "mongo/db/clientcursor.h"
#include "mongo/db/collection.h"
#include "mongo/db/collection_map.h"
#include "mongo/db/databaseholder.h"
#include "mongo/db/txn_context.h"

namespace mongo {

    TxnCompleteHooksImpl _txnCompleteHooks;

    void TxnCompleteHooksImpl::noteTxnCompletedInserts(const string &ns, const BSONObj &minPK,
                                                       long long nDelta, long long sizeDelta,
                                                       bool committed) {
        LOCK_REASON(lockReason, "txn: noting completed inserts");
        Lock::DBRead lk(ns, lockReason);
        if (dbHolder().__isLoaded(ns, dbpath)) {
            scoped_ptr<Client::Context> ctx(cc().getContext() == NULL ? new Client::Context(ns) : NULL);
            // Because this transaction did inserts, we're guaranteed to be the
            // only party capable of closing/reopening the ns due to file-ops.
            // So, if the ns is open, note the commit/abort to fix up in-memory
            // stats and do nothing otherwise since there are no stats to fix.
            //
            // Only matters for capped collections.
            CollectionMap *cm = collectionMap(ns);
            Collection *cl = cm->find_ns(ns);
            if (cl != NULL && cl->isCapped()) {
                CappedCollection *cappedCl = cl->as<CappedCollection>();
                if (committed) {
                    cappedCl->noteCommit(minPK, nDelta, sizeDelta);
                } else {
                    cappedCl->noteAbort(minPK, nDelta, sizeDelta);
                }
            }
        }
    }

    void TxnCompleteHooksImpl::noteTxnAbortedFileOps(const set<string> &namespaces,
                                                     const set<string> &dbs) {
        for (set<string>::const_iterator i = namespaces.begin(); i != namespaces.end(); i++) {
            const char *ns = i->c_str();

            // We cannot be holding a read lock at this point, since we're in one of two situations:
            // - Single-statement txn is aborting. If it did fileops, it had to hold a write lock,
            //   and therefore it still is.
            // - Multi-statement txn is aborting. The only way to do this is through a command that
            //   takes no lock, therefore we're not read locked.
            verify(!Lock::isReadLocked());

            // If something is already write locked we must be in the single-statement case, so
            // assert that the write locked namespace is this one.
            if (Lock::somethingWriteLocked()) {
                verify(Lock::isWriteLocked(ns));
            }

            // The ydb requires that a txn closes any dictionaries it created before aborting.
            // Hold a write lock while trying to close the namespace in the collection map.
            LOCK_REASON(lockReason, "txn: closing created dictionaries during txn abort");
            Lock::DBWrite lk(ns, lockReason);
            if (dbHolder().__isLoaded(ns, dbpath)) {
                scoped_ptr<Client::Context> ctx(cc().getContext() == NULL ? new Client::Context(ns) : NULL);
                // Pass aborting = true to close_ns(), which hints to the implementation
                // that the calling transaction is about to abort.
                (void) collectionMap(ns)->close_ns(ns, true);
            }
        }

        for (set<string>::const_iterator it = dbs.begin(); it != dbs.end(); ++it) {
            const string &db = *it;

            // The same locking rules above apply here.
            verify(!Lock::isReadLocked());
            if (Lock::somethingWriteLocked()) {
                verify(Lock::isWriteLocked(db));
            }

            LOCK_REASON(lockReason, "txn: rolling back db creates");
            Lock::DBWrite lk(db, lockReason);
            if (dbHolder().__isLoaded(db, dbpath)) {
                scoped_ptr<Client::Context> ctx(cc().getContext() == NULL ? new Client::Context(db) : NULL);
                collectionMap(db)->rollbackCreate();
            }
        }
    }

    // If a txn is completing, the cursors it created
    // must be killed before it can commit or abort.
    void TxnCompleteHooksImpl::noteTxnCompletedCursors(const set<long long> &cursorIds) {
        for (set<long long>::const_iterator i = cursorIds.begin(); i != cursorIds.end(); ++i) {
            ClientCursor::erase(*i);
        }
    }

} // namespace mongo
2,334
445
import re


def getnametime(x):
    # Lazy (.+?) keeps the name group from swallowing the first digit of a
    # two-digit hour (greedy .+ turned "12:30 PM" into "2:30 PM"); the raw
    # string avoids invalid-escape warnings on newer Pythons.
    matchobj = re.search(r"(.+?)(\d?\d:\d{2}\s*[AP]M)", x)
    if matchobj is None:
        raise ValueError("no name/time header found in line: " + repr(x))
    return (matchobj.group(1), matchobj.group(2))


class Message:
    def __init__(self, strings):
        self.strings = list(strings)
        self.name, self.time = getnametime(self.strings[0])
        self.strings.pop(0)
        self.strings = tuple(self.strings)

    def show(self):
        print("Name: " + self.name)
        print("Time: " + self.time)
        print("Messages: ")
        for i in self.strings:
            print(i)
        print('')

    def getname(self):
        return self.name

    def gettime(self):
        return self.time

    def getmessages(self):
        return self.strings
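# --- Illustration (not part of the original script) ------------------------
# A minimal usage sketch, assuming chat-export input where a header line
# carries the sender name and time, followed by the message lines; the
# sample text is invented.
if __name__ == "__main__":
    lines = [
        "Alice 12:30 PM",
        "hey, are we still on for tonight?",
        "I can be there by 7",
    ]
    msg = Message(lines)
    msg.show()
    assert msg.getname() == "Alice "   # the name group keeps its trailing space
    assert msg.gettime() == "12:30 PM"
    assert msg.getmessages() == ("hey, are we still on for tonight?",
                                 "I can be there by 7")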
347
13,885
/* Copyright (c) 2013 <NAME> - Advanced Micro Devices, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #pragma once #ifndef O3DGC_TRIANGLE_LIST_DECODER_H #define O3DGC_TRIANGLE_LIST_DECODER_H #include "o3dgcCommon.h" #include "o3dgcTriangleFans.h" #include "o3dgcBinaryStream.h" #include "o3dgcAdjacencyInfo.h" namespace o3dgc { //! template <class T> class TriangleListDecoder { public: //! Constructor. TriangleListDecoder(void) { m_vertexCount = 0; m_triangleCount = 0; m_numTriangles = 0; m_numVertices = 0; m_triangles = 0; m_numConqueredTriangles = 0; m_numVisitedVertices = 0; m_visitedVertices = 0; m_visitedVerticesValence = 0; m_maxNumVertices = 0; m_maxNumTriangles = 0; m_itNumTFans = 0; m_itDegree = 0; m_itConfig = 0; m_itOperation = 0; m_itIndex = 0; m_tempTriangles = 0; m_tempTrianglesSize = 0; m_decodeTrianglesOrder = false; m_decodeVerticesOrder = false; }; //! Destructor. 
~TriangleListDecoder(void) { delete [] m_tempTriangles; }; O3DGCStreamType GetStreamType() const { return m_streamType; } bool GetReorderTriangles() const { return m_decodeTrianglesOrder; } bool GetReorderVertices() const { return m_decodeVerticesOrder; } void SetStreamType(O3DGCStreamType streamType) { m_streamType = streamType; } const AdjacencyInfo & GetVertexToTriangle() const { return m_vertexToTriangle;} O3DGCErrorCode Decode(T * const triangles, const long numTriangles, const long numVertices, const BinaryStream & bstream, unsigned long & iterator) { unsigned char compressionMask = bstream.ReadUChar(iterator, m_streamType); m_decodeTrianglesOrder = ( (compressionMask&2) != 0); m_decodeVerticesOrder = ( (compressionMask&1) != 0); if (m_decodeVerticesOrder) // vertices reordering not supported { return O3DGC_ERROR_NON_SUPPORTED_FEATURE; } unsigned long maxSizeV2T = bstream.ReadUInt32(iterator, m_streamType); Init(triangles, numTriangles, numVertices, maxSizeV2T); m_ctfans.Load(bstream, iterator, m_decodeTrianglesOrder, m_streamType); Decompress(); return O3DGC_OK; } O3DGCErrorCode Reorder(); private: O3DGCErrorCode Init(T * const triangles, const long numTriangles, const long numVertices, const long maxSizeV2T); O3DGCErrorCode Decompress(); O3DGCErrorCode CompueLocalConnectivityInfo(const long focusVertex); O3DGCErrorCode DecompressTFAN(const long focusVertex); unsigned long m_itNumTFans; unsigned long m_itDegree; unsigned long m_itConfig; unsigned long m_itOperation; unsigned long m_itIndex; long m_maxNumVertices; long m_maxNumTriangles; long m_numTriangles; long m_numVertices; long m_tempTrianglesSize; T * m_triangles; T * m_tempTriangles; long m_vertexCount; long m_triangleCount; long m_numConqueredTriangles; long m_numVisitedVertices; long * m_visitedVertices; long * m_visitedVerticesValence; AdjacencyInfo m_vertexToTriangle; CompressedTriangleFans m_ctfans; TriangleFans m_tfans; O3DGCStreamType m_streamType; bool m_decodeTrianglesOrder; bool m_decodeVerticesOrder; }; } #include "o3dgcTriangleListDecoder.inl" // template implementation #endif // O3DGC_TRIANGLE_LIST_DECODER_H
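// --- Illustration (not part of the header) ---------------------------------
// A hedged usage sketch of the decoder above; the stream-type enum value is
// assumed from o3dgcCommon.h, and the caller is responsible for sizing the
// index buffer (3 * numTriangles entries).
//
//   o3dgc::TriangleListDecoder<unsigned short> decoder;
//   decoder.SetStreamType(o3dgc::O3DGC_STREAM_TYPE_BINARY);
//   if (decoder.Decode(indices, numTriangles, numVertices,
//                      bstream, iterator) == o3dgc::O3DGC_OK &&
//       decoder.GetReorderTriangles()) {
//       decoder.Reorder();   // restore the original triangle order
//   }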
4,345
534
{ "values": [ "mekanismadditions:black_plastic_glow_stairs", "mekanismadditions:blue_plastic_glow_stairs", "mekanismadditions:green_plastic_glow_stairs", "mekanismadditions:cyan_plastic_glow_stairs", "mekanismadditions:dark_red_plastic_glow_stairs", "mekanismadditions:purple_plastic_glow_stairs", "mekanismadditions:orange_plastic_glow_stairs", "mekanismadditions:light_gray_plastic_glow_stairs", "mekanismadditions:gray_plastic_glow_stairs", "mekanismadditions:light_blue_plastic_glow_stairs", "mekanismadditions:lime_plastic_glow_stairs", "mekanismadditions:aqua_plastic_glow_stairs", "mekanismadditions:red_plastic_glow_stairs", "mekanismadditions:magenta_plastic_glow_stairs", "mekanismadditions:yellow_plastic_glow_stairs", "mekanismadditions:white_plastic_glow_stairs", "mekanismadditions:brown_plastic_glow_stairs", "mekanismadditions:pink_plastic_glow_stairs" ] }
401
2,502
package com.shuzijun.leetcode.plugin.actions.toolbar;

import com.intellij.openapi.actionSystem.AnActionEvent;
import com.intellij.openapi.actionSystem.ToggleAction;
import com.shuzijun.leetcode.plugin.utils.DataKeys;
import com.shuzijun.leetcode.plugin.window.WindowFactory;

import javax.swing.*;

/**
 * @author shuzijun
 */
public class FindAction extends ToggleAction {

    @Override
    public boolean isSelected(AnActionEvent anActionEvent) {
        if (anActionEvent.getProject() == null) {
            //Why is it null?
            return false;
        }
        JPanel panel = WindowFactory.getDataContext(anActionEvent.getProject()).getData(DataKeys.LEETCODE_PROJECTS_TERRFIND);
        if (panel == null) {
            return false;
        }
        return panel.isVisible();
    }

    @Override
    public void setSelected(AnActionEvent anActionEvent, boolean b) {
        // Guard against a null project here as well, mirroring isSelected().
        if (anActionEvent.getProject() == null) {
            return;
        }
        JPanel panel = WindowFactory.getDataContext(anActionEvent.getProject()).getData(DataKeys.LEETCODE_PROJECTS_TERRFIND);
        if (panel == null) {
            return;
        }
        panel.setVisible(b);
    }
}
459
1,645
<filename>python/seldon/text/tests/test_docsim.py
import unittest
from seldon.text import DocumentSimilarity, DefaultJsonCorpus
import logging
from gensim import interfaces, utils
from gensim.corpora.dictionary import Dictionary
import copy


class Test_DocumentSimilarity(unittest.TestCase):

    def get_docs(self):
        return [{"id": 1, "text": "an article about sports and football, Arsenel, Liverpool", "tags": "football"},
                {"id": 2, "text": "an article about football and finance, Liverpool, Arsenel", "tags": "football"},
                {"id": 3, "text": "an article about money and lending", "tags": "money"},
                {"id": 4, "text": "an article about money and banking and lending", "tags": "money"}]

    def test_sklearn_nmf(self):
        corpus = DefaultJsonCorpus(self.get_docs())
        ds = DocumentSimilarity(model_type="sklearn_nmf")
        ds.fit(corpus)
        res = ds.nn(0, k=1)
        self.assertEqual(res[0][0], 1)

    def test_gensim_lsi(self):
        corpus = DefaultJsonCorpus(self.get_docs())
        ds = DocumentSimilarity(model_type="gensim_lsi")
        ds.fit(corpus)
        res = ds.nn(0, k=1)
        self.assertEqual(res[0][0], 1)

    def test_gensim_rp(self):
        corpus = DefaultJsonCorpus(self.get_docs())
        ds = DocumentSimilarity(model_type="gensim_rp")
        ds.fit(corpus)
        res = ds.nn(0, k=1)
        self.assertEqual(res[0][0], 1)

    def test_gensim_lsi_score(self):
        corpus = DefaultJsonCorpus(self.get_docs())
        ds = DocumentSimilarity(model_type="gensim_lsi")
        ds.fit(corpus)
        score = ds.score(k=1)
        self.assertEqual(score, 1.0)


if __name__ == '__main__':
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
    unittest.main()
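# --- Illustration (not part of the test suite) ------------------------------
# A minimal standalone sketch using only the API exercised by the tests above;
# the document set is invented and seldon/gensim must be installed.
def _example_similarity_query():
    docs = [{"id": 1, "text": "football match report", "tags": "sport"},
            {"id": 2, "text": "football transfer rumours", "tags": "sport"},
            {"id": 3, "text": "central bank interest rates", "tags": "finance"}]
    ds = DocumentSimilarity(model_type="gensim_lsi")
    ds.fit(DefaultJsonCorpus(docs))
    return ds.nn(0, k=1)   # [(neighbour_index, similarity)] for document 0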
804
6,811
#ifndef SCREEN_H
#define SCREEN_H

#include <SDL.h>

#include "ScreenSettings.h"

class Screen
{
public:
    void init(const struct ScreenSettings* settings);
    void destroy(void);

    void GetSettings(struct ScreenSettings* settings);

    void LoadIcon(void);

    void ResizeScreen(int x, int y);
    void ResizeToNearestMultiple(void);
    void GetWindowSize(int* x, int* y);

    void UpdateScreen(SDL_Surface* buffer, SDL_Rect* rect);
    void FlipScreen(bool flipmode);

    const SDL_PixelFormat* GetFormat(void);

    void toggleFullScreen(void);
    void toggleScalingMode(void);
    void toggleLinearFilter(void);
    void toggleVSync(void);

    bool isForcedFullscreen(void);

    bool isWindowed;
    bool isFiltered;
    bool badSignalEffect;
    int scalingMode;
    bool vsync;

    SDL_Window *m_window;
    SDL_Renderer *m_renderer;
    SDL_Texture *m_screenTexture;
    SDL_Surface* m_screen;
};

#ifndef GAMESCREEN_DEFINITION
extern Screen gameScreen;
#endif

#endif /* SCREEN_H */
382
799
EXPECTED_RANSOMWARE_CVES = [
    {
        "Cve": "CVE-0000-0000",
        "CVSS": 7.5,
        "VRR": 5.67,
        "ThreatCount": 1,
        "Trending": "false",
        "VulnLastTrendingOn": "2018-05-01",
        "Description": "remote code execution",
        "Threats": [{
            "Title": "Hunter Exploit Kit",
            "Category": "Ransomware",
            "Severity": "null",
            "Description": "",
            "Cve": [
                "CVE-0000-0000"
            ],
            "Source": "MCAFEE",
            "Published": "2017-08-03T00:00:00",
            "Updated": "2019-08-16T15:50:04",
            "ThreatLastTrendingOn": "2018-02-23",
            "Trending": "false"
        }]
    }
]

HOST_FINDING_DATA = [{
    "HostID": 3569982,
    "Vulnerability": [
        {
            "Cve": "CVE-0000-0000",
            "BaseScore": 7.5,
            "ThreatCount": 0,
            "AttackVector": "Network",
            "AccessComplexity": "Low",
            "Authentication": "None",
            "ConfidentialityImpact": "Partial",
            "Integrity": "Partial",
            "AvailabilityImpact": "Partial",
            "Trending": "false",
            "VulnLastTrendingOn": "2018-05-01",
            "Description": "remote code execution"
        }
    ],
    "ThreatCount": 0,
    "Threat": [
        {
            "Title": "Hunter Exploit Kit",
            "Category": "Ransomware",
            "Severity": "null",
            "Description": "",
            "Details": "",
            "Cve": [
                "CVE-0000-0000"
            ],
            "Source": "MCAFEE",
            "Published": "2017-08-03T00:00:00",
            "Updated": "2019-08-16T15:50:04",
            "ThreatLastTrendingOn": "2018-02-23",
            "Trending": "false",
            "Link": ""
        }
    ],
    "RiskRating": 5.67
}]


def test_header_transform():
    from RiskSenseGetRansomewareCVEScript import header_transform
    assert header_transform('CVSS') == 'CVSS Score'
    assert header_transform('VRR') == 'VRR Score'
    assert header_transform('ThreatCount') == 'Threat Count'
    assert header_transform('VulnLastTrendingOn') == 'Last Trending On Date'
    assert header_transform('Trending') == 'Trending'


def test_get_ransomware_cves():
    from RiskSenseGetRansomewareCVEScript import get_ransomware_cves
    ransomware_cves = get_ransomware_cves(HOST_FINDING_DATA)
    assert ransomware_cves == EXPECTED_RANSOMWARE_CVES


def test_display_ransomware_trending_cve_results():
    from RiskSenseGetRansomewareCVEScript import display_ransomware_trending_cve_results
    result = display_ransomware_trending_cve_results(EXPECTED_RANSOMWARE_CVES)
    assert result.outputs_prefix == 'RiskSense.RansomwareTrendingCves'
    assert result.outputs_key_field == 'Cve'


def test_display_ransomware_cve_results():
    from RiskSenseGetRansomewareCVEScript import display_ransomware_cve_results
    result = display_ransomware_cve_results(EXPECTED_RANSOMWARE_CVES)
    assert result.outputs_prefix == 'RiskSense.RansomwareCves'
    assert result.outputs_key_field == 'Cve'
    assert result.outputs == EXPECTED_RANSOMWARE_CVES
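# --- Illustration (not part of the test module) -----------------------------
# A hedged sketch of what RiskSenseGetRansomewareCVEScript.get_ransomware_cves
# plausibly does, inferred only from the fixtures above; the real script may
# differ.
def _get_ransomware_cves_sketch(host_findings):
    results = []
    for finding in host_findings:
        # Keep only ransomware threats; drop per-threat fields absent from
        # the expected output.
        ransomware = [t for t in finding.get('Threat', [])
                      if t.get('Category') == 'Ransomware']
        for vuln in finding.get('Vulnerability', []):
            threats = [{k: v for k, v in t.items() if k not in ('Details', 'Link')}
                       for t in ransomware if vuln['Cve'] in t.get('Cve', [])]
            if threats:
                results.append({
                    'Cve': vuln['Cve'],
                    'CVSS': vuln['BaseScore'],
                    'VRR': finding['RiskRating'],
                    'ThreatCount': len(threats),
                    'Trending': vuln['Trending'],
                    'VulnLastTrendingOn': vuln['VulnLastTrendingOn'],
                    'Description': vuln['Description'],
                    'Threats': threats,
                })
    return results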
1,546
2,150
<gh_stars>1000+
package org.javaee7.cdi.instance;

import org.jboss.arquillian.container.test.api.Deployment;
import org.jboss.arquillian.junit.Arquillian;
import org.jboss.shrinkwrap.api.Archive;
import org.jboss.shrinkwrap.api.ShrinkWrap;
import org.jboss.shrinkwrap.api.spec.JavaArchive;
import org.junit.Test;
import org.junit.runner.RunWith;

import javax.enterprise.inject.Default;
import javax.enterprise.inject.Instance;
import javax.enterprise.util.AnnotationLiteral;
import javax.inject.Inject;

import static org.hamcrest.CoreMatchers.instanceOf;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertThat;

/**
 * @author <NAME>
 */
@RunWith(Arquillian.class)
public class GreetingTest {

    @Deployment
    public static Archive<?> deploy() {
        return ShrinkWrap.create(JavaArchive.class)
            .addClasses(Greeting.class, SimpleGreeting.class, FormalGreeting.class, Business.class, Personal.class)
            .addAsManifestResource("beans.xml");
    }

    /**
     * Container will assume the built-in @Default qualifier here, as it does for beans that don't declare a qualifier.
     */
    @Inject
    private Instance<Greeting> instance;

    /**
     * Only an instance of the SimpleGreeting class should be available.<br/>
     *
     * When a dependent scoped bean is retrieved via an instance, an explicit destroy action should be taken.
     * This is a known memory leak in CDI 1.0, fixed in CDI 1.1; see the link below for details.
     *
     * @see <a href="https://issues.jboss.org/browse/CDI-139">CDI-139</a>
     */
    @Test
    public void test() throws Exception {
        assertFalse(instance.isUnsatisfied());
        assertFalse(instance.isAmbiguous());

        // use Instance<T>#get()
        Greeting bean = instance.get();
        assertThat(bean, instanceOf(SimpleGreeting.class));
        instance.destroy(bean);

        // use Instance<T>#select()
        Instance<Greeting> anotherInstance = instance.select(new AnnotationLiteral<Default>() {
        });
        Greeting anotherBean = anotherInstance.get();
        assertThat(anotherBean, instanceOf(SimpleGreeting.class));
        anotherInstance.destroy(anotherBean);
    }
}
709
6,989
#include "hash.h"
8
5,169
<filename>Specs/JMProgressStepper/0.0.1/JMProgressStepper.podspec.json
{
  "name": "JMProgressStepper",
  "version": "0.0.1",
  "summary": "A simple customizable stepper.",
  "description": "JMProgressStepper is a customizable view that displays\nthe progress of something defined by single steps.",
  "homepage": "https://github.com/JARMourato/JMProgressStepper",
  "license": "MIT",
  "authors": {
    "<NAME>": "<EMAIL>"
  },
  "source": {
    "git": "https://github.com/JARMourato/JMProgressStepper.git",
    "tag": "0.0.1"
  },
  "platforms": {
    "ios": "8.0"
  },
  "requires_arc": true,
  "source_files": "**/*",
  "exclude_files": [
    "JMProgressStepper.podspec",
    "LICENSE",
    "README.md"
  ]
}
282
400
<reponame>tradeshift-zihe/ofdrw
package org.ofdrw.core.basicStructure.pageTree;

import org.dom4j.Element;
import org.ofdrw.core.OFDElement;
import org.ofdrw.core.basicType.ST_ID;
import org.ofdrw.core.basicType.ST_Loc;

/**
 * Page node
 * <p>
 * 7.6 Page tree, Table 11: page tree attributes
 *
 * @author 权观宇
 * @since 2019-10-05 10:49:09
 */
public class Page extends OFDElement {
    public Page(Element proxy) {
        super(proxy);
    }

    public Page() {
        super("Page");
    }

    /**
     * @param id      object ID
     * @param baseLoc location of the page content
     */
    public Page(long id, String baseLoc) {
        this();
        this.setID(new ST_ID(id))
                .setBaseLoc(ST_Loc.getInstance(baseLoc));
    }

    public Page(ST_ID id, ST_Loc baseLoc) {
        this();
        this.setID(id)
                .setBaseLoc(baseLoc);
    }

    /**
     * [Required attribute]
     * Set the page identifier; it must not duplicate an existing identifier.
     *
     * @param id page identifier
     * @return this
     */
    public Page setID(ST_ID id) {
        this.addAttribute("ID", id.toString());
        return this;
    }

    /**
     * [Required attribute]
     * Get the page identifier; it must not duplicate an existing identifier.
     *
     * @return page identifier
     */
    public ST_ID getID() {
        return ST_ID.getInstance(this.attributeValue("ID"));
    }

    /**
     * [Required attribute]
     * Set the page object description file.
     *
     * @param baseLoc path of the page object description file
     * @return this
     */
    public Page setBaseLoc(ST_Loc baseLoc) {
        this.addAttribute("BaseLoc", baseLoc.toString());
        return this;
    }

    /**
     * [Required attribute]
     * Get the page object description file.
     *
     * @return path of the page object description file
     */
    public ST_Loc getBaseLoc() {
        return ST_Loc.getInstance(this.attributeValue("BaseLoc"));
    }
}
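// --- Illustration (not part of the class) -----------------------------------
// A minimal usage sketch for the page-tree node above; the object ID and the
// content path are invented.
class PageUsageSketch {
    static Page newContentPage() {
        // Page with object ID 12 whose description file sits under Pages/.
        return new Page(12, "Pages/Page_0/Content.xml");
    }
}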
1,028
1,942
/* ****************************************************************** * Huffman encoder, part of New Generation Entropy library * Copyright (c) <NAME>, Facebook, Inc. * * You can contact the author at : * - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy * - Public forum : https://groups.google.com/forum/#!forum/lz4c * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. ****************************************************************** */ /* ************************************************************** * Compiler specifics ****************************************************************/ #ifdef _MSC_VER /* Visual Studio */ # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ #endif /* ************************************************************** * Includes ****************************************************************/ #include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memset */ #include "../common/compiler.h" #include "../common/bitstream.h" #include "hist.h" #define FSE_STATIC_LINKING_ONLY /* FSE_optimalTableLog_internal */ #include "../common/fse.h" /* header compression */ #define HUF_STATIC_LINKING_ONLY #include "../common/huf.h" #include "../common/error_private.h" /* ************************************************************** * Error Management ****************************************************************/ #define HUF_isError ERR_isError #define HUF_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c) /* use only *after* variable declarations */ /* ************************************************************** * Utils ****************************************************************/ unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue) { return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1); } /* ******************************************************* * HUF : Huffman block compression *********************************************************/ #define HUF_WORKSPACE_MAX_ALIGNMENT 8 static void* HUF_alignUpWorkspace(void* workspace, size_t* workspaceSizePtr, size_t align) { size_t const mask = align - 1; size_t const rem = (size_t)workspace & mask; size_t const add = (align - rem) & mask; BYTE* const aligned = (BYTE*)workspace + add; assert((align & (align - 1)) == 0); /* pow 2 */ assert(align <= HUF_WORKSPACE_MAX_ALIGNMENT); if (*workspaceSizePtr >= add) { assert(add < align); assert(((size_t)aligned & mask) == 0); *workspaceSizePtr -= add; return aligned; } else { *workspaceSizePtr = 0; return NULL; } } /* HUF_compressWeights() : * Same as FSE_compress(), but dedicated to huff0's weights compression. * The use case needs much less stack memory. * Note : all elements within weightTable are supposed to be <= HUF_TABLELOG_MAX. 
*/ #define MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6 typedef struct { FSE_CTable CTable[FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX)]; U32 scratchBuffer[FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(HUF_TABLELOG_MAX, MAX_FSE_TABLELOG_FOR_HUFF_HEADER)]; unsigned count[HUF_TABLELOG_MAX+1]; S16 norm[HUF_TABLELOG_MAX+1]; } HUF_CompressWeightsWksp; static size_t HUF_compressWeights(void* dst, size_t dstSize, const void* weightTable, size_t wtSize, void* workspace, size_t workspaceSize) { BYTE* const ostart = (BYTE*) dst; BYTE* op = ostart; BYTE* const oend = ostart + dstSize; unsigned maxSymbolValue = HUF_TABLELOG_MAX; U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER; HUF_CompressWeightsWksp* wksp = (HUF_CompressWeightsWksp*)HUF_alignUpWorkspace(workspace, &workspaceSize, sizeof(U32)); if (workspaceSize < sizeof(HUF_CompressWeightsWksp)) return ERROR(GENERIC); /* init conditions */ if (wtSize <= 1) return 0; /* Not compressible */ /* Scan input and build symbol stats */ { unsigned const maxCount = HIST_count_simple(wksp->count, &maxSymbolValue, weightTable, wtSize); /* never fails */ if (maxCount == wtSize) return 1; /* only a single symbol in src : rle */ if (maxCount == 1) return 0; /* each symbol present maximum once => not compressible */ } tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue); CHECK_F( FSE_normalizeCount(wksp->norm, tableLog, wksp->count, wtSize, maxSymbolValue, /* useLowProbCount */ 0) ); /* Write table description header */ { CHECK_V_F(hSize, FSE_writeNCount(op, (size_t)(oend-op), wksp->norm, maxSymbolValue, tableLog) ); op += hSize; } /* Compress */ CHECK_F( FSE_buildCTable_wksp(wksp->CTable, wksp->norm, maxSymbolValue, tableLog, wksp->scratchBuffer, sizeof(wksp->scratchBuffer)) ); { CHECK_V_F(cSize, FSE_compress_usingCTable(op, (size_t)(oend - op), weightTable, wtSize, wksp->CTable) ); if (cSize == 0) return 0; /* not enough space for compressed data */ op += cSize; } return (size_t)(op-ostart); } static size_t HUF_getNbBits(HUF_CElt elt) { return elt & 0xFF; } static size_t HUF_getNbBitsFast(HUF_CElt elt) { return elt; } static size_t HUF_getValue(HUF_CElt elt) { return elt & ~0xFF; } static size_t HUF_getValueFast(HUF_CElt elt) { return elt; } static void HUF_setNbBits(HUF_CElt* elt, size_t nbBits) { assert(nbBits <= HUF_TABLELOG_ABSOLUTEMAX); *elt = nbBits; } static void HUF_setValue(HUF_CElt* elt, size_t value) { size_t const nbBits = HUF_getNbBits(*elt); if (nbBits > 0) { assert((value >> nbBits) == 0); *elt |= value << (sizeof(HUF_CElt) * 8 - nbBits); } } typedef struct { HUF_CompressWeightsWksp wksp; BYTE bitsToWeight[HUF_TABLELOG_MAX + 1]; /* precomputed conversion table */ BYTE huffWeight[HUF_SYMBOLVALUE_MAX]; } HUF_WriteCTableWksp; size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog, void* workspace, size_t workspaceSize) { HUF_CElt const* const ct = CTable + 1; BYTE* op = (BYTE*)dst; U32 n; HUF_WriteCTableWksp* wksp = (HUF_WriteCTableWksp*)HUF_alignUpWorkspace(workspace, &workspaceSize, sizeof(U32)); /* check conditions */ if (workspaceSize < sizeof(HUF_WriteCTableWksp)) return ERROR(GENERIC); if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge); /* convert to weight */ wksp->bitsToWeight[0] = 0; for (n=1; n<huffLog+1; n++) wksp->bitsToWeight[n] = (BYTE)(huffLog + 1 - n); for (n=0; n<maxSymbolValue; n++) wksp->huffWeight[n] = wksp->bitsToWeight[HUF_getNbBits(ct[n])]; /* attempt weights compression by FSE */ if (maxDstSize < 1) return 
ERROR(dstSize_tooSmall); { CHECK_V_F(hSize, HUF_compressWeights(op+1, maxDstSize-1, wksp->huffWeight, maxSymbolValue, &wksp->wksp, sizeof(wksp->wksp)) ); if ((hSize>1) & (hSize < maxSymbolValue/2)) { /* FSE compressed */ op[0] = (BYTE)hSize; return hSize+1; } } /* write raw values as 4-bits (max : 15) */ if (maxSymbolValue > (256-128)) return ERROR(GENERIC); /* should not happen : likely means source cannot be compressed */ if (((maxSymbolValue+1)/2) + 1 > maxDstSize) return ERROR(dstSize_tooSmall); /* not enough space within dst buffer */ op[0] = (BYTE)(128 /*special case*/ + (maxSymbolValue-1)); wksp->huffWeight[maxSymbolValue] = 0; /* to be sure it doesn't cause msan issue in final combination */ for (n=0; n<maxSymbolValue; n+=2) op[(n/2)+1] = (BYTE)((wksp->huffWeight[n] << 4) + wksp->huffWeight[n+1]); return ((maxSymbolValue+1)/2) + 1; } /*! HUF_writeCTable() : `CTable` : Huffman tree to save, using huf representation. @return : size of saved CTable */ size_t HUF_writeCTable (void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog) { HUF_WriteCTableWksp wksp; return HUF_writeCTable_wksp(dst, maxDstSize, CTable, maxSymbolValue, huffLog, &wksp, sizeof(wksp)); } size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned* hasZeroWeights) { BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1]; /* init not required, even though some static analyzer may complain */ U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1]; /* large enough for values from 0 to 16 */ U32 tableLog = 0; U32 nbSymbols = 0; HUF_CElt* const ct = CTable + 1; /* get symbol weights */ CHECK_V_F(readSize, HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX+1, rankVal, &nbSymbols, &tableLog, src, srcSize)); *hasZeroWeights = (rankVal[0] > 0); /* check result */ if (tableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge); if (nbSymbols > *maxSymbolValuePtr+1) return ERROR(maxSymbolValue_tooSmall); CTable[0] = tableLog; /* Prepare base value per rank */ { U32 n, nextRankStart = 0; for (n=1; n<=tableLog; n++) { U32 curr = nextRankStart; nextRankStart += (rankVal[n] << (n-1)); rankVal[n] = curr; } } /* fill nbBits */ { U32 n; for (n=0; n<nbSymbols; n++) { const U32 w = huffWeight[n]; HUF_setNbBits(ct + n, (BYTE)(tableLog + 1 - w) & -(w != 0)); } } /* fill val */ { U16 nbPerRank[HUF_TABLELOG_MAX+2] = {0}; /* support w=0=>n=tableLog+1 */ U16 valPerRank[HUF_TABLELOG_MAX+2] = {0}; { U32 n; for (n=0; n<nbSymbols; n++) nbPerRank[HUF_getNbBits(ct[n])]++; } /* determine stating value per rank */ valPerRank[tableLog+1] = 0; /* for w==0 */ { U16 min = 0; U32 n; for (n=tableLog; n>0; n--) { /* start at n=tablelog <-> w=1 */ valPerRank[n] = min; /* get starting value within each rank */ min += nbPerRank[n]; min >>= 1; } } /* assign value within rank, symbol order */ { U32 n; for (n=0; n<nbSymbols; n++) HUF_setValue(ct + n, valPerRank[HUF_getNbBits(ct[n])]++); } } *maxSymbolValuePtr = nbSymbols - 1; return readSize; } U32 HUF_getNbBitsFromCTable(HUF_CElt const* CTable, U32 symbolValue) { const HUF_CElt* ct = CTable + 1; assert(symbolValue <= HUF_SYMBOLVALUE_MAX); return (U32)HUF_getNbBits(ct[symbolValue]); } typedef struct nodeElt_s { U32 count; U16 parent; BYTE byte; BYTE nbBits; } nodeElt; /** * HUF_setMaxHeight(): * Enforces maxNbBits on the Huffman tree described in huffNode. * * It sets all nodes with nbBits > maxNbBits to be maxNbBits. Then it adjusts * the tree to so that it is a valid canonical Huffman tree. 
 * * @pre The sum of the ranks of each symbol == 2^largestBits, * where largestBits == huffNode[lastNonNull].nbBits. * @post The sum of the ranks of each symbol == 2^largestBits, * where largestBits is the return value <= maxNbBits. * * @param huffNode The Huffman tree modified in place to enforce maxNbBits. * @param lastNonNull The symbol with the lowest count in the Huffman tree. * @param maxNbBits The maximum allowed number of bits, which the Huffman tree * may not respect. After this function the Huffman tree will * respect maxNbBits. * @return The maximum number of bits of the Huffman tree after adjustment, * necessarily no more than maxNbBits. */ static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits) { const U32 largestBits = huffNode[lastNonNull].nbBits; /* early exit : no elt > maxNbBits, so the tree is already valid. */ if (largestBits <= maxNbBits) return largestBits; /* there are several too large elements (at least >= 2) */ { int totalCost = 0; const U32 baseCost = 1 << (largestBits - maxNbBits); int n = (int)lastNonNull; /* Adjust any ranks > maxNbBits to maxNbBits. * Compute totalCost, which is how far over 2^largestBits the sum of * the ranks is after adjusting the offending ranks. */ while (huffNode[n].nbBits > maxNbBits) { totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits)); huffNode[n].nbBits = (BYTE)maxNbBits; n--; } /* n stops at huffNode[n].nbBits <= maxNbBits */ assert(huffNode[n].nbBits <= maxNbBits); /* n ends at the index of the smallest symbol using < maxNbBits */ while (huffNode[n].nbBits == maxNbBits) --n; /* renorm totalCost from 2^largestBits to 2^maxNbBits * note : totalCost is necessarily a multiple of baseCost */ assert((totalCost & (baseCost - 1)) == 0); totalCost >>= (largestBits - maxNbBits); assert(totalCost > 0); /* repay normalized cost */ { U32 const noSymbol = 0xF0F0F0F0; U32 rankLast[HUF_TABLELOG_MAX+2]; /* Get pos of last (smallest = lowest cum. count) symbol per rank */ ZSTD_memset(rankLast, 0xF0, sizeof(rankLast)); { U32 currentNbBits = maxNbBits; int pos; for (pos=n ; pos >= 0; pos--) { if (huffNode[pos].nbBits >= currentNbBits) continue; currentNbBits = huffNode[pos].nbBits; /* < maxNbBits */ rankLast[maxNbBits-currentNbBits] = (U32)pos; } } while (totalCost > 0) { /* Try to reduce the next power of 2 above totalCost because we * gain back half the rank. */ U32 nBitsToDecrease = BIT_highbit32((U32)totalCost) + 1; for ( ; nBitsToDecrease > 1; nBitsToDecrease--) { U32 const highPos = rankLast[nBitsToDecrease]; U32 const lowPos = rankLast[nBitsToDecrease-1]; if (highPos == noSymbol) continue; /* Decrease highPos if no symbols of lowPos or if it is * not cheaper to remove 2 lowPos than highPos. */ if (lowPos == noSymbol) break; { U32 const highTotal = huffNode[highPos].count; U32 const lowTotal = 2 * huffNode[lowPos].count; if (highTotal <= lowTotal) break; } } /* only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) */ assert(rankLast[nBitsToDecrease] != noSymbol || nBitsToDecrease == 1); /* HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary */ while ((nBitsToDecrease<=HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol)) nBitsToDecrease++; assert(rankLast[nBitsToDecrease] != noSymbol); /* Increase the number of bits to gain back half the rank cost. */ totalCost -= 1 << (nBitsToDecrease-1); huffNode[rankLast[nBitsToDecrease]].nbBits++; /* Fix up the new rank. * If the new rank was empty, this symbol is now its smallest.
* Otherwise, this symbol will be the largest in the new rank so no adjustment. */ if (rankLast[nBitsToDecrease-1] == noSymbol) rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease]; /* Fix up the old rank. * If the symbol was at position 0, meaning it was the highest weight symbol in the tree, * it must be the only symbol in its rank, so the old rank now has no symbols. * Otherwise, since the Huffman nodes are sorted by count, the previous position is now * the smallest node in the rank. If the previous position belongs to a different rank, * then the rank is now empty. */ if (rankLast[nBitsToDecrease] == 0) /* special case, reached largest symbol */ rankLast[nBitsToDecrease] = noSymbol; else { rankLast[nBitsToDecrease]--; if (huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease) rankLast[nBitsToDecrease] = noSymbol; /* this rank is now empty */ } } /* while (totalCost > 0) */ /* If we've removed too much weight, then we have to add it back. * To avoid overshooting again, we only adjust the smallest rank. * We take the largest nodes from the lowest rank 0 and move them * to rank 1. There's guaranteed to be enough rank 0 symbols because * TODO. */ while (totalCost < 0) { /* Sometimes, cost correction overshoot */ /* special case : no rank 1 symbol (using maxNbBits-1); * let's create one from largest rank 0 (using maxNbBits). */ if (rankLast[1] == noSymbol) { while (huffNode[n].nbBits == maxNbBits) n--; huffNode[n+1].nbBits--; assert(n >= 0); rankLast[1] = (U32)(n+1); totalCost++; continue; } huffNode[ rankLast[1] + 1 ].nbBits--; rankLast[1]++; totalCost ++; } } /* repay normalized cost */ } /* there are several too large elements (at least >= 2) */ return maxNbBits; } typedef struct { U16 base; U16 curr; } rankPos; typedef nodeElt huffNodeTable[HUF_CTABLE_WORKSPACE_SIZE_U32]; /* Number of buckets available for HUF_sort() */ #define RANK_POSITION_TABLE_SIZE 128 typedef struct { huffNodeTable huffNodeTbl; rankPos rankPosition[RANK_POSITION_TABLE_SIZE]; } HUF_buildCTable_wksp_tables; /* RANK_POSITION_DISTINCT_COUNT_CUTOFF == Cutoff point in HUF_sort() buckets for which we use log2 bucketing. * Strategy is to use as many buckets as possible for representing distinct * counts while using the remainder to represent all counts up to HUF_BLOCKSIZE_MAX * using log2 bucketing. * * To satisfy this requirement for 128 buckets, we can do the following: * Let buckets 0-114 represent distinct counts of [0, 114] * Let buckets 115 to 126 represent counts of [115, HUF_BLOCKSIZE_MAX]. (the final bucket 127 must remain empty) * * Note that we don't actually need 17 buckets (assuming 2^17 maxcount) for log2 bucketing since * the first few buckets in the log2 bucketing representation are already covered by the distinct count bucketing. */ #define RANK_POSITION_LOG_BUCKETS_BEGIN (RANK_POSITION_TABLE_SIZE - 1) - BIT_highbit32(HUF_BLOCKSIZE_MAX) - 1 #define RANK_POSITION_DISTINCT_COUNT_CUTOFF RANK_POSITION_LOG_BUCKETS_BEGIN + BIT_highbit32(RANK_POSITION_LOG_BUCKETS_BEGIN) /* Return the appropriate bucket index for a given count. See definition of * RANK_POSITION_DISTINCT_COUNT_CUTOFF for explanation of bucketing strategy. */ static U32 HUF_getIndex(U32 const count) { return (count < RANK_POSITION_DISTINCT_COUNT_CUTOFF) ? 
count : BIT_highbit32(count) + RANK_POSITION_LOG_BUCKETS_BEGIN; } /* Helper swap function for HUF_quickSortPartition() */ static void HUF_swapNodes(nodeElt* a, nodeElt* b) { nodeElt tmp = *a; *a = *b; *b = tmp; } /* Returns 0 if the huffNode array is not sorted by descending count */ UNUSED_ATTR static int HUF_isSorted(nodeElt huffNode[], U32 const maxSymbolValue1) { U32 i; for (i = 1; i < maxSymbolValue1; ++i) { if (huffNode[i].count > huffNode[i-1].count) { return 0; } } return 1; } /* Insertion sort by descending order */ HINT_INLINE void HUF_insertionSort(nodeElt huffNode[], int const low, int const high) { int i; int const size = high-low+1; huffNode += low; for (i = 1; i < size; ++i) { nodeElt const key = huffNode[i]; int j = i - 1; while (j >= 0 && huffNode[j].count < key.count) { huffNode[j + 1] = huffNode[j]; j--; } huffNode[j + 1] = key; } } /* Pivot helper function for quicksort. */ static int HUF_quickSortPartition(nodeElt arr[], int const low, int const high) { /* Simply select rightmost element as pivot. "Better" selectors like * median-of-three don't experimentally appear to have any benefit. */ U32 const pivot = arr[high].count; int i = low - 1; int j = low; for ( ; j < high; j++) { if (arr[j].count > pivot) { i++; HUF_swapNodes(&arr[i], &arr[j]); } } HUF_swapNodes(&arr[i + 1], &arr[high]); return i + 1; } /* Classic quicksort by descending with partially iterative calls * to reduce worst case callstack size. */ static void HUF_simpleQuickSort(nodeElt arr[], int low, int high) { int const kInsertionSortThreshold = 8; if (high - low < kInsertionSortThreshold) { HUF_insertionSort(arr, low, high); return; } while (low < high) { int const idx = HUF_quickSortPartition(arr, low, high); if (idx - low < high - idx) { HUF_simpleQuickSort(arr, low, idx - 1); low = idx + 1; } else { HUF_simpleQuickSort(arr, idx + 1, high); high = idx - 1; } } } /** * HUF_sort(): * Sorts the symbols [0, maxSymbolValue] by count[symbol] in decreasing order. * This is a typical bucket sorting strategy that uses either quicksort or insertion sort to sort each bucket. * * @param[out] huffNode Sorted symbols by decreasing count. Only members `.count` and `.byte` are filled. * Must have (maxSymbolValue + 1) entries. * @param[in] count Histogram of the symbols. * @param[in] maxSymbolValue Maximum symbol value. * @param rankPosition This is a scratch workspace. Must have RANK_POSITION_TABLE_SIZE entries. */ static void HUF_sort(nodeElt huffNode[], const unsigned count[], U32 const maxSymbolValue, rankPos rankPosition[]) { U32 n; U32 const maxSymbolValue1 = maxSymbolValue+1; /* Compute base and set curr to base. * For symbol s let lowerRank = HUF_getIndex(count[n]) and rank = lowerRank + 1. * See HUF_getIndex to see bucketing strategy. * We attribute each symbol to lowerRank's base value, because we want to know where * each rank begins in the output, so for rank R we want to count ranks R+1 and above. */ ZSTD_memset(rankPosition, 0, sizeof(*rankPosition) * RANK_POSITION_TABLE_SIZE); for (n = 0; n < maxSymbolValue1; ++n) { U32 lowerRank = HUF_getIndex(count[n]); assert(lowerRank < RANK_POSITION_TABLE_SIZE - 1); rankPosition[lowerRank].base++; } assert(rankPosition[RANK_POSITION_TABLE_SIZE - 1].base == 0); /* Set up the rankPosition table */ for (n = RANK_POSITION_TABLE_SIZE - 1; n > 0; --n) { rankPosition[n-1].base += rankPosition[n].base; rankPosition[n-1].curr = rankPosition[n-1].base; } /* Insert each symbol into their appropriate bucket, setting up rankPosition table. 
*/ for (n = 0; n < maxSymbolValue1; ++n) { U32 const c = count[n]; U32 const r = HUF_getIndex(c) + 1; U32 const pos = rankPosition[r].curr++; assert(pos < maxSymbolValue1); huffNode[pos].count = c; huffNode[pos].byte = (BYTE)n; } /* Sort each bucket. */ for (n = RANK_POSITION_DISTINCT_COUNT_CUTOFF; n < RANK_POSITION_TABLE_SIZE - 1; ++n) { U32 const bucketSize = rankPosition[n].curr-rankPosition[n].base; U32 const bucketStartIdx = rankPosition[n].base; if (bucketSize > 1) { assert(bucketStartIdx < maxSymbolValue1); HUF_simpleQuickSort(huffNode + bucketStartIdx, 0, bucketSize-1); } } assert(HUF_isSorted(huffNode, maxSymbolValue1)); } /** HUF_buildCTable_wksp() : * Same as HUF_buildCTable(), but using externally allocated scratch buffer. * `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as sizeof(HUF_buildCTable_wksp_tables). */ #define STARTNODE (HUF_SYMBOLVALUE_MAX+1) /* HUF_buildTree(): * Takes the huffNode array sorted by HUF_sort() and builds an unlimited-depth Huffman tree. * * @param huffNode The array sorted by HUF_sort(). Builds the Huffman tree in this array. * @param maxSymbolValue The maximum symbol value. * @return The smallest node in the Huffman tree (by count). */ static int HUF_buildTree(nodeElt* huffNode, U32 maxSymbolValue) { nodeElt* const huffNode0 = huffNode - 1; int nonNullRank; int lowS, lowN; int nodeNb = STARTNODE; int n, nodeRoot; /* init for parents */ nonNullRank = (int)maxSymbolValue; while(huffNode[nonNullRank].count == 0) nonNullRank--; lowS = nonNullRank; nodeRoot = nodeNb + lowS - 1; lowN = nodeNb; huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count; huffNode[lowS].parent = huffNode[lowS-1].parent = (U16)nodeNb; nodeNb++; lowS-=2; for (n=nodeNb; n<=nodeRoot; n++) huffNode[n].count = (U32)(1U<<30); huffNode0[0].count = (U32)(1U<<31); /* fake entry, strong barrier */ /* create parents */ while (nodeNb <= nodeRoot) { int const n1 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++; int const n2 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++; huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count; huffNode[n1].parent = huffNode[n2].parent = (U16)nodeNb; nodeNb++; } /* distribute weights (unlimited tree height) */ huffNode[nodeRoot].nbBits = 0; for (n=nodeRoot-1; n>=STARTNODE; n--) huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1; for (n=0; n<=nonNullRank; n++) huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1; return nonNullRank; } /** * HUF_buildCTableFromTree(): * Build the CTable given the Huffman tree in huffNode. * * @param[out] CTable The output Huffman CTable. * @param huffNode The Huffman tree. * @param nonNullRank The last and smallest node in the Huffman tree. * @param maxSymbolValue The maximum symbol value. * @param maxNbBits The exact maximum number of bits used in the Huffman tree. 
*/ static void HUF_buildCTableFromTree(HUF_CElt* CTable, nodeElt const* huffNode, int nonNullRank, U32 maxSymbolValue, U32 maxNbBits) { HUF_CElt* const ct = CTable + 1; /* fill result into ctable (val, nbBits) */ int n; U16 nbPerRank[HUF_TABLELOG_MAX+1] = {0}; U16 valPerRank[HUF_TABLELOG_MAX+1] = {0}; int const alphabetSize = (int)(maxSymbolValue + 1); for (n=0; n<=nonNullRank; n++) nbPerRank[huffNode[n].nbBits]++; /* determine starting value per rank */ { U16 min = 0; for (n=(int)maxNbBits; n>0; n--) { valPerRank[n] = min; /* get starting value within each rank */ min += nbPerRank[n]; min >>= 1; } } for (n=0; n<alphabetSize; n++) HUF_setNbBits(ct + huffNode[n].byte, huffNode[n].nbBits); /* push nbBits per symbol, symbol order */ for (n=0; n<alphabetSize; n++) HUF_setValue(ct + n, valPerRank[HUF_getNbBits(ct[n])]++); /* assign value within rank, symbol order */ CTable[0] = maxNbBits; } size_t HUF_buildCTable_wksp (HUF_CElt* CTable, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize) { HUF_buildCTable_wksp_tables* const wksp_tables = (HUF_buildCTable_wksp_tables*)HUF_alignUpWorkspace(workSpace, &wkspSize, sizeof(U32)); nodeElt* const huffNode0 = wksp_tables->huffNodeTbl; nodeElt* const huffNode = huffNode0+1; int nonNullRank; /* safety checks */ if (wkspSize < sizeof(HUF_buildCTable_wksp_tables)) return ERROR(workSpace_tooSmall); if (maxNbBits == 0) maxNbBits = HUF_TABLELOG_DEFAULT; if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge); ZSTD_memset(huffNode0, 0, sizeof(huffNodeTable)); /* sort, decreasing order */ HUF_sort(huffNode, count, maxSymbolValue, wksp_tables->rankPosition); /* build tree */ nonNullRank = HUF_buildTree(huffNode, maxSymbolValue); /* enforce maxTableLog */ maxNbBits = HUF_setMaxHeight(huffNode, (U32)nonNullRank, maxNbBits); if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC); /* check fit into table */ HUF_buildCTableFromTree(CTable, huffNode, nonNullRank, maxSymbolValue, maxNbBits); return maxNbBits; } size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) { HUF_CElt const* ct = CTable + 1; size_t nbBits = 0; int s; for (s = 0; s <= (int)maxSymbolValue; ++s) { nbBits += HUF_getNbBits(ct[s]) * count[s]; } return nbBits >> 3; } int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) { HUF_CElt const* ct = CTable + 1; int bad = 0; int s; for (s = 0; s <= (int)maxSymbolValue; ++s) { bad |= (count[s] != 0) & (HUF_getNbBits(ct[s]) == 0); } return !bad; } size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); } /** HUF_CStream_t: * Huffman uses its own BIT_CStream_t implementation. * There are three major differences from BIT_CStream_t: * 1. HUF_addBits() takes a HUF_CElt (size_t) which is * the pair (nbBits, value) in the format: * format: * - Bits [0, 4) = nbBits * - Bits [4, 64 - nbBits) = 0 * - Bits [64 - nbBits, 64) = value * 2. The bitContainer is built from the upper bits and * right shifted. E.g. to add a new value of N bits * you right shift the bitContainer by N, then or in * the new value into the N upper bits. * 3. The bitstream has two bit containers. You can add * bits to the second container and merge them into * the first container. */ #define HUF_BITS_IN_CONTAINER (sizeof(size_t) * 8) typedef struct { size_t bitContainer[2]; size_t bitPos[2]; BYTE* startPtr; BYTE* ptr; BYTE* endPtr; } HUF_CStream_t; /**! HUF_initCStream(): * Initializes the bitstream. * @returns 0 or an error code. 
*/ static size_t HUF_initCStream(HUF_CStream_t* bitC, void* startPtr, size_t dstCapacity) { ZSTD_memset(bitC, 0, sizeof(*bitC)); bitC->startPtr = (BYTE*)startPtr; bitC->ptr = bitC->startPtr; bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->bitContainer[0]); if (dstCapacity <= sizeof(bitC->bitContainer[0])) return ERROR(dstSize_tooSmall); return 0; } /*! HUF_addBits(): * Adds the symbol stored in HUF_CElt elt to the bitstream. * * @param elt The element we're adding. This is a (nbBits, value) pair. * See the HUF_CStream_t docs for the format. * @param idx Insert into the bitstream at this idx. * @param kFast This is a template parameter. If the bitstream is guaranteed * to have at least 4 unused bits after this call it may be 1, * otherwise it must be 0. HUF_addBits() is faster when fast is set. */ FORCE_INLINE_TEMPLATE void HUF_addBits(HUF_CStream_t* bitC, HUF_CElt elt, int idx, int kFast) { assert(idx <= 1); assert(HUF_getNbBits(elt) <= HUF_TABLELOG_ABSOLUTEMAX); /* This is efficient on x86-64 with BMI2 because shrx * only reads the low 6 bits of the register. The compiler * knows this and elides the mask. When fast is set, * every operation can use the same value loaded from elt. */ bitC->bitContainer[idx] >>= HUF_getNbBits(elt); bitC->bitContainer[idx] |= kFast ? HUF_getValueFast(elt) : HUF_getValue(elt); /* We only read the low 8 bits of bitC->bitPos[idx] so it * doesn't matter that the high bits have noise from the value. */ bitC->bitPos[idx] += HUF_getNbBitsFast(elt); assert((bitC->bitPos[idx] & 0xFF) <= HUF_BITS_IN_CONTAINER); /* The last 4-bits of elt are dirty if fast is set, * so we must not be overwriting bits that have already been * inserted into the bit container. */ #if DEBUGLEVEL >= 1 { size_t const nbBits = HUF_getNbBits(elt); size_t const dirtyBits = nbBits == 0 ? 0 : BIT_highbit32((U32)nbBits) + 1; /* Middle bits are 0. */ assert(((elt >> dirtyBits) << (dirtyBits + nbBits)) == 0); /* We didn't overwrite any bits in the bit container. */ assert(!kFast || (bitC->bitPos[idx] & 0xFF) <= HUF_BITS_IN_CONTAINER); } #endif } FORCE_INLINE_TEMPLATE void HUF_zeroIndex1(HUF_CStream_t* bitC) { bitC->bitContainer[1] = 0; bitC->bitPos[1] = 0; } /*! HUF_mergeIndex1() : * Merges the bit container @ index 1 into the bit container @ index 0 * and zeros the bit container @ index 1. */ FORCE_INLINE_TEMPLATE void HUF_mergeIndex1(HUF_CStream_t* bitC) { assert((bitC->bitPos[1] & 0xFF) < HUF_BITS_IN_CONTAINER); bitC->bitContainer[0] >>= (bitC->bitPos[1] & 0xFF); bitC->bitContainer[0] |= bitC->bitContainer[1]; bitC->bitPos[0] += bitC->bitPos[1]; assert((bitC->bitPos[0] & 0xFF) <= HUF_BITS_IN_CONTAINER); } /*! HUF_flushBits() : * Flushes the bits in the bit container @ index 0. * * @post bitPos will be < 8. * @param kFast If kFast is set then we must know a-priori that * the bit container will not overflow. */ FORCE_INLINE_TEMPLATE void HUF_flushBits(HUF_CStream_t* bitC, int kFast) { /* The upper bits of bitPos are noisy, so we must mask by 0xFF. */ size_t const nbBits = bitC->bitPos[0] & 0xFF; size_t const nbBytes = nbBits >> 3; /* The top nbBits bits of bitContainer are the ones we need. */ size_t const bitContainer = bitC->bitContainer[0] >> (HUF_BITS_IN_CONTAINER - nbBits); /* Mask bitPos to account for the bytes we consumed. 
*/ bitC->bitPos[0] &= 7; assert(nbBits > 0); assert(nbBits <= sizeof(bitC->bitContainer[0]) * 8); assert(bitC->ptr <= bitC->endPtr); MEM_writeLEST(bitC->ptr, bitContainer); bitC->ptr += nbBytes; assert(!kFast || bitC->ptr <= bitC->endPtr); if (!kFast && bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr; /* bitContainer doesn't need to be modified because the leftover * bits are already the top bitPos bits. And we don't care about * noise in the lower values. */ } /*! HUF_endMark() * @returns The Huffman stream end mark: A 1-bit value = 1. */ static HUF_CElt HUF_endMark(void) { HUF_CElt endMark; HUF_setNbBits(&endMark, 1); HUF_setValue(&endMark, 1); return endMark; } /*! HUF_closeCStream() : * @return Size of CStream, in bytes, * or 0 if it could not fit into dstBuffer */ static size_t HUF_closeCStream(HUF_CStream_t* bitC) { HUF_addBits(bitC, HUF_endMark(), /* idx */ 0, /* kFast */ 0); HUF_flushBits(bitC, /* kFast */ 0); { size_t const nbBits = bitC->bitPos[0] & 0xFF; if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */ return (bitC->ptr - bitC->startPtr) + (nbBits > 0); } } FORCE_INLINE_TEMPLATE void HUF_encodeSymbol(HUF_CStream_t* bitCPtr, U32 symbol, const HUF_CElt* CTable, int idx, int fast) { HUF_addBits(bitCPtr, CTable[symbol], idx, fast); } FORCE_INLINE_TEMPLATE void HUF_compress1X_usingCTable_internal_body_loop(HUF_CStream_t* bitC, const BYTE* ip, size_t srcSize, const HUF_CElt* ct, int kUnroll, int kFastFlush, int kLastFast) { /* Join to kUnroll */ int n = (int)srcSize; int rem = n % kUnroll; if (rem > 0) { for (; rem > 0; --rem) { HUF_encodeSymbol(bitC, ip[--n], ct, 0, /* fast */ 0); } HUF_flushBits(bitC, kFastFlush); } assert(n % kUnroll == 0); /* Join to 2 * kUnroll */ if (n % (2 * kUnroll)) { int u; for (u = 1; u < kUnroll; ++u) { HUF_encodeSymbol(bitC, ip[n - u], ct, 0, 1); } HUF_encodeSymbol(bitC, ip[n - kUnroll], ct, 0, kLastFast); HUF_flushBits(bitC, kFastFlush); n -= kUnroll; } assert(n % (2 * kUnroll) == 0); for (; n>0; n-= 2 * kUnroll) { /* Encode kUnroll symbols into the bitstream @ index 0. */ int u; for (u = 1; u < kUnroll; ++u) { HUF_encodeSymbol(bitC, ip[n - u], ct, /* idx */ 0, /* fast */ 1); } HUF_encodeSymbol(bitC, ip[n - kUnroll], ct, /* idx */ 0, /* fast */ kLastFast); HUF_flushBits(bitC, kFastFlush); /* Encode kUnroll symbols into the bitstream @ index 1. * This allows us to start filling the bit container * without any data dependencies. */ HUF_zeroIndex1(bitC); for (u = 1; u < kUnroll; ++u) { HUF_encodeSymbol(bitC, ip[n - kUnroll - u], ct, /* idx */ 1, /* fast */ 1); } HUF_encodeSymbol(bitC, ip[n - kUnroll - kUnroll], ct, /* idx */ 1, /* fast */ kLastFast); /* Merge bitstream @ index 1 into the bitstream @ index 0 */ HUF_mergeIndex1(bitC); HUF_flushBits(bitC, kFastFlush); } assert(n == 0); } /** * Returns a tight upper bound on the output space needed by Huffman * with 8 bytes buffer to handle over-writes. If the output is at least * this large we don't need to do bounds checks during Huffman encoding. 
*/ static size_t HUF_tightCompressBound(size_t srcSize, size_t tableLog) { return ((srcSize * tableLog) >> 3) + 8; } FORCE_INLINE_TEMPLATE size_t HUF_compress1X_usingCTable_internal_body(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable) { U32 const tableLog = (U32)CTable[0]; HUF_CElt const* ct = CTable + 1; const BYTE* ip = (const BYTE*) src; BYTE* const ostart = (BYTE*)dst; BYTE* const oend = ostart + dstSize; BYTE* op = ostart; HUF_CStream_t bitC; /* init */ if (dstSize < 8) return 0; /* not enough space to compress */ { size_t const initErr = HUF_initCStream(&bitC, op, (size_t)(oend-op)); if (HUF_isError(initErr)) return 0; } if (dstSize < HUF_tightCompressBound(srcSize, (size_t)tableLog) || tableLog > 11) HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ MEM_32bits() ? 2 : 4, /* kFast */ 0, /* kLastFast */ 0); else { if (MEM_32bits()) { switch (tableLog) { case 11: HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 2, /* kFastFlush */ 1, /* kLastFast */ 0); break; case 10: case 9: case 8: HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 2, /* kFastFlush */ 1, /* kLastFast */ 1); break; case 7: default: HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 3, /* kFastFlush */ 1, /* kLastFast */ 1); break; } } else { switch (tableLog) { case 11: HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 5, /* kFastFlush */ 1, /* kLastFast */ 0); break; case 10: HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 5, /* kFastFlush */ 1, /* kLastFast */ 1); break; case 9: HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 6, /* kFastFlush */ 1, /* kLastFast */ 0); break; case 8: HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 7, /* kFastFlush */ 1, /* kLastFast */ 0); break; case 7: HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 8, /* kFastFlush */ 1, /* kLastFast */ 0); break; case 6: default: HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 9, /* kFastFlush */ 1, /* kLastFast */ 1); break; } } } assert(bitC.ptr <= bitC.endPtr); return HUF_closeCStream(&bitC); } #if DYNAMIC_BMI2 static TARGET_ATTRIBUTE("bmi2") size_t HUF_compress1X_usingCTable_internal_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable) { return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable); } static size_t HUF_compress1X_usingCTable_internal_default(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable) { return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable); } static size_t HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, const int bmi2) { if (bmi2) { return HUF_compress1X_usingCTable_internal_bmi2(dst, dstSize, src, srcSize, CTable); } return HUF_compress1X_usingCTable_internal_default(dst, dstSize, src, srcSize, CTable); } #else static size_t HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, const int bmi2) { (void)bmi2; return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable); } #endif size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t 
srcSize, const HUF_CElt* CTable) { return HUF_compress1X_usingCTable_bmi2(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0); } size_t HUF_compress1X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2) { return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, bmi2); } static size_t HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2) { size_t const segmentSize = (srcSize+3)/4; /* first 3 segments */ const BYTE* ip = (const BYTE*) src; const BYTE* const iend = ip + srcSize; BYTE* const ostart = (BYTE*) dst; BYTE* const oend = ostart + dstSize; BYTE* op = ostart; if (dstSize < 6 + 1 + 1 + 1 + 8) return 0; /* minimum space to compress successfully */ if (srcSize < 12) return 0; /* no saving possible : too small input */ op += 6; /* jumpTable */ assert(op <= oend); { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) ); if (cSize == 0 || cSize > 65535) return 0; MEM_writeLE16(ostart, (U16)cSize); op += cSize; } ip += segmentSize; assert(op <= oend); { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) ); if (cSize == 0 || cSize > 65535) return 0; MEM_writeLE16(ostart+2, (U16)cSize); op += cSize; } ip += segmentSize; assert(op <= oend); { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) ); if (cSize == 0 || cSize > 65535) return 0; MEM_writeLE16(ostart+4, (U16)cSize); op += cSize; } ip += segmentSize; assert(op <= oend); assert(ip <= iend); { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, (size_t)(iend-ip), CTable, bmi2) ); if (cSize == 0 || cSize > 65535) return 0; op += cSize; } return (size_t)(op-ostart); } size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable) { return HUF_compress4X_usingCTable_bmi2(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0); } size_t HUF_compress4X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2) { return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, bmi2); } typedef enum { HUF_singleStream, HUF_fourStreams } HUF_nbStreams_e; static size_t HUF_compressCTable_internal( BYTE* const ostart, BYTE* op, BYTE* const oend, const void* src, size_t srcSize, HUF_nbStreams_e nbStreams, const HUF_CElt* CTable, const int bmi2) { size_t const cSize = (nbStreams==HUF_singleStream) ? 
HUF_compress1X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, bmi2) : HUF_compress4X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, bmi2); if (HUF_isError(cSize)) { return cSize; } if (cSize==0) { return 0; } /* uncompressible */ op += cSize; /* check compressibility */ assert(op >= ostart); if ((size_t)(op-ostart) >= srcSize-1) { return 0; } return (size_t)(op-ostart); } typedef struct { unsigned count[HUF_SYMBOLVALUE_MAX + 1]; HUF_CElt CTable[HUF_CTABLE_SIZE_ST(HUF_SYMBOLVALUE_MAX)]; union { HUF_buildCTable_wksp_tables buildCTable_wksp; HUF_WriteCTableWksp writeCTable_wksp; U32 hist_wksp[HIST_WKSP_SIZE_U32]; } wksps; } HUF_compress_tables_t; #define SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE 4096 #define SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO 10 /* Must be >= 2 */ /* HUF_compress_internal() : * `workSpace_align4` must be aligned on 4-bytes boundaries, * and occupies the same space as a table of HUF_WORKSPACE_SIZE_U64 unsigned */ static size_t HUF_compress_internal (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, HUF_nbStreams_e nbStreams, void* workSpace, size_t wkspSize, HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat, const int bmi2, unsigned suspectUncompressible) { HUF_compress_tables_t* const table = (HUF_compress_tables_t*)HUF_alignUpWorkspace(workSpace, &wkspSize, sizeof(size_t)); BYTE* const ostart = (BYTE*)dst; BYTE* const oend = ostart + dstSize; BYTE* op = ostart; HUF_STATIC_ASSERT(sizeof(*table) + HUF_WORKSPACE_MAX_ALIGNMENT <= HUF_WORKSPACE_SIZE); /* checks & inits */ if (wkspSize < sizeof(*table)) return ERROR(workSpace_tooSmall); if (!srcSize) return 0; /* Uncompressed */ if (!dstSize) return 0; /* cannot fit anything within dst budget */ if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong); /* current block size limit */ if (huffLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge); if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge); if (!maxSymbolValue) maxSymbolValue = HUF_SYMBOLVALUE_MAX; if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT; /* Heuristic : If old table is valid, use it for small inputs */ if (preferRepeat && repeat && *repeat == HUF_repeat_valid) { return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, nbStreams, oldHufTable, bmi2); } /* If uncompressible data is suspected, do a smaller sampling first */ DEBUG_STATIC_ASSERT(SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO >= 2); if (suspectUncompressible && srcSize >= (SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE * SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO)) { size_t largestTotal = 0; { unsigned maxSymbolValueBegin = maxSymbolValue; CHECK_V_F(largestBegin, HIST_count_simple (table->count, &maxSymbolValueBegin, (const BYTE*)src, SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) ); largestTotal += largestBegin; } { unsigned maxSymbolValueEnd = maxSymbolValue; CHECK_V_F(largestEnd, HIST_count_simple (table->count, &maxSymbolValueEnd, (const BYTE*)src + srcSize - SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE, SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) ); largestTotal += largestEnd; } if (largestTotal <= ((2 * SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) >> 7)+4) return 0; /* heuristic : probably not compressible enough */ } /* Scan input and build symbol stats */ { CHECK_V_F(largest, HIST_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, table->wksps.hist_wksp, sizeof(table->wksps.hist_wksp)) ); if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; } /* single symbol, rle */ if (largest <= (srcSize >> 
7)+4) return 0; /* heuristic : probably not compressible enough */ } /* Check validity of previous table */ if ( repeat && *repeat == HUF_repeat_check && !HUF_validateCTable(oldHufTable, table->count, maxSymbolValue)) { *repeat = HUF_repeat_none; } /* Heuristic : use existing table for small inputs */ if (preferRepeat && repeat && *repeat != HUF_repeat_none) { return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, nbStreams, oldHufTable, bmi2); } /* Build Huffman Tree */ huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue); { size_t const maxBits = HUF_buildCTable_wksp(table->CTable, table->count, maxSymbolValue, huffLog, &table->wksps.buildCTable_wksp, sizeof(table->wksps.buildCTable_wksp)); CHECK_F(maxBits); huffLog = (U32)maxBits; } /* Zero unused symbols in CTable, so we can check it for validity */ { size_t const ctableSize = HUF_CTABLE_SIZE_ST(maxSymbolValue); size_t const unusedSize = sizeof(table->CTable) - ctableSize * sizeof(HUF_CElt); ZSTD_memset(table->CTable + ctableSize, 0, unusedSize); } /* Write table description header */ { CHECK_V_F(hSize, HUF_writeCTable_wksp(op, dstSize, table->CTable, maxSymbolValue, huffLog, &table->wksps.writeCTable_wksp, sizeof(table->wksps.writeCTable_wksp)) ); /* Check if using previous huffman table is beneficial */ if (repeat && *repeat != HUF_repeat_none) { size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, table->count, maxSymbolValue); size_t const newSize = HUF_estimateCompressedSize(table->CTable, table->count, maxSymbolValue); if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) { return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, nbStreams, oldHufTable, bmi2); } } /* Use the new huffman table */ if (hSize + 12ul >= srcSize) { return 0; } op += hSize; if (repeat) { *repeat = HUF_repeat_none; } if (oldHufTable) ZSTD_memcpy(oldHufTable, table->CTable, sizeof(table->CTable)); /* Save new table */ } return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, nbStreams, table->CTable, bmi2); } size_t HUF_compress1X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void* workSpace, size_t wkspSize) { return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, HUF_singleStream, workSpace, wkspSize, NULL, NULL, 0, 0 /*bmi2*/, 0); } size_t HUF_compress1X_repeat (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void* workSpace, size_t wkspSize, HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2, unsigned suspectUncompressible) { return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, HUF_singleStream, workSpace, wkspSize, hufTable, repeat, preferRepeat, bmi2, suspectUncompressible); } /* HUF_compress4X_repeat(): * compress input using 4 streams. * provide workspace to generate compression tables */ size_t HUF_compress4X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void* workSpace, size_t wkspSize) { return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, HUF_fourStreams, workSpace, wkspSize, NULL, NULL, 0, 0 /*bmi2*/, 0); } /* HUF_compress4X_repeat(): * compress input using 4 streams. 
* consider skipping quickly * re-use an existing huffman compression table */ size_t HUF_compress4X_repeat (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void* workSpace, size_t wkspSize, HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2, unsigned suspectUncompressible) { return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, HUF_fourStreams, workSpace, wkspSize, hufTable, repeat, preferRepeat, bmi2, suspectUncompressible); } #ifndef ZSTD_NO_UNUSED_FUNCTIONS /** HUF_buildCTable() : * @return : maxNbBits * Note : count is used before tree is written, so they can safely overlap */ size_t HUF_buildCTable (HUF_CElt* tree, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits) { HUF_buildCTable_wksp_tables workspace; return HUF_buildCTable_wksp(tree, count, maxSymbolValue, maxNbBits, &workspace, sizeof(workspace)); } size_t HUF_compress1X (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog) { U64 workSpace[HUF_WORKSPACE_SIZE_U64]; return HUF_compress1X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace)); } size_t HUF_compress2 (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog) { U64 workSpace[HUF_WORKSPACE_SIZE_U64]; return HUF_compress4X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace)); } size_t HUF_compress (void* dst, size_t maxDstSize, const void* src, size_t srcSize) { return HUF_compress2(dst, maxDstSize, src, srcSize, 255, HUF_TABLELOG_DEFAULT); } #endif
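
/* Illustrative usage sketch of the one-shot entry points above, assuming the
 * convenience wrappers are compiled in (i.e. ZSTD_NO_UNUSED_FUNCTIONS is not
 * defined) and huf.h is included by this translation unit. The wrapper name
 * and the fall-back-to-raw policy are assumptions for illustration only. */
#ifndef ZSTD_NO_UNUSED_FUNCTIONS
static size_t HUF_exampleCompressOnce(void* dst, size_t dstCapacity,
                                      const void* src, size_t srcSize)
{
    /* Sizing dst with HUF_compressBound(srcSize) rules out dstSize_tooSmall. */
    size_t const cSize = HUF_compress(dst, dstCapacity, src, srcSize);
    if (HUF_isError(cSize)) return cSize;  /* hard error, e.g. srcSize too large */
    if (cSize == 0) return 0;              /* not compressible: caller should store raw */
    return cSize;                          /* number of compressed bytes written */
}
#endif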
25,447
876
#pragma once

namespace rc {
namespace test {

struct NonCopyableModel {
  int value = 0;

  NonCopyableModel() = default;
  NonCopyableModel(const NonCopyableModel &) = delete;
  NonCopyableModel &operator=(const NonCopyableModel &) = delete;
  NonCopyableModel(NonCopyableModel &&) = default;
  NonCopyableModel &operator=(NonCopyableModel &&) = default;
};

using NonCopyableCmd = state::Command<NonCopyableModel, NonCopyableModel>;

struct NonCopyableInc : public NonCopyableCmd {
  void checkPreconditions(const NonCopyableModel &model) const override {
    RC_PRE(model.value < 20);
  }

  void apply(NonCopyableModel &model) const override { model.value++; }

  void run(const NonCopyableModel &s0, NonCopyableModel &sut) const override {
    sut.value++;
  }
};

struct NonCopyableDec : public NonCopyableCmd {
  void checkPreconditions(const NonCopyableModel &model) const override {
    RC_PRE(model.value > 0);
  }

  void apply(NonCopyableModel &model) const override { model.value--; }

  void run(const NonCopyableModel &s0, NonCopyableModel &sut) const override {
    sut.value--;
  }
};

inline NonCopyableModel initialNonCopyableModel() { return NonCopyableModel(); }

inline Gen<state::Commands<NonCopyableCmd>> genNonCopyableCommands() {
  return state::gen::commands(
      &initialNonCopyableModel,
      [](const NonCopyableModel &model) {
        return state::gen::execOneOfWithArgs<NonCopyableInc, NonCopyableDec>()(
            model.value);
      });
}

} // namespace test
} // namespace rc
508
368
<reponame>gta-chaos-mod/plugin-sdk
/*
    Plugin-SDK (Grand Theft Auto 3) header file
    Authors: GTA Community. See more here
    https://github.com/DK22Pac/plugin-sdk
    Do not delete this comment block. Respect others' work!
*/
#pragma once

#include "PluginBase.h"
#include "CAutomobile.h"

class CCam {
public:
    bool m_bBelowMinDist;
    bool m_bBehindPlayerDesired;
    bool m_bCamLookingAtVector;
    bool m_bCollisionChecksOn;
    bool m_bFixingBeta;
    bool m_bTheHeightFixerVehicleIsATrain;
    bool m_bLookBehindCamWasInFront;
    bool m_bLookingBehind;
    bool m_bLookingLeft;
    bool m_bLookingRight;
    bool m_bResetStatics;
    bool m_bRotating;
    short m_nCamMode;
    //short _padE;
    unsigned int m_nFinishTime;
    int m_nDoCollisionChecksOnFrameNum;
    int m_nDoCollisionCheckEveryNumOfFrames;
    int m_nFrameNumWereAt;
    int m_nRunningVectorArrayPos;
    int m_nRunningVectorCounter;
    int m_nDirectionWasLooking;
    float m_fMaxRoleAngle;
    float m_fRoll;
    float m_fRollSpeed;
    float m_fSyphonModeTargetZOffSet;
    float m_fAmountFractionObscured;
    float field_40;
    float m_fAlphaSpeedOverOneFrame;
    float m_fBetaSpeedOverOneFrame;
    float m_fBufferedTargetBeta;
    float m_fBufferedTargetOrientation;
    float m_fBufferedTargetOrientationSpeed;
    float m_fCamBufferedHeight;
    float m_fCamBufferedHeightSpeed;
    float m_fCloseInPedHeightOffset;
    float m_fCloseInPedHeightOffsetSpeed;
    float m_fCloseInCarHeightOffset;
    float m_fCloseInCarHeightOffsetSpeed;
    float m_fDimensionOfHighestNearCar;
    float m_fDistanceBeforeChanges;
    float m_fFovSpeedOverOneFrame;
    float m_fMinDistAwayFromCamWhenInterPolating;
    float m_fPedBetweenCameraHeightOffset;
    float m_fPlayerInFrontSyphonAngleOffSet;
    float m_fRadiusForDead;
    float m_fRealGroundDist;
    float m_fTargetBeta;
    float m_fTimeElapsedFloat;
    float m_fTransitionBeta;
    float m_fTrueBeta;
    float m_fTrueAlpha;
    float m_fInitialPlayerOrientation;
    float m_fVerticalAngle;
    float m_fAlphaSpeed;
    float m_fFOV;
    float m_fFOVSpeed;
    float m_fHorizontalAngle;
    float m_fBetaSpeed;
    float m_fDistance;
    float m_fDistanceSpeed;
    float m_fCaMinDistance;
    float m_fCaMaxDistance;
    float m_fSpeedVar;
    CVector m_vecCamSourceSpeedOverOneFrame;
    CVector m_vecCamTargetSpeedOverOneFrame;
    CVector m_vecCamUpOverOneFrame;
    CVector m_vecTargetCoorsForFudgeInter;
    CVector m_vecCamFixedModeVector;
    CVector m_vecCamFixedModeSource;
    CVector m_vecCamFixedModeUpOffSet;
    CVector m_vecLastAboveWaterCamPosition;
    CVector m_vecBufferedPlayerBodyOffset;
    CVector m_vecFront;
    CVector m_vecSource;
    CVector m_vecSourceBeforeLookBehind;
    CVector m_vecUp;
    CVector m_avecPreviousVectors[2];
    CEntity *m_pCamTargetEntity;
    float m_fCameraDistance;
    float m_fIdealAlpha;
    float m_fPlayerVelocity;
    CAutomobile *m_pLastCarEntered;
    CPed *m_pLastPedLookedAt;
    int m_bFirstPersonRunAboutActive;
};

VALIDATE_SIZE(CCam, 0x1A4);
1,261
2,177
<reponame>yagosys/AlfredWorkflow.com { "version": 1.0, "download_url": "https://raw.github.com/vitorgalvao/alfred-workflows/master/bin/PinUnread.alfredworkflow", "description": "First version" }
80
4,639
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase

from oscar.apps.dashboard.partners import views
from oscar.test.factories import PartnerFactory, PermissionFactory, UserFactory


class TestPartnerUserUnlinkView(TestCase):

    def test_remove_dashboard_permission(self):
        user = UserFactory(is_staff=False)
        permission = PermissionFactory(
            codename='dashboard_access',
            content_type=ContentType.objects.get(
                app_label='partner', model='partner'))
        user.user_permissions.add(permission)
        partner = PartnerFactory()
        partner.users.add(user)

        view = views.PartnerUserUnlinkView()
        view.unlink_user(user, partner)

        self.assertEqual(partner.users.count(), 0)
        self.assertTrue(Permission.objects.filter(pk=permission.pk).exists())
350
348
<filename>docs/data/leg-t2/037/03702166.json {"nom":"Neuillé-le-Lierre","circ":"2ème circonscription","dpt":"Indre-et-Loire","inscrits":516,"abs":303,"votants":213,"blancs":27,"nuls":2,"exp":184,"res":[{"nuance":"REM","nom":"<NAME>","voix":94},{"nuance":"LR","nom":"<NAME>","voix":90}]}
121
340
import re

# Read the matrix dimensions: n rows, m columns.
n, m = input().strip().split(' ')
n, m = [int(n), int(m)]

# Read the n rows of the matrix.
matrix = []
for _ in range(n):
    matrix_t = str(input())
    matrix.append(matrix_t)

# Decode the script by reading the matrix column by column.
complete = ""
for el in zip(*matrix):
    complete += "".join(el)

# Replace any run of non-alphanumeric characters that sits between two
# alphanumeric characters with a single space.
print(re.sub(r'(?<=\w)([^\w]+)(?=\w)', " ", complete))
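# Worked example of the substitution above (sample string is hypothetical):
#   re.sub(r'(?<=\w)([^\w]+)(?=\w)', " ", "This$#is% Matrix#  %!")
#   -> 'This is Matrix#  %!'
# A run of symbols becomes one space only when it sits between two
# alphanumerics; leading and trailing symbols fail the lookarounds and survive.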
136
449
<gh_stars>100-1000
#include <stdio.h>
#include <stdlib.h>

#include "Parser.h"
#include "Printer.h"
#include "Absyn.h"
#include "Interpreter.h"

int main(int argc, char ** argv)
{
  FILE *input;
  input = stdin;
  Exp parse_tree = pExp(input);
  if (parse_tree)
  {
    printf("%d\n", interpret(parse_tree));
    return 0;
  }
  return 1;
}
150
14,668
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef CHROME_BROWSER_EXTENSIONS_EXTENSION_VIEW_H_
#define CHROME_BROWSER_EXTENSIONS_EXTENSION_VIEW_H_

#include "ui/gfx/native_widget_types.h"

namespace content {
struct NativeWebKeyboardEvent;
class RenderFrameHost;
class WebContents;
}

namespace gfx {
class Size;
}

namespace extensions {

// Base class for platform-specific views used by extensions in the Chrome UI.
class ExtensionView {
 public:
  virtual ~ExtensionView() {}

  // Returns the extension's native view.
  virtual gfx::NativeView GetNativeView() = 0;

  // Method for the ExtensionHost to notify us about the correct size for
  // extension contents.
  virtual void ResizeDueToAutoResize(content::WebContents* web_contents,
                                     const gfx::Size& new_size) = 0;

  // Method for the ExtensionHost to notify us when a renderer frame connection
  // is created.
  virtual void RenderFrameCreated(
      content::RenderFrameHost* render_frame_host) = 0;

  // Handles unhandled keyboard messages coming back from the renderer process.
  virtual bool HandleKeyboardEvent(
      content::WebContents* source,
      const content::NativeWebKeyboardEvent& event) = 0;

  // Method for the ExtensionHost to notify that the extension page has loaded.
  virtual void OnLoaded() = 0;
};

}  // namespace extensions

#endif  // CHROME_BROWSER_EXTENSIONS_EXTENSION_VIEW_H_
479
3,579
/*
 * Copyright 2015, The Querydsl Team (http://www.querydsl.com/team)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.querydsl.jpa.domain4;

import java.io.Serializable;
import java.util.List;

import javax.persistence.*;

@Embeddable
@Access(AccessType.PROPERTY)
public class BookDefinition implements Serializable {

    private static final long serialVersionUID = 3570098308959717614L;

    private String name;

    private String description;

    private List<BookMark> bookMarks;

    @Basic
    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    @Basic
    public String getDescription() {
        return description;
    }

    public void setDescription(String description) {
        this.description = description;
    }

    @ElementCollection()
    @CollectionTable(name = "book_bookmarks")
    @OrderColumn()
    public List<BookMark> getBookMarks() {
        return bookMarks;
    }

    public void setBookMarks(List<BookMark> bookMarks) {
        this.bookMarks = bookMarks;
    }
}
522
601
package com.java3y.austin.web.vo.amis;

import com.fasterxml.jackson.annotation.JsonProperty;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;

import java.util.List;

/**
 * @author 3y
 * VO for charts
 * https://aisuda.bce.baidu.com/amis/zh-CN/components/chart
 * https://www.runoob.com/echarts/echarts-setup.html
 */
@Data
@Builder
@AllArgsConstructor
@NoArgsConstructor
public class EchartsVo {

    /**
     * title: chart title
     */
    @JsonProperty
    private TitleVO title;

    /**
     * tooltip: hover tooltip
     */
    @JsonProperty
    private TooltipVO tooltip;

    /**
     * legend: chart legend
     */
    @JsonProperty
    private LegendVO legend;

    /**
     * xAxis: the x axis
     */
    @JsonProperty
    private XAxisVO xAxis;

    /**
     * yAxis: the y axis
     */
    @JsonProperty
    private YAxisVO yAxis;

    /**
     * series: list of series
     * <p>
     * Each series decides its own chart type through its "type" field.
     */
    @JsonProperty
    private List<SeriesVO> series;

    /**
     * TitleVO
     */
    @Data
    @Builder
    public static class TitleVO {
        /**
         * text
         */
        private String text;
    }

    /**
     * TooltipVO
     */
    @Data
    @Builder
    public static class TooltipVO {
        private String color;
    }

    /**
     * LegendVO
     */
    @Data
    @Builder
    public static class LegendVO {
        /**
         * data
         */
        private List<String> data;
    }

    /**
     * XAxisVO
     */
    @Data
    @Builder
    public static class XAxisVO {
        /**
         * data
         */
        private List<String> data;
    }

    /**
     * YAxisVO
     */
    @Data
    @Builder
    public static class YAxisVO {
        private String type;
    }

    /**
     * SeriesVO
     */
    @Data
    @Builder
    public static class SeriesVO {
        /**
         * name
         */
        private String name;
        /**
         * type
         */
        private String type;
        /**
         * data
         */
        private List<Integer> data;
    }

}
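// Illustrative construction through the Lombok builders above (all values are
// hypothetical sample data, not taken from the project):
//
//     EchartsVo vo = EchartsVo.builder()
//             .title(EchartsVo.TitleVO.builder().text("daily send count").build())
//             .xAxis(EchartsVo.XAxisVO.builder().data(Arrays.asList("Mon", "Tue", "Wed")).build())
//             .yAxis(EchartsVo.YAxisVO.builder().type("value").build())
//             .series(Collections.singletonList(EchartsVo.SeriesVO.builder()
//                     .name("sms").type("bar").data(Arrays.asList(120, 200, 150)).build()))
//             .build();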
1,064
1,014
<filename>Database/Equities/Countries/Germany/Industries/Shell Companies.json { "TIOAU": { "short_name": "<NAME>", "long_name": "<NAME>", "summary": "Tio Tech A intends to effect a merger, share exchange, asset acquisition, share purchase, reorganization, or similar business combination with one or more businesses. The company was incorporated in 2021 and is based in Berlin, Germany.", "currency": "USD", "sector": "Financial Services", "industry": "Shell Companies", "exchange": "NCM", "market": "us_market", "country": "Germany", "state": null, "city": "Berlin", "zipcode": "10117", "website": null, "market_cap": null } }
311
3,269
<filename>C++/reverse-string-ii.cpp
// Time:  O(n)
// Space: O(1)

class Solution {
public:
    string reverseStr(string s, int k) {
        for (int left = 0; left < s.size(); left += 2 * k) {
            for (int i = left, j = min(left + k - 1, static_cast<int>(s.size()) - 1);
                 i < j; ++i, --j) {
                swap(s[i], s[j]);
            }
        }
        return s;
    }
};
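// Worked example: reverseStr("abcdefg", 2) reverses the first 2 characters of
// every 4-character window, giving "bacdfeg"; the min() bound handles a tail
// window that holds fewer than k characters.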
218
348
{"nom":"Héricourt-sur-Thérain","dpt":"Oise","inscrits":103,"abs":30,"votants":73,"blancs":6,"nuls":1,"exp":66,"res":[{"panneau":"2","voix":35},{"panneau":"1","voix":31}]}
74
1,142
# Copyright (c) Microsoft Corporation and Fairlearn contributors.
# Licensed under the MIT License.

"""Preprocessing tools to help deal with sensitive attributes."""

from ._correlation_remover import CorrelationRemover

__all__ = ["CorrelationRemover"]
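# Minimal usage sketch, assuming the scikit-learn style transformer API of the
# class exported above (feature ids and data are hypothetical):
#
#   from fairlearn.preprocessing import CorrelationRemover
#   remover = CorrelationRemover(sensitive_feature_ids=["gender"])
#   X_decorrelated = remover.fit_transform(X)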
61
4,047
<gh_stars>1000+
#include "../lib.h"

SYMBOL_EXPORT int
get_stnodep_value (void)
{
  return 2;
}
46
922
<gh_stars>100-1000
from dataclasses import dataclass, field
from typing import List, Type

from dataclasses_json import dataclass_json
from paiargparse import pai_dataclass
from tfaip.data.pipeline.datagenerator import DataGenerator

from calamari_ocr.ocr.dataset.datareader.base import CalamariDataGeneratorParams


@dataclass_json
@dataclass
class LineGeneratorParams:
    fonts: List[str] = field(default_factory=lambda: ["Junicode.ttf", "DejaVuSerif.ttf"])
    font_size: int = 32
    min_script_offset: float = -0.5
    max_script_offset: float = 0.5


@dataclass_json
@dataclass
class TextGeneratorParams:
    word_length_mean: float = 11
    word_length_sigma: float = 3
    charset: List[str] = field(
        default_factory=lambda: list("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789{}[]()_-.;:'\" ")
    )
    super_charset: List[str] = field(default_factory=list)
    sub_charset: List[str] = field(default_factory=list)
    number_of_words_mean: float = 7
    number_of_words_sigma: float = 4
    word_separator: str = " "
    sub_script_p: float = 0
    super_script_p: float = 0
    bold_p: float = 0
    italic_p: float = 0
    letter_spacing_p: float = 0.5
    letter_spacing_mean: float = 1
    letter_spacing_sigma: float = 0.1


@pai_dataclass
@dataclass
class GeneratedLineDatasetParams(CalamariDataGeneratorParams):
    lines_per_epoch: int = 100
    text_generator: TextGeneratorParams = field(default_factory=TextGeneratorParams)
    line_generator: LineGeneratorParams = field(default_factory=LineGeneratorParams)

    def __len__(self):
        return self.lines_per_epoch

    def select(self, indices: List[int]):
        pass

    def to_prediction(self):
        raise NotImplementedError

    @staticmethod
    def cls() -> Type["DataGenerator"]:
        from calamari_ocr.ocr.dataset.datareader.generated_line_dataset.dataset import (
            GeneratedLineDataset,
        )

        return GeneratedLineDataset
806
2,728
""" Examples to show usage of the azure-core-tracing-opentelemetry with the Eventhub SDK. This example traces calls for senda batch to eventhub. An alternative path to export using AzureMonitor is also mentioned in the sample. Please take a look at the commented code. """ # Declare OpenTelemetry as enabled tracing plugin for Azure SDKs from azure.core.settings import settings from azure.core.tracing.ext.opentelemetry_span import OpenTelemetrySpan settings.tracing_implementation = OpenTelemetrySpan # In the below example, we use a simple console exporter, uncomment these lines to use # the Azure Monitor Exporter. It can be installed from https://pypi.org/project/opentelemetry-azure-monitor/ # Example of Azure Monitor exporter, but you can use anything OpenTelemetry supports # from azure_monitor import AzureMonitorSpanExporter # exporter = AzureMonitorSpanExporter( # instrumentation_key="uuid of the instrumentation key (see your Azure Monitor account)" # ) # Regular open telemetry usage from here, see https://github.com/open-telemetry/opentelemetry-python # for details from opentelemetry import trace from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import ConsoleSpanExporter from opentelemetry.sdk.trace.export import SimpleSpanProcessor # Simple console exporter exporter = ConsoleSpanExporter() trace.set_tracer_provider(TracerProvider()) tracer = trace.get_tracer(__name__) trace.get_tracer_provider().add_span_processor( SimpleSpanProcessor(exporter) ) from azure.eventhub import EventHubProducerClient, EventData import os FULLY_QUALIFIED_NAMESPACE = os.environ['EVENT_HUB_HOSTNAME'] EVENTHUB_NAME = os.environ['EVENT_HUB_NAME'] credential = os.environ['EVENTHUB_CONN_STR'] def on_event(context, event): print(context.partition_id, ":", event) with tracer.start_as_current_span(name="MyApplication"): producer_client = EventHubProducerClient.from_connection_string( conn_str=credential, fully_qualified_namespace=FULLY_QUALIFIED_NAMESPACE, eventhub_name=EVENTHUB_NAME, logging_enable=True ) with producer_client: event_data_batch = producer_client.create_batch() event_data_batch.add(EventData('Single message')) producer_client.send_batch(event_data_batch)
756
515
package br.com.caelum.stella.gateway.integration; import java.math.BigDecimal; import java.util.Calendar; import javax.servlet.http.HttpServletRequest; import junit.framework.Assert; import org.jmock.Expectations; import org.jmock.Mockery; import org.junit.Before; import org.junit.Test; import br.com.caelum.stella.gateway.CalendarCompareHelper; import br.com.caelum.stella.gateway.core.BigDecimalFormatter; import br.com.caelum.stella.gateway.pagseguro.NaoEhRoboPagSeguroException; import br.com.caelum.stella.gateway.pagseguro.PagSeguroRequisicaoInvalidaException; import br.com.caelum.stella.gateway.pagseguro.PagSeguroAutorizacaoReturn; import br.com.caelum.stella.gateway.pagseguro.PagSeguroDadosConfiguracao; import br.com.caelum.stella.gateway.pagseguro.PagSeguroStatusTransacao; import br.com.caelum.stella.gateway.pagseguro.PagSeguroTipoFrete; import br.com.caelum.stella.gateway.pagseguro.PagSeguroTipoPagamento; import br.com.caelum.stella.gateway.pagseguro.PagSeguroVerificaRetornoAutorizacao; import br.com.caelum.stella.gateway.pagseguro.PagSeguroVerificadorRetorno; public class PagSeguroTestRetornoIntegracao { private Mockery mockery; private PagSeguroVerificadorRetorno verificadorRetorno; private HttpServletRequest request; private static final PagSeguroDadosConfiguracao configuracao = new PagSeguroDadosConfiguracao(); @Before public void prepareMocks() { mockery = new Mockery(); request = mockery.mock(HttpServletRequest.class); verificadorRetorno = mockery.mock(PagSeguroVerificadorRetorno.class); } @Test(expected = PagSeguroRequisicaoInvalidaException.class) public void testSimulacaoDeAprovacaoIndevidaDeCompra() { mockery.checking(new Expectations() { { one(request).getParameter("TransacaoID"); will(returnValue("123456")); one(verificadorRetorno) .verificaSeRetornoFoiEnviadoPelaPagSeguro(configuracao); will(returnValue(false)); } }); new PagSeguroVerificaRetornoAutorizacao(request, verificadorRetorno, configuracao).handle(); mockery.assertIsSatisfied(); } @Test(expected = NaoEhRoboPagSeguroException.class) public void testRetornoFeitoPeloBrowser() { mockery.checking(new Expectations() { { one(request).getParameter("TransacaoID"); will(returnValue("")); } }); new PagSeguroVerificaRetornoAutorizacao(request, verificadorRetorno, configuracao).handle(); } @Test public void testRetornoValido() { mockery.checking(new Expectations() { { one(verificadorRetorno) .verificaSeRetornoFoiEnviadoPelaPagSeguro(configuracao); will(returnValue(true)); one(request).getParameter("VendedorEmail"); will(returnValue("<EMAIL>")); one(request).getParameter("TransacaoID"); will(returnValue("123456")); one(request).getParameter("Referencia"); will(returnValue("7654763")); one(request).getParameter("TipoFrete"); will(returnValue("SD")); one(request).getParameter("ValorFrete"); will(returnValue("101,50")); one(request).getParameter("Anotacao"); will(returnValue("qualquer coisa")); one(request).getParameter("DataTransacao"); will(returnValue("25/04/2009 09:14:30")); one(request).getParameter("TipoPagamento"); will(returnValue("CARTAO DE CREDITO")); one(request).getParameter("StatusTransacao"); will(returnValue("EM ANALISE")); one(request).getParameter("CliTelefone"); will(returnValue("71 33468900")); one(request).getParameter("CliCEP"); will(returnValue("41900050")); one(request).getParameter("CliCidade"); will(returnValue("Salvador")); one(request).getParameter("CliEndereco"); will(returnValue("Endereco do cidadao")); one(request).getParameter("CliNome"); will(returnValue("Alberto")); 
one(request).getParameter("CliEstado"); will(returnValue("BA")); one(request).getParameter("CliNumero"); will(returnValue("2626")); one(request).getParameter("CliComplemento"); will(returnValue("Complementando meus dados")); one(request).getParameter("CliBairro"); will(returnValue("<NAME>")); one(request).getParameter("CliEmail"); will(returnValue("<EMAIL>")); one(request).getParameter("NumItens"); will(returnValue("2")); for (int i = 1; i <= 2; i++) { one(request).getParameter("ProdID_" + i); will(returnValue("12345" + i)); one(request).getParameter("ProdDescricao_" + i); will(returnValue("descricao do item " + i)); one(request).getParameter("ProdQuantidade_" + i); will(returnValue(i + "")); one(request).getParameter("ProdValor_" + i); will(returnValue("1000")); one(request).getParameter("ProdFrete_" + i); will(returnValue(49 + i + ",25")); one(request).getParameter("ProdExtras_" + i); will(returnValue("0,00")); } } }); PagSeguroAutorizacaoReturn autorizacaoReturn = new PagSeguroVerificaRetornoAutorizacao( request, verificadorRetorno, configuracao).handle(); Calendar dataTransacao = Calendar.getInstance(); dataTransacao.set(Calendar.DAY_OF_MONTH, 25); dataTransacao.set(Calendar.MONTH, Calendar.APRIL); dataTransacao.set(Calendar.YEAR, 2009); dataTransacao.set(Calendar.HOUR_OF_DAY, 9); dataTransacao.set(Calendar.MINUTE, 14); dataTransacao.set(Calendar.SECOND, 30); Assert.assertTrue(new CalendarCompareHelper().compareCalendars( dataTransacao, autorizacaoReturn.getDataTransacao())); Assert.assertEquals(autorizacaoReturn.getStatusTransacao(), PagSeguroStatusTransacao.EM_ANALISE); Assert.assertEquals(autorizacaoReturn.getTipoPagamento(), PagSeguroTipoPagamento.CARTAO_CREDITO); Assert.assertEquals(autorizacaoReturn.getTipoFrete(), PagSeguroTipoFrete.SEDEX); Assert.assertEquals(new BigDecimal(101.50).setScale( BigDecimalFormatter.SCALE, BigDecimalFormatter.ROUNDING_MODE), autorizacaoReturn.getValorTotalFrete()); Assert.assertEquals(new BigDecimal(101.50).setScale( BigDecimalFormatter.SCALE, BigDecimalFormatter.ROUNDING_MODE), autorizacaoReturn.getValorTotalFreteCheckout()); Assert.assertEquals(new BigDecimal(20).setScale( BigDecimalFormatter.SCALE, BigDecimalFormatter.ROUNDING_MODE), autorizacaoReturn.getValorTotalItens()); mockery.assertIsSatisfied(); } }
2,730
1,795
#!/usr/bin/python
# coding=utf-8
##########################################################################

from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import patch

from diamond.collector import Collector
from cassandra_jolokia import CassandraJolokiaCollector

##########################################################################


class TestCassandraJolokiaCollector(CollectorTestCase):
    def setUp(self):
        config = get_collector_config('CassandraJolokiaCollector', {})

        self.collector = CassandraJolokiaCollector(config, None)

    # Used for all the tests so the expected numbers are all the same.
    def fixture_a(self):
        values = [0] * 92
        values[30:56] = [3, 3, 1, 1, 8, 5, 6, 1, 6, 5, 3, 8, 9, 10, 7, 8,
                         7, 5, 5, 5, 3, 3, 2, 2, 2]
        return values

    def empty_fixture_values(self):
        return [0] * 91

    def expected_fixture_a_p(self, percentile_key):
        return {
            'p25': 192.0,
            'p50': 398.0,
            'p75': 824.0,
            'p95': 2050.0,
            'p99': 2952.0
        }[percentile_key]

    def test_import(self):
        self.assertTrue(CassandraJolokiaCollector)

    def test_should_compute_percentiles_accurately(self):
        ninety_offsets = self.collector.create_offsets(90)
        percentile_value = self.collector.compute_percentile(
            ninety_offsets, self.fixture_a(), 50)
        self.assertEqual(percentile_value, 398.0)

    def test_should_compute_percentiles_accurately_when_empty(self):
        ninety_offsets = self.collector.create_offsets(90)
        self.assertEqual(self.collector.compute_percentile(
            ninety_offsets, self.empty_fixture_values(), 50), 0.0)
        self.assertEqual(self.collector.compute_percentile(
            ninety_offsets, self.empty_fixture_values(), 95), 0.0)
        self.assertEqual(self.collector.compute_percentile(
            ninety_offsets, self.empty_fixture_values(), 99), 0.0)

    @patch.object(Collector, 'publish')
    def test_should_not_collect_non_histogram_attributes(self, publish_mock):
        self.collector.interpret_bean_with_list(
            'RecentReadLatencyMicros', self.fixture_a())
        self.assertPublishedMany(publish_mock, {})

    @patch.object(Collector, 'publish')
    def test_should_collect_metrics_histogram_attributes(self, publish_mock):
        self.collector.interpret_bean_with_list(
            'RecentReadLatencyHistogramMicros', self.fixture_a())
        self.assertPublishedMany(publish_mock, {
            'RecentReadLatencyHistogramMicros.p50':
                self.expected_fixture_a_p('p50'),
            'RecentReadLatencyHistogramMicros.p95':
                self.expected_fixture_a_p('p95'),
            'RecentReadLatencyHistogramMicros.p99':
                self.expected_fixture_a_p('p99')
        })

    @patch.object(Collector, 'publish')
    # db:columnfamily=HintsColumnFamily,keyspace=system,type=ColumnFamilies:
    def test_should_escape_histogram_attributes(self, publish_mock):
        test_bean = ','.join([
            'db:columnfamily=HintsColumnFamily',
            'keyspace=system',
            'type=ColumnFamilies:RecentReadLatencyHistogramMicros'
        ])
        self.collector.interpret_bean_with_list(test_bean, self.fixture_a())
        expected_base = '.'.join([
            'db.columnfamily_HintsColumnFamily',
            'keyspace_system',
            'type_ColumnFamilies',
            'RecentReadLatencyHistogramMicros'
        ])
        self.assertPublishedMany(publish_mock, {
            '.'.join([expected_base, 'p50']): self.expected_fixture_a_p('p50'),
            '.'.join([expected_base, 'p95']): self.expected_fixture_a_p('p95'),
            '.'.join([expected_base, 'p99']): self.expected_fixture_a_p('p99')
        })

    @patch.object(Collector, 'publish')
    def test_should_respect_percentiles_config(self, publish_mock):
        self.collector.update_config({
            'percentiles': ['25', '75']
        })
        self.collector.interpret_bean_with_list(
            'RecentReadLatencyHistogramMicros', self.fixture_a())
        self.assertPublishedMany(publish_mock, {
            'RecentReadLatencyHistogramMicros.p25':
                self.expected_fixture_a_p('p25'),
            'RecentReadLatencyHistogramMicros.p75':
                self.expected_fixture_a_p('p75'),
        })

    @patch.object(Collector, 'publish')
    def test_should_respect_histogram_regex_config(self, publish_mock):
        self.collector.update_config({
            'histogram_regex': '^WackyMetric'
        })
        self.collector.interpret_bean_with_list(
            'WackyMetricSeventeen', self.fixture_a())
        self.assertPublishedMany(publish_mock, {
            'WackyMetricSeventeen.p50': self.expected_fixture_a_p('p50'),
            'WackyMetricSeventeen.p95': self.expected_fixture_a_p('p95'),
            'WackyMetricSeventeen.p99': self.expected_fixture_a_p('p99')
        })

##########################################################################
if __name__ == "__main__":
    unittest.main()
2,313
5,169
<gh_stars>1000+
{
  "name": "RMDownloadAdapter",
  "version": "0.1.1",
  "summary": "Object and Data async downloader with progress and completion handler",
  "description": "Async Object Downloader using URL - converted to ObjC object or returning NSData. Easy to use and includes cache with a user set size. Concurrent downloads are allowed and cancelling one won't cancel the other. Example Project included for some help, and Unit Tests are included as well",
  "homepage": "https://github.com/rjmiller2543/RMDownloadAdapter",
  "license": {
    "type": "MIT",
    "file": "LICENSE"
  },
  "authors": {
    "rjmiller2543": "MBA11"
  },
  "source": {
    "git": "https://github.com/rjmiller2543/RMDownloadAdapter.git",
    "tag": "0.1.1"
  },
  "platforms": {
    "ios": "8.0"
  },
  "source_files": "RMDownloadAdapter/Classes/**/*"
}
291
1,738
/*
* All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
* its licensors.
*
* For complete copyright and license terms please see the LICENSE at the root of this
* distribution (the "License"). All use of this software is governed by the License,
* or, if provided, by the license below or the license accompanying this file. Do not
* remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
*/
// Original file Copyright Crytek GMBH or its affiliates, used under license.

#ifndef CRYINCLUDE_EDITOR_AI_NAVDATAGENERATION_POLYGONSETOPS_UTILS_H
#define CRYINCLUDE_EDITOR_AI_NAVDATAGENERATION_POLYGONSETOPS_UTILS_H
#pragma once

#include "ISystem.h"

typedef Vec2d Vector2d;

// needed to use Vector2d in a map
template<class F>
bool operator<(const Vec2_tpl<F>& op1, const Vec2_tpl<F>& op2)
{
    if (op1.x < op2.x)
    {
        return true;
    }
    else if (op1.x > op2.x)
    {
        return false;
    }
    if (op1.y < op2.y)
    {
        return true;
    }
    else
    {
        return false;
    }
}

#endif // CRYINCLUDE_EDITOR_AI_NAVDATAGENERATION_POLYGONSETOPS_UTILS_H
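// Illustrative consequence of the comparator above (hypothetical usage): the
// strict weak ordering lets Vector2d serve as an associative-container key.
//
//   std::map<Vector2d, int> vertexIds;
//   vertexIds[Vector2d(0.0, 1.0)] = 42;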
478
1,350
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) AutoRest Code Generator.

package com.azure.resourcemanager.costmanagement;

import com.azure.core.util.Context;

/** Samples for Exports GetExecutionHistory. */
public final class ExportsGetExecutionHistorySamples {
    /**
     * Sample code: BillingAccountExecutionHistoryExport.
     *
     * @param costManagementManager Entry point to CostManagementManager.
     */
    public static void billingAccountExecutionHistoryExport(
        com.azure.resourcemanager.costmanagement.CostManagementManager costManagementManager) {
        costManagementManager
            .exports()
            .getExecutionHistoryWithResponse(
                "providers/Microsoft.Billing/billingAccounts/123456", "TestExport", Context.NONE);
    }

    /**
     * Sample code: DepartmentExecutionHistoryExport.
     *
     * @param costManagementManager Entry point to CostManagementManager.
     */
    public static void departmentExecutionHistoryExport(
        com.azure.resourcemanager.costmanagement.CostManagementManager costManagementManager) {
        costManagementManager
            .exports()
            .getExecutionHistoryWithResponse(
                "providers/Microsoft.Billing/billingAccounts/12/departments/1234", "TestExport", Context.NONE);
    }

    /**
     * Sample code: SubscriptionExecutionHistoryExport.
     *
     * @param costManagementManager Entry point to CostManagementManager.
     */
    public static void subscriptionExecutionHistoryExport(
        com.azure.resourcemanager.costmanagement.CostManagementManager costManagementManager) {
        costManagementManager
            .exports()
            .getExecutionHistoryWithResponse(
                "subscriptions/00000000-0000-0000-0000-000000000000", "TestExport", Context.NONE);
    }

    /**
     * Sample code: EnrollmentAccountExecutionHistoryExport.
     *
     * @param costManagementManager Entry point to CostManagementManager.
     */
    public static void enrollmentAccountExecutionHistoryExport(
        com.azure.resourcemanager.costmanagement.CostManagementManager costManagementManager) {
        costManagementManager
            .exports()
            .getExecutionHistoryWithResponse(
                "providers/Microsoft.Billing/billingAccounts/100/enrollmentAccounts/456", "TestExport", Context.NONE);
    }

    /**
     * Sample code: ManagementGroupExecutionHistoryExport.
     *
     * @param costManagementManager Entry point to CostManagementManager.
     */
    public static void managementGroupExecutionHistoryExport(
        com.azure.resourcemanager.costmanagement.CostManagementManager costManagementManager) {
        costManagementManager
            .exports()
            .getExecutionHistoryWithResponse(
                "providers/Microsoft.Management/managementGroups/TestMG", "TestExport", Context.NONE);
    }

    /**
     * Sample code: ResourceGroupExecutionHistoryExport.
     *
     * @param costManagementManager Entry point to CostManagementManager.
     */
    public static void resourceGroupExecutionHistoryExport(
        com.azure.resourcemanager.costmanagement.CostManagementManager costManagementManager) {
        costManagementManager
            .exports()
            .getExecutionHistoryWithResponse(
                "subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/MYDEVTESTRG",
                "TestExport",
                Context.NONE);
    }
}
1,257
388
<gh_stars>100-1000
# -*- coding: utf-8 -*-
import logging
import re

import mistune
from PySide6 import QtCore, QtWidgets

from fastflix.resources import changes_file, local_changes_file

__all__ = ["Changes"]

logger = logging.getLogger("fastflix")

markdown = mistune.Markdown()
issues = re.compile(r"\s(#\d+)\s")


class Changes(QtWidgets.QScrollArea):
    def __init__(self, parent=None):
        super().__init__(parent)
        self.setWidgetResizable(True)
        self.setMinimumWidth(500)
        self.setMinimumHeight(500)
        content = QtWidgets.QWidget(self)
        self.setWidget(content)
        lay = QtWidgets.QVBoxLayout(content)

        if changes_file.exists():
            content = changes_file.read_text(encoding="utf-8", errors="ignore")
        else:
            if not local_changes_file.exists():
                raise Exception("Could not locate changelog file")
            content = local_changes_file.read_text(encoding="utf-8", errors="ignore")

        linked_content = issues.sub(
            " <a href='https://github.com/cdgriffith/FastFlix/issues/\\1' style='color: black' >\\1</a> ", content
        ).replace("issues/#", "issues/")

        self.label = QtWidgets.QLabel(markdown(linked_content))
        self.label.setOpenExternalLinks(True)

        # align the text to the top left
        self.label.setAlignment(QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop)

        # making label multi-line
        self.label.setWordWrap(True)

        # adding label to the layout
        lay.addWidget(self.label)

    def closeEvent(self, event):
        self.hide()
        # event.accept()
697
480
<filename>polardbx-parser/src/main/java/com/alibaba/polardbx/druid/sql/dialect/mysql/ast/statement/SQLAlterResourceGroupStatement.java<gh_stars>100-1000 /* * Copyright 1999-2017 Alibaba Group Holding Ltd. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.alibaba.polardbx.druid.sql.dialect.mysql.ast.statement; import com.alibaba.polardbx.druid.sql.ast.SQLExpr; import com.alibaba.polardbx.druid.sql.ast.SQLName; import com.alibaba.polardbx.druid.sql.ast.SQLStatementImpl; import com.alibaba.polardbx.druid.sql.ast.statement.SQLCreateStatement; import com.alibaba.polardbx.druid.sql.visitor.SQLASTVisitor; import java.util.HashMap; import java.util.Map; public class SQLAlterResourceGroupStatement extends SQLStatementImpl implements SQLCreateStatement { private SQLName name; private Map<String, SQLExpr> properties = new HashMap<String, SQLExpr>(); private Boolean enable; public SQLName getName() { return name; } public void setName(SQLName x) { if (x != null) { x.setParent(this); } this.name = x; } public void addProperty(String name, SQLExpr value) { if (value != null) { value.setParent(this); } properties.put(name, value); } public Boolean getEnable() { return enable; } public void setEnable(Boolean enable) { this.enable = enable; } public Map<String, SQLExpr> getProperties() { return properties; } public void accept0(SQLASTVisitor v) { if (v.visit(this)) { acceptChild(v, name); for (SQLExpr value : properties.values()) { acceptChild(v, value); } } v.endVisit(this); } }
906
3,952
#ifndef __PYWHERE_H
#define __PYWHERE_H

#include <string>
#include <atomic>

/**
 * Examines the current Python stack frame and lets us know where in the code we are.
 */
extern "C" int whereInPython(std::string& filename, int& lineno, int& bytei);

/**
 * Pointer to "whereInPython" for efficient linkage between pywhere and libscalene.
 */
extern "C" std::atomic<decltype(whereInPython)*> p_whereInPython;

#endif
141
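Taken together, the two declarations above define a lazy indirection: the consumer calls through `p_whereInPython` only once pywhere has installed the function. A minimal consumer sketch follows, assuming the header is on the include path; the wrapper name and the output format are illustrative assumptions, not part of Scalene itself.

#include <cstdio>
#include <string>
#include "pywhere.h"

// Hypothetical helper: print the current Python source location, if available.
void report_python_location() {
  auto where = p_whereInPython.load();
  if (where == nullptr) {
    return;  // pywhere has not been loaded yet; nothing to report
  }
  std::string filename;
  int lineno = 0, bytei = 0;
  if (where(filename, lineno, bytei)) {  // assumed: non-zero means a frame was found
    std::printf("%s:%d (byte index %d)\n", filename.c_str(), lineno, bytei);
  }
}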
528
import numpy as np from opytimizer.optimizers.science import eo from opytimizer.spaces import search def test_eo_params(): params = { 'a1': 2.0, 'a2': 1.0, 'GP': 0.5, 'V': 1.0 } new_eo = eo.EO(params=params) assert new_eo.a1 == 2.0 assert new_eo.a2 == 1.0 assert new_eo.GP == 0.5 assert new_eo.V == 1.0 def test_eo_params_setter(): new_eo = eo.EO() try: new_eo.a1 = 'a' except: new_eo.a1 = 2.0 try: new_eo.a1 = -1 except: new_eo.a1 = 2.0 assert new_eo.a1 == 2.0 try: new_eo.a2 = 'b' except: new_eo.a2 = 1.0 try: new_eo.a2 = -1 except: new_eo.a2 = 1.0 assert new_eo.a2 == 1.0 try: new_eo.GP = 'c' except: new_eo.GP = 0.5 try: new_eo.GP = -1 except: new_eo.GP = 0.5 assert new_eo.GP == 0.5 try: new_eo.V = 'd' except: new_eo.V = 1.0 try: new_eo.V = -1 except: new_eo.V = 1.0 assert new_eo.V == 1.0 def test_eo_compile(): search_space = search.SearchSpace(n_agents=10, n_variables=2, lower_bound=[1, 1], upper_bound=[10, 10]) new_eo = eo.EO() new_eo.compile(search_space) try: new_eo.C = 1 except: new_eo.C = [] assert new_eo.C == [] def test_eo_calculate_equilibrium(): search_space = search.SearchSpace(n_agents=10, n_variables=2, lower_bound=[1, 1], upper_bound=[10, 10]) new_eo = eo.EO() new_eo.compile(search_space) new_eo._calculate_equilibrium(search_space.agents) def test_eo_average_concentration(): def square(x): return np.sum(x**2) search_space = search.SearchSpace(n_agents=10, n_variables=2, lower_bound=[1, 1], upper_bound=[10, 10]) new_eo = eo.EO() new_eo.compile(search_space) C_avg = new_eo._average_concentration(square) assert type(C_avg).__name__ == 'Agent' def test_eo_update(): def square(x): return np.sum(x**2) search_space = search.SearchSpace(n_agents=10, n_variables=2, lower_bound=[1, 1], upper_bound=[10, 10]) new_eo = eo.EO() new_eo.compile(search_space) new_eo.update(search_space, square, 1, 10)
1,355
4,234
<reponame>roblabs/maplibre-gl-native #include <mbgl/gfx/headless_backend.hpp> namespace mbgl { namespace gfx { HeadlessBackend::HeadlessBackend(Size size_) : mbgl::gfx::Renderable(size_, nullptr) { } void HeadlessBackend::setSize(Size size_) { size = size_; resource.reset(); } } // namespace gfx } // namespace mbgl
140
2,587
<reponame>Fernando-De-Santiago/godeye package cn.hikyson.godeye.core.internal.modules.imagecanary; import android.os.Build; import androidx.test.core.app.ApplicationProvider; import org.junit.After; import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.robolectric.Robolectric; import org.robolectric.RobolectricTestRunner; import org.robolectric.android.controller.ActivityController; import org.robolectric.annotation.Config; import java.lang.ref.WeakReference; import java.util.HashSet; import cn.hikyson.godeye.core.GodEye; import cn.hikyson.godeye.core.helper.Log4Test; import cn.hikyson.godeye.core.helper.RoboTestApplication; import cn.hikyson.godeye.core.helper.Test4ImageActivity; import io.reactivex.functions.Predicate; @RunWith(RobolectricTestRunner.class) @Config(sdk = Build.VERSION_CODES.LOLLIPOP, application = RoboTestApplication.class) public class ImageCanaryInternalTest { @Before public void setUp() throws Exception { GodEye.instance().init(ApplicationProvider.getApplicationContext()); } @After public void tearDown() throws Exception { } @Test public void inspectInner() { ActivityController<Test4ImageActivity> activityController = Robolectric.buildActivity(Test4ImageActivity.class).create().start().resume(); Test4ImageActivity activity = activityController.get(); ImageCanaryInternal imageCanaryInternal = new ImageCanaryInternal(new DefaultImageCanaryConfigProvider()); ImageCanary imageCanary = new ImageCanary(); HashSet<ImageIssue> hashSet = new HashSet<ImageIssue>(); // mock issue already exist ImageIssue imageIssue = new ImageIssue(); imageIssue.activityHashCode = activity.hashCode(); imageIssue.imageViewHashCode = activity.imageView3().hashCode(); imageIssue.bitmapWidth = 200; imageIssue.bitmapHeight = 100; imageIssue.issueType = ImageIssue.IssueType.BITMAP_QUALITY_TOO_HIGH; hashSet.add(imageIssue); imageCanaryInternal.inspectInner(new WeakReference<>(activity), imageCanary, hashSet).run(); imageCanary.subject().test().assertValueAt(0, new Predicate<ImageIssue>() { @Override public boolean test(ImageIssue imageIssue) throws Exception { Log4Test.d(imageIssue); return imageIssue.activityHashCode == activity.hashCode() && imageIssue.bitmapWidth == 200 && imageIssue.bitmapHeight == 100 && imageIssue.imageViewWidth == 50 && imageIssue.imageViewHeight == 50 && imageIssue.issueType == ImageIssue.IssueType.BITMAP_QUALITY_TOO_HIGH; } }).assertValueAt(1, new Predicate<ImageIssue>() { @Override public boolean test(ImageIssue imageIssue) throws Exception { Log4Test.d(imageIssue); return imageIssue.activityHashCode == activity.hashCode() && imageIssue.bitmapWidth == 200 && imageIssue.bitmapHeight == 100 && imageIssue.imageViewWidth == 500 && imageIssue.imageViewHeight == 500 && imageIssue.issueType == ImageIssue.IssueType.BITMAP_QUALITY_TOO_LOW; } }).assertValueCount(2); Assert.assertEquals(3, hashSet.size()); } }
1,436
324
<reponame>tormath1/jclouds {"status": "success", "data": {}, "job_id": 254506373, "msgs": [{"INFO": "logout: Logout successful", "SOURCE": "BLL", "ERR_CD": null, "LVL": "INFO"}]}
74
339
<gh_stars>100-1000 # -*- coding: utf-8 -*- """ @author: truthless """ import os import json import logging import sys root_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))) sys.path.append(root_dir) from convlab2.util.train_util import init_logging_handler from convlab2.task.camrest.goal_generator import GoalGenerator from convlab2.policy.vhus.camrest.usermanager import UserDataManager from convlab2.policy.vhus.train import VHUS_Trainer if __name__ == '__main__': with open('config.json', 'r') as f: cfg = json.load(f) init_logging_handler(cfg['log_dir']) manager = UserDataManager() goal_gen = GoalGenerator() env = VHUS_Trainer(cfg, manager, goal_gen) logging.debug('start training') best = float('inf') for e in range(cfg['epoch']): env.imitating(e) best = env.imit_test(e, best)
376
359
<reponame>laenNoCode/thcrap<filename>thcrap/src/vfs.h /** * Touhou Community Reliant Automatic Patcher * Main DLL * * ---- * * Virtual file system. */ #pragma once #include <string> #include <unordered_map> #include <unordered_set> typedef json_t* jsonvfs_generator_t(std::unordered_map<std::string, json_t*> in_data, const std::string out_fn, size_t* out_size); extern "C" { /** * Add a handler to create files in the virtual file system. * * in_fns contains a list of json files needed to generate the virtual files. * * Each time a jdiff file matching out_pattern is resolved, gen will be called. * If it returns a non-NULL value, the return value will be used as if a file * with this content exists at the top of the patch stack. */ void jsonvfs_add(const char* out_pattern, std::unordered_set<std::string> in_fns, jsonvfs_generator_t *gen); // Same as jsonvfs_add, but all file names are game-relative. void jsonvfs_game_add(const char* out_pattern, std::unordered_set<std::string> in_fns, jsonvfs_generator_t *gen); /** * Generate a VFS file from a JSON map file. * The map file is called <filename>.map and will generate a file called <filename>.jdiff. * The map file is a json file, with strings being references to JSON values in in_fn. * For example, if you have this in the map file: * { "key": "object.example" } * and this in in_fn: * { "object": { "example": 5 } } * the generated jdiff file will be like this: * { "key": 5 } */ void jsonvfs_add_map(const char* out_pattern, std::unordered_set<std::string> in_fns); // Same as jsonvfs_add_map, but all file names are game-relative. void jsonvfs_game_add_map(const char* out_pattern, std::unordered_set<std::string> in_fns); // Return a file from the vfs if it exists. json_t *jsonvfs_get(const char* fn, size_t* size); }
665
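A minimal sketch of registering a generator against the API above, assuming jansson for the `json_t` values (which the header's types imply); the file names `scores.json`/`ranking.jdiff` and the derived key are invented for illustration.

#include <jansson.h>
#include "vfs.h"

// Hypothetical generator: build ranking.jdiff from the patch's scores.json.
static json_t* gen_ranking(std::unordered_map<std::string, json_t*> in_data,
                           const std::string out_fn, size_t* out_size) {
  (void)out_fn; (void)out_size;  // unused in this sketch
  json_t* scores = in_data["scores.json"];
  if (!scores) {
    return nullptr;  // input missing: no virtual file is generated
  }
  json_t* out = json_object();
  // ... derive the ranking from `scores` here ...
  json_object_set_new(out, "generated_from", json_string("scores.json"));
  return out;  // treated as if ranking.jdiff sat at the top of the patch stack
}

void register_ranking_vfs() {
  jsonvfs_add("ranking.jdiff", { "scores.json" }, gen_ranking);
}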
4,013
<reponame>kylelaker/checkov<filename>tests/terraform/checks/resource/azure/test_MSSQLServerMinTLSVersion.py<gh_stars>1000+ import unittest import hcl2 from checkov.terraform.checks.resource.azure.MSSQLServerMinTLSVersion import check from checkov.common.models.enums import CheckResult class TestMSSQLServerMinTLSVersion(unittest.TestCase): def test_failure(self): hcl_res = hcl2.loads(""" resource "azurerm_mssql_server" "examplea" { name = var.server_name resource_group_name = var.resource_group.name location = var.resource_group.location version = var.sql["version"] administrator_login = var.sql["administrator_login"] administrator_login_password = <PASSWORD> minimum_tls_version = "1.1" public_network_access_enabled = var.sql["public_network_access_enabled"] identity { type = "SystemAssigned" } tags = var.common_tags } """) resource_conf = hcl_res['resource'][0]['azurerm_mssql_server']['examplea'] scan_result = check.scan_resource_conf(conf=resource_conf) self.assertEqual(CheckResult.FAILED, scan_result) def test_success(self): hcl_res = hcl2.loads(""" resource "azurerm_mssql_server" "examplea" { name = var.server_name resource_group_name = var.resource_group.name location = var.resource_group.location version = var.sql["version"] administrator_login = var.sql["administrator_login"] administrator_login_password = <PASSWORD> minimum_tls_version = "1.2" public_network_access_enabled = var.sql["public_network_access_enabled"] identity { type = "SystemAssigned" } tags = var.common_tags } """) resource_conf = hcl_res['resource'][0]['azurerm_mssql_server']['examplea'] scan_result = check.scan_resource_conf(conf=resource_conf) self.assertEqual(CheckResult.PASSED, scan_result) if __name__ == '__main__': unittest.main()
1,267
1,511
/*
 * File: linux/nfsacl.h
 *
 * (C) 2003 <NAME> <<EMAIL>>
 */
#ifndef __LINUX_NFSACL_H
#define __LINUX_NFSACL_H

#endif /* __LINUX_NFSACL_H */
44
3,227
/*!
\ingroup PkgArrangementOnSurface2Concepts
\cgalConcept

A model of the `ArrangementPointLocation_2` concept can answer
point-location queries on an arrangement attached to it. Namely, given an
`Arrangement_2::Point_2` object, representing a point in the plane, it returns
the arrangement cell containing it. In the general case, the query point is
contained inside an arrangement face, but in degenerate situations it may lie
on an edge or coincide with an arrangement vertex.

\cgalHeading{A note on Backwards compatibility}
The `locate` member function used to return `CGAL::Object` up to
\cgal version 4.2. Starting with \cgal version 4.3 the return type
is determined by a metafunction. To preserve backwards compatibility
`CGAL::Object` can be constructed from the new return types implicitly,
but switching to the new style is recommended. To enable the old
style without any overhead, the macro `CGAL_ARR_POINT_LOCATION_VERSION`
can be defined to 1 before any \cgal header is included.

\cgalHasModel `CGAL::Arr_naive_point_location<Arrangement>`
\cgalHasModel `CGAL::Arr_walk_along_line_point_location<Arrangement>`
\cgalHasModel `CGAL::Arr_trapezoid_ric_point_location<Arrangement>`
\cgalHasModel `CGAL::Arr_landmarks_point_location<Arrangement,Generator>`

\sa `CGAL::Arr_naive_point_location<Arrangement>`
\sa `CGAL::Arr_walk_along_line_point_location<Arrangement>`
\sa `CGAL::Arr_trapezoid_ric_point_location<Arrangement>`
\sa `CGAL::Arr_landmarks_point_location<Arrangement,Generator>`
\sa `CGAL::Arr_point_location_result<Arrangement>`
\sa `CGAL_ARR_POINT_LOCATION_VERSION`

*/

class ArrangementPointLocation_2 {
public:

/// \name Types
/// @{

/*!
the associated arrangement type.
*/
typedef unspecified_type Arrangement_2;

/*!
equivalent to `Arrangement_2::Point_2`.
*/
typedef unspecified_type Point_2;

/// @}

/// \name Creation
/// @{

/*!
default constructor.
*/
ArrangementPointLocation_2();

/*!
constructs a point-location object `pl` attached to the given
arrangement `arr`.
*/
ArrangementPointLocation_2 (const Arrangement_2& arr);

/// @}

/// \name Query Functions
/// @{

/*!
locates the arrangement cell that contains the query point `q`
and returns a discriminated union container of the following bounded types:

<UL>
<LI>`Arrangement_2::Face_const_handle`, in case `q` is contained inside
an arrangement face;
<LI>`Arrangement_2::Halfedge_const_handle`, in case `q` lies on an
arrangement edge;
<LI>`Arrangement_2::Vertex_const_handle`, in case `q` coincides with
an arrangement vertex.
</UL>

\pre `pl` is attached to a valid arrangement object.
*/
Arr_point_location_result<Arrangement_2>::Type locate(const Point_2& q) const;

/// @}

/// \name Operations
/// @{

/*!
attaches `pl` to the given arrangement `arr`.
*/
void attach (const Arrangement_2& arr);

/*!
detaches `pl` from the arrangement it is currently attached to.
*/
void detach ();

/// @}

}; /* end ArrangementPointLocation_2 */
961
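A short usage sketch of the variant-based interface documented above; `Arr` stands for a concrete arrangement typedef, and `arr`/`q` are a pre-built arrangement and query point (all placeholders).

#include <CGAL/Arr_naive_point_location.h>
#include <CGAL/Arr_point_location_result.h>
#include <boost/variant.hpp>
#include <iostream>

// Locate q and dispatch on the cell type it falls into.
CGAL::Arr_naive_point_location<Arr> pl(arr);
CGAL::Arr_point_location_result<Arr>::Type res = pl.locate(q);

if (const Arr::Face_const_handle* f = boost::get<Arr::Face_const_handle>(&res))
  std::cout << "inside " << ((*f)->is_unbounded() ? "the unbounded face" : "a bounded face") << "\n";
else if (const Arr::Halfedge_const_handle* e = boost::get<Arr::Halfedge_const_handle>(&res))
  std::cout << "on an edge: " << (*e)->curve() << "\n";
else if (const Arr::Vertex_const_handle* v = boost::get<Arr::Vertex_const_handle>(&res))
  std::cout << "on a vertex: " << (*v)->point() << "\n";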
320
// Copyright (c) 2015, <NAME>. // Distributed under the BSD 2-Clause License (see LICENSE.txt for details) #include "Checkbox.h" bool Checkbox::Checked() { return IsDlgButtonChecked(_parent, _id) == BST_CHECKED; } bool Checkbox::Checked(bool checked) { return CheckDlgButton( _parent,_id, checked ? BST_CHECKED : BST_UNCHECKED) == TRUE; }
135
326
package com.bihell.dice.service; import com.bihell.dice.blog.service.tool.MediaService; import lombok.extern.slf4j.Slf4j; import org.junit.Test; import org.springframework.beans.factory.annotation.Autowired; /** * @author zhangbowen * @since 2019/7/9 17:58 */ @Slf4j public class MediaServiceTests extends BaseTests { @Autowired private MediaService mediaService; @Test public void pageAdminMedias() { // System.out.println(mediaService.getMediaList(1, 20)); } }
191
325
<gh_stars>100-1000
import lxml.html
import tempfile
import webbrowser
import time

from lxmlTree import lxmlTree
from helper import *


class ElhancedTree():
    def __init__(self, tree):
        self.leafs = set()
        self.children = set()
        self.tree = tree
        self._reference = list(self.tree.iter())
        self.addDepth(self._reference[0])

    def addDepth(self, node, depth=0):
        tc = node.text_content()
        node.lenchars = len(tc) if tc else 0
        node.depth = depth
        node.isLeaf = False
        for n in node.iterchildren():
            if bool(list(n.iterchildren())):
                self.addDepth(n, depth + 1)
            else:
                self.leafs.add(n)
                tc = n.text_content()
                n.lenchars = len(tc) if tc else 0
                n.isLeaf = True

    def view(self, *args):
        lxmlTree(*args)

    def viewPage(self):
        with tempfile.NamedTemporaryFile('r+', suffix='.html') as f:
            f.write(lxml.html.tostring(self.tree).decode('utf8'))
            f.flush()
            webbrowser.open('file://' + f.name)
            time.sleep(1)

    def compare(self, other):
        if isinstance(other, ElhancedTree):
            other = other.tree
        lxmlTree([self.tree, other])


# getQuickTree, prune_first, get_first_body, viewNode, view_diff and view_node
# are expected to come from the star import of `helper` above.

# e1 = ElhancedTree(getQuickTree('http://www.nieuwsdumper.nl/nieuws/1454/eerste-volvo-fmx-410-8x4-tridem-betonmixer-voor-bck.html'))
# e2 = ElhancedTree(getQuickTree('http://www.nieuwsdumper.nl/nieuws/1666/ihi-38n-voor-van-zuijlen.html'))

e1 = ElhancedTree(getQuickTree('http://www.bbc.com/news/world-africa-33049312'))
e2 = ElhancedTree(getQuickTree('http://www.bbc.com/news/business-28978881'))

lxmlTree([e1.tree, prune_first(e1.tree, e2.tree), e2.tree])

for i in range(1000):
    # body = prune_first(e1.tree, e2.tree)
    body = get_first_body(e2.tree, e1.tree)
    print(body)

viewNode(e1.tree)


def add_multi_body(args):
    pass  # stub: not yet implemented


for x in prune_first(e1.tree, e2.tree).iter():
    print(x.tag, x.attrib, x.text)

view_diff(e2.tree, prune_first(e2.tree, e1.tree), url='http://www.bbc.com/')
view_node(prune_first(e2.tree, e1.tree), url='http://www.bbc.com/')
1,131
737
<reponame>etnrlz/rtbkit /* augmentor_base.cc <NAME>, 4 March 2012 Copyright (c) 2012 Datacratic. All rights reserved. Object that handles doing augmented bid requests. */ #include "augmentor_base.h" #include "soa/service/zmq_utils.h" #include "jml/arch/timers.h" #include "jml/utils/vector_utils.h" #include "jml/arch/futex.h" #include <memory> using namespace std; using namespace ML; namespace RTBKIT { /*****************************************************************************/ /* AUGMENTOR */ /*****************************************************************************/ // Determined via a very scientific method: 2^16 should be enough... right? // \todo Need to make this queue way smaller so that we don't build up a backlog // of requests. enum { QueueSize = 65536 }; Augmentor:: Augmentor(const std::string & augmentorName, const std::string & serviceName, std::shared_ptr<ServiceProxies> proxies) : ServiceBase(serviceName, proxies), augmentorName(augmentorName), toRouters(getZmqContext()), responseQueue(QueueSize), requestQueue(QueueSize), loopMonitor(*this), loadStabilizer(loopMonitor) { } Augmentor:: Augmentor(const std::string & augmentorName, const std::string & serviceName, ServiceBase& parent) : ServiceBase(serviceName, parent), augmentorName(augmentorName), toRouters(getZmqContext()), responseQueue(QueueSize), requestQueue(QueueSize), loopMonitor(*this), loadStabilizer(loopMonitor) { } Augmentor:: ~Augmentor() { shutdown(); } void Augmentor:: init(int numThreads) { responseQueue.onEvent = [=] (const Response& resp) { const AugmentationRequest& request = resp.first; const AugmentationList& response = resp.second; toRouters.sendMessage( request.router, "RESPONSE", "1.0", request.startTime, request.id.toString(), request.augmentor, chomp(response.toJson().toString())); recordHit("messages.RESPONSE"); }; addSource("Augmentor::responseQueue", responseQueue); toRouters.init(getServices()->config, serviceName()); toRouters.connectHandler = [=] (const std::string & newRouter) { toRouters.sendMessage(newRouter, "CONFIG", "1.0", augmentorName); recordHit("messages.CONFIG"); }; toRouters.disconnectHandler = [=] (const std::string & oldRouter) { cerr << "disconnected from router " << oldRouter << endl; }; toRouters.messageHandler = [=] (const std::string & router, std::vector<std::string> message) { handleRouterMessage(router, message); }; toRouters.connectAllServiceProviders("rtbRouterAugmentation", "augmentors"); addSource("Augmentor::toRouters", toRouters); stopWorkers = false; for (size_t i = 0; i < numThreads; ++i) workers.create_thread([=] { this->runWorker(); }); loopMonitor.init(); loopMonitor.addMessageLoop("augmentor", this); loopMonitor.onLoadChange = [=] (double) { recordLevel(this->loadStabilizer.shedProbability(), "shedProbability"); }; addSource("Augmentor::loopMonitor", loopMonitor); } void Augmentor:: start() { MessageLoop::start(); } void Augmentor:: shutdown() { stopWorkers = true; workers.join_all(); MessageLoop::shutdown(); toRouters.shutdown(); } void Augmentor:: respond(const AugmentationRequest & request, const AugmentationList & response) { if (responseQueue.tryPush(make_pair(request, response))) return; cerr << "Dropping augmentation response: response queue is full" << endl; } void Augmentor:: parseMessage(AugmentationRequest& request, Message& message) { const string & version = message.second.at(1); ExcCheckEqual(version, "1.0", "unexpected version in augment"); request.router = message.first; request.timeAvailableMs = 0.05; request.augmentor = 
std::move(message.second.at(2)); request.id = Id(std::move(message.second.at(3))); const string & brSource = std::move(message.second.at(4)); const string & brStr = std::move(message.second.at(5)); request.bidRequest.reset(BidRequest::parse(brSource, brStr)); istringstream agentsStr(message.second.at(6)); ML::DB::Store_Reader reader(agentsStr); reader.load(request.agents); const string & startTimeStr = message.second.at(7); request.startTime = Date::fromSecondsSinceEpoch(strtod(startTimeStr.c_str(), 0)); } void Augmentor:: handleRouterMessage(const std::string & router, std::vector<std::string> & message) { ExcCheck(handleRequest, "No request callback set"); const std::string & type = message.at(0); recordHit("messages." + type); if (type == "CONFIGOK") {} else if (type == "AUGMENT") { bool shedMessage = loadStabilizer.shedMessage(); if (!shedMessage) { Message value = make_pair(router, std::move(message)); shedMessage = !requestQueue.tryPush(std::move(value)); } if (shedMessage) { toRouters.sendMessage( router, "RESPONSE", message.at(1), // version message.at(7), // startTime message.at(3), // auctionId message.at(2), // augmentor "null"); // response recordHit("shedMessages"); } } else cerr << "unknown router message type: " << type << endl; } void Augmentor:: runWorker() { AugmentationRequest request; Message message; while(!stopWorkers) { if (!requestQueue.tryPop(message, 1.0)) continue; try { parseMessage(request, message); } catch (const std::exception& ex) { cerr << "error while parsing message: " << message << " -> " << ex.what() << endl; continue; } handleRequest(request); } } } // namespace RTBKIT
2,668
852
import FWCore.ParameterSet.Config as cms from RecoVertex.BeamSpotProducer.BeamSpot_cfi import * from RecoVertex.BeamSpotProducer.offlineBeamSpotToCUDA_cfi import offlineBeamSpotToCUDA offlineBeamSpotTask = cms.Task(offlineBeamSpot) from Configuration.ProcessModifiers.gpu_cff import gpu _offlineBeamSpotTask_gpu = offlineBeamSpotTask.copy() _offlineBeamSpotTask_gpu.add(offlineBeamSpotToCUDA) gpu.toReplaceWith(offlineBeamSpotTask, _offlineBeamSpotTask_gpu)
168
521
/* $Xorg: PsDef.h,v 1.4 2001/02/09 02:04:36 xorgcvs Exp $ */
/*
Copyright 1996, 1998 The Open Group

Permission to use, copy, modify, distribute, and sell this software and its
documentation for any purpose is hereby granted without fee, provided that
the above copyright notice appear in all copies and that both that
copyright notice and this permission notice appear in supporting
documentation.

The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE OPEN GROUP BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.

Except as contained in this notice, the name of The Open Group shall
not be used in advertising or otherwise to promote the sale, use or
other dealings in this Software without prior written authorization
from The Open Group.

*/
/*
 * (c) Copyright 1996 Hewlett-Packard Company
 * (c) Copyright 1996 International Business Machines Corp.
 * (c) Copyright 1996 Sun Microsystems, Inc.
 * (c) Copyright 1996 Novell, Inc.
 * (c) Copyright 1996 Digital Equipment Corp.
 * (c) Copyright 1996 Fujitsu Limited
 * (c) Copyright 1996 Hitachi, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject
 * to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Except as contained in this notice, the names of the copyright holders
 * shall not be used in advertising or otherwise to promote the sale, use
 * or other dealings in this Software without prior written authorization
 * from said copyright holders.
 */

/*******************************************************************
**
**    *********************************************************
**    *
**    *  File:        PsDef.h
**    *
**    *  Contents:    extra defines and includes for the Ps driver
**    *               for a printing X server.
**    *
**    *  Created By:  <NAME> (Liberty Systems)
**    *
**    *  Copyright:   Copyright 1996 The Open Group, Inc.
** * ** ********************************************************* ** ********************************************************************/ #ifdef HAVE_DIX_CONFIG_H #include <dix-config.h> #endif #ifndef _PSDEF_H_ #define _PSDEF_H_ #define DT_PRINT_JOB_HEADER "DT_PRINT_JOB_HEADER" #define DT_PRINT_JOB_TRAILER "DT_PRINT_JOB_TRAILER" #define DT_PRINT_JOB_COMMAND "DT_PRINT_JOB_COMMAND" #define DT_PRINT_JOB_EXEC_COMMAND "DT_PRINT_JOB_EXEC_COMMAND" #define DT_PRINT_JOB_EXEC_OPTIONS "DT_PRINT_JOB_EXEC_OPTION" #define DT_PRINT_PAGE_HEADER "DT_PRINT_PAGE_HEADER" #define DT_PRINT_PAGE_TRAILER "DT_PRINT_PAGE_TRAILER" #define DT_PRINT_PAGE_COMMAND "DT_PRINT_PAGE_COMMAND" #define DT_IN_FILE_STRING "%(InFile)%" #define DT_OUT_FILE_STRING "%(OutFile)%" #define DT_ALLOWED_COMMANDS_FILE "printCommands" #endif /* _PSDEF_H_ */
1,224
1,845
<filename>rajawali/src/main/java/org/rajawali3d/materials/plugins/PosableMaterialPlugin.java package org.rajawali3d.materials.plugins; import android.opengl.GLES20; import androidx.annotation.FloatRange; import org.rajawali3d.Geometry3D; import org.rajawali3d.materials.Material; import org.rajawali3d.materials.plugins.IMaterialPlugin; import org.rajawali3d.materials.shaders.AShader; import org.rajawali3d.materials.shaders.AShaderBase; import org.rajawali3d.materials.shaders.IShaderFragment; import static org.rajawali3d.math.MathUtil.clamp; import java.nio.FloatBuffer; public class PosableMaterialPlugin implements IMaterialPlugin { PosingVertexShaderFragment mVertexShader; public enum PosingShaderVar implements AShaderBase.IGlobalShaderVar { U_POSE_INTERPOLATION("uPoseInterpolation", AShaderBase.DataType.FLOAT), A_POSE_POSITION("aPosePosition", AShaderBase.DataType.VEC3), A_POSE_NORMAL("aPoseNormal", AShaderBase.DataType.VEC3); private String mVarString; private AShaderBase.DataType mDataType; PosingShaderVar(String varString, AShaderBase.DataType dataType) { mVarString = varString; mDataType = dataType; } public String getVarString() { return mVarString; } public AShaderBase.DataType getDataType() { return mDataType; } } public PosableMaterialPlugin(Geometry3D geometry) { mVertexShader = new PosingVertexShaderFragment(geometry); } public void setInterpolation(@FloatRange(from = 0.0d, to = 1.0d) double interpolation) { mVertexShader.setInterpolation((float)clamp(interpolation, 0, 1)); } @Override public Material.PluginInsertLocation getInsertLocation() { return Material.PluginInsertLocation.PRE_TRANSFORM; } @Override public IShaderFragment getVertexShaderFragment() { return mVertexShader; } @Override public IShaderFragment getFragmentShaderFragment() { return null; } @Override public void bindTextures(int i) { mVertexShader.bindTextures(i); } @Override public void unbindTextures() { } class PosingVertexShaderFragment extends AShader implements IShaderFragment { static final String SHADER_ID = "POSING_VERTEX_SHADER_FRAGMENT"; FloatBuffer mVertices; FloatBuffer mNormals; RVec3 maPosePosition; FloatBuffer maPosePositionBuffer; int maPosePositionBufferHandle; int maPosePositionHandle; RVec3 maPoseNormal; FloatBuffer maPoseNormalBuffer; int maPoseNormalBufferHandle; int maPoseNormalHandle; RFloat muInterpolation; float mInterpolation = 0; int muInterpolationHandle; public PosingVertexShaderFragment(Geometry3D geometry) { super(ShaderType.VERTEX_SHADER_FRAGMENT); initialize(geometry); } @Override public String getShaderId() { return SHADER_ID; } public void initialize(Geometry3D geometry) { super.initialize(); mInterpolation = 0; muInterpolation = (RFloat) addUniform(PosingShaderVar.U_POSE_INTERPOLATION); maPosePositionBuffer = geometry.getVertices(); maPosePosition = (RVec3) addAttribute(PosingShaderVar.A_POSE_POSITION); maPosePositionBuffer.compact().position(0); maPoseNormalBuffer = geometry.getNormals(); maPoseNormal = (RVec3) addAttribute(PosingShaderVar.A_POSE_NORMAL); maPoseNormalBuffer.compact().position(0); } public void setInterpolation(float interpolation) { if(interpolation<0) interpolation=0; if(interpolation>1) interpolation=1; mInterpolation = interpolation; } @Override public Material.PluginInsertLocation getInsertLocation() { return null; } @Override public void applyParams() { super.applyParams(); GLES20.glUniform1f(muInterpolationHandle, mInterpolation); } @Override public void setLocations(int programHandle) { super.setLocations(programHandle); muInterpolationHandle = 
getUniformLocation(programHandle, PosingShaderVar.U_POSE_INTERPOLATION); int buff[] = new int[2]; GLES20.glGenBuffers(2, buff, 0); maPosePositionBufferHandle = buff[0]; maPoseNormalBufferHandle = buff[1]; status = GLES20.glGetError(); GLES20.glBindAttribLocation(programHandle, maPosePositionBufferHandle, PosingShaderVar.A_POSE_POSITION.getVarString()); GLES20.glBindAttribLocation(programHandle, maPoseNormalBufferHandle, PosingShaderVar.A_POSE_NORMAL.getVarString()); status = GLES20.glGetError(); maPosePositionHandle = GLES20.glGetAttribLocation(programHandle, PosingShaderVar.A_POSE_POSITION.getVarString()); maPoseNormalHandle = GLES20.glGetAttribLocation(programHandle, PosingShaderVar.A_POSE_NORMAL.getVarString()); status = GLES20.glGetError(); } int status; @Override public void bindTextures(int i) { if(maPosePositionHandle > 0) { GLES20.glBindBuffer(GLES20.GL_ARRAY_BUFFER, maPosePositionBufferHandle); GLES20.glBufferData(GLES20.GL_ARRAY_BUFFER, maPosePositionBuffer.capacity() * Geometry3D.FLOAT_SIZE_BYTES, maPosePositionBuffer, GLES20.GL_STATIC_DRAW); GLES20.glVertexAttribPointer(maPosePositionHandle, 3, GLES20.GL_FLOAT, false, 0, 0); GLES20.glEnableVertexAttribArray(maPosePositionHandle); status = GLES20.glGetError(); } if(maPoseNormalHandle > 0) { GLES20.glBindBuffer(GLES20.GL_ARRAY_BUFFER, maPoseNormalBufferHandle); GLES20.glBufferData(GLES20.GL_ARRAY_BUFFER, maPoseNormalBuffer.capacity() * Geometry3D.FLOAT_SIZE_BYTES, maPoseNormalBuffer, GLES20.GL_STATIC_DRAW); GLES20.glVertexAttribPointer(maPoseNormalHandle, 3, GLES20.GL_FLOAT, false, 0, 0); GLES20.glEnableVertexAttribArray(maPoseNormalHandle); status = GLES20.glGetError(); } } @Override public void unbindTextures() { } @Override public void main() { RVec4 position = (RVec4)getGlobal(DefaultShaderVar.G_POSITION); RVec3 normal = (RVec3)getGlobal(DefaultShaderVar.G_NORMAL); RVec4 aPosition = (RVec4)getGlobal(DefaultShaderVar.A_POSITION); RVec3 aNormal = (RVec3)getGlobal(DefaultShaderVar.A_NORMAL); position.assign(mix(aPosition, castVec4(maPosePosition, 1), muInterpolation)); normal.assign(mix(aNormal, maPoseNormal, muInterpolation)); } } }
3,138
984
<reponame>om-sharma/java-driver<gh_stars>100-1000 /* * Copyright DataStax, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.datastax.dse.driver.internal.core.data.geometry; import com.datastax.dse.driver.api.core.data.geometry.Point; import com.esri.core.geometry.ogc.OGCPoint; import edu.umd.cs.findbugs.annotations.NonNull; import net.jcip.annotations.Immutable; @Immutable public class DefaultPoint extends DefaultGeometry implements Point { private static final long serialVersionUID = -8337622213980781285L; public DefaultPoint(double x, double y) { this( new OGCPoint( new com.esri.core.geometry.Point(x, y), DefaultGeometry.SPATIAL_REFERENCE_4326)); } public DefaultPoint(@NonNull OGCPoint point) { super(point); } @NonNull @Override public OGCPoint getOgcGeometry() { return (OGCPoint) super.getOgcGeometry(); } @Override public double X() { return getOgcGeometry().X(); } @Override public double Y() { return getOgcGeometry().Y(); } /** * This object gets replaced by an internal proxy for serialization. * * @serialData a single byte array containing the Well-Known Binary representation. */ private Object writeReplace() { return new WkbSerializationProxy(this.asWellKnownBinary()); } }
591
382
<gh_stars>100-1000 import math from pathlib import Path import numba import numpy as np from det3d.utils.buildtools.pybind11_build import load_pb11 from numba import cuda try: from det3d.ops.nms.nms import non_max_suppression except: current_dir = Path(__file__).resolve().parents[0] load_pb11( ["./nms_kernel.cu.cc", "./nms.cc"], current_dir / "nms.so", current_dir, cuda=True, ) from det3d.ops.nms.nms import non_max_suppression @cuda.jit("(float32[:], float32[:])", device=True, inline=True) def iou_device(a, b): left = max(a[0], b[0]) right = min(a[2], b[2]) top = max(a[1], b[1]) bottom = min(a[3], b[3]) width = max(right - left + 1, 0.0) height = max(bottom - top + 1, 0.0) interS = width * height Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1) Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1) return interS / (Sa + Sb - interS) @cuda.jit("(int64, float32, float32[:, :], uint64[:])") def nms_kernel_v2(n_boxes, nms_overlap_thresh, dev_boxes, dev_mask): threadsPerBlock = 8 * 8 row_start = cuda.blockIdx.y col_start = cuda.blockIdx.x tx = cuda.threadIdx.x row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock) col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock) block_boxes = cuda.shared.array(shape=(threadsPerBlock, 5), dtype=numba.float32) dev_box_idx = threadsPerBlock * col_start + tx if tx < col_size: block_boxes[tx, 0] = dev_boxes[dev_box_idx, 0] block_boxes[tx, 1] = dev_boxes[dev_box_idx, 1] block_boxes[tx, 2] = dev_boxes[dev_box_idx, 2] block_boxes[tx, 3] = dev_boxes[dev_box_idx, 3] block_boxes[tx, 4] = dev_boxes[dev_box_idx, 4] cuda.syncthreads() if cuda.threadIdx.x < row_size: cur_box_idx = threadsPerBlock * row_start + cuda.threadIdx.x # cur_box = dev_boxes + cur_box_idx * 5; i = 0 t = 0 start = 0 if row_start == col_start: start = tx + 1 for i in range(start, col_size): if iou_device(dev_boxes[cur_box_idx], block_boxes[i]) > nms_overlap_thresh: t |= 1 << i col_blocks = (n_boxes) // (threadsPerBlock) + ( (n_boxes) % (threadsPerBlock) > 0 ) dev_mask[cur_box_idx * col_blocks + col_start] = t @cuda.jit("(int64, float32, float32[:], uint64[:])") def nms_kernel(n_boxes, nms_overlap_thresh, dev_boxes, dev_mask): threadsPerBlock = 8 * 8 row_start = cuda.blockIdx.y col_start = cuda.blockIdx.x tx = cuda.threadIdx.x row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock) col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock) block_boxes = cuda.shared.array(shape=(64 * 5,), dtype=numba.float32) dev_box_idx = threadsPerBlock * col_start + tx if tx < col_size: block_boxes[tx * 5 + 0] = dev_boxes[dev_box_idx * 5 + 0] block_boxes[tx * 5 + 1] = dev_boxes[dev_box_idx * 5 + 1] block_boxes[tx * 5 + 2] = dev_boxes[dev_box_idx * 5 + 2] block_boxes[tx * 5 + 3] = dev_boxes[dev_box_idx * 5 + 3] block_boxes[tx * 5 + 4] = dev_boxes[dev_box_idx * 5 + 4] cuda.syncthreads() if tx < row_size: cur_box_idx = threadsPerBlock * row_start + tx # cur_box = dev_boxes + cur_box_idx * 5; t = 0 start = 0 if row_start == col_start: start = tx + 1 for i in range(start, col_size): iou = iou_device( dev_boxes[cur_box_idx * 5 : cur_box_idx * 5 + 4], block_boxes[i * 5 : i * 5 + 4], ) if iou > nms_overlap_thresh: t |= 1 << i col_blocks = (n_boxes) // (threadsPerBlock) + ( (n_boxes) % (threadsPerBlock) > 0 ) dev_mask[cur_box_idx * col_blocks + col_start] = t @numba.jit(nopython=True) def div_up(m, n): return m // n + (m % n > 0) @numba.jit(nopython=True) def nms_postprocess(keep_out, mask_host, boxes_num): threadsPerBlock = 8 * 8 col_blocks = div_up(boxes_num, threadsPerBlock) remv 
= np.zeros((col_blocks), dtype=np.uint64) num_to_keep = 0 for i in range(boxes_num): nblock = i // threadsPerBlock inblock = i % threadsPerBlock mask = np.array(1 << inblock, dtype=np.uint64) if not (remv[nblock] & mask): keep_out[num_to_keep] = i num_to_keep += 1 # unsigned long long *p = &mask_host[0] + i * col_blocks; for j in range(nblock, col_blocks): remv[j] |= mask_host[i * col_blocks + j] # remv[j] |= p[j]; return num_to_keep def nms_gpu(dets, nms_overlap_thresh, device_id=0): """nms in gpu. Args: dets ([type]): [description] nms_overlap_thresh ([type]): [description] device_id ([type], optional): Defaults to 0. [description] Returns: [type]: [description] """ boxes_num = dets.shape[0] keep_out = np.zeros([boxes_num], dtype=np.int32) scores = dets[:, 4] order = scores.argsort()[::-1].astype(np.int32) boxes_host = dets[order, :] threadsPerBlock = 8 * 8 col_blocks = div_up(boxes_num, threadsPerBlock) cuda.select_device(device_id) mask_host = np.zeros((boxes_num * col_blocks,), dtype=np.uint64) blockspergrid = ( div_up(boxes_num, threadsPerBlock), div_up(boxes_num, threadsPerBlock), ) stream = cuda.stream() with stream.auto_synchronize(): boxes_dev = cuda.to_device(boxes_host.reshape([-1]), stream) mask_dev = cuda.to_device(mask_host, stream) nms_kernel[blockspergrid, threadsPerBlock, stream]( boxes_num, nms_overlap_thresh, boxes_dev, mask_dev ) mask_dev.copy_to_host(mask_host, stream=stream) # stream.synchronize() num_out = nms_postprocess(keep_out, mask_host, boxes_num) keep = keep_out[:num_out] return list(order[keep]) def nms_gpu_cc(dets, nms_overlap_thresh, device_id=0): boxes_num = dets.shape[0] keep = np.zeros(boxes_num, dtype=np.int32) scores = dets[:, 4] order = scores.argsort()[::-1].astype(np.int32) sorted_dets = dets[order, :] num_out = non_max_suppression(sorted_dets, keep, nms_overlap_thresh, device_id) keep = keep[:num_out] return list(order[keep]) @cuda.jit("(float32[:], float32[:], float32[:])", device=True, inline=True) def trangle_area(a, b, c): return ((a[0] - c[0]) * (b[1] - c[1]) - (a[1] - c[1]) * (b[0] - c[0])) / 2.0 @cuda.jit("(float32[:], int32)", device=True, inline=True) def area(int_pts, num_of_inter): area_val = 0.0 for i in range(num_of_inter - 2): area_val += abs( trangle_area( int_pts[:2], int_pts[2 * i + 2 : 2 * i + 4], int_pts[2 * i + 4 : 2 * i + 6], ) ) return area_val @cuda.jit("(float32[:], int32)", device=True, inline=True) def sort_vertex_in_convex_polygon(int_pts, num_of_inter): if num_of_inter > 0: center = cuda.local.array((2,), dtype=numba.float32) center[:] = 0.0 for i in range(num_of_inter): center[0] += int_pts[2 * i] center[1] += int_pts[2 * i + 1] center[0] /= num_of_inter center[1] /= num_of_inter v = cuda.local.array((2,), dtype=numba.float32) vs = cuda.local.array((16,), dtype=numba.float32) for i in range(num_of_inter): v[0] = int_pts[2 * i] - center[0] v[1] = int_pts[2 * i + 1] - center[1] d = math.sqrt(v[0] * v[0] + v[1] * v[1]) v[0] = v[0] / d v[1] = v[1] / d if v[1] < 0: v[0] = -2 - v[0] vs[i] = v[0] j = 0 temp = 0 for i in range(1, num_of_inter): if vs[i - 1] > vs[i]: temp = vs[i] tx = int_pts[2 * i] ty = int_pts[2 * i + 1] j = i while j > 0 and vs[j - 1] > temp: vs[j] = vs[j - 1] int_pts[j * 2] = int_pts[j * 2 - 2] int_pts[j * 2 + 1] = int_pts[j * 2 - 1] j -= 1 vs[j] = temp int_pts[j * 2] = tx int_pts[j * 2 + 1] = ty @cuda.jit( "(float32[:], float32[:], int32, int32, float32[:])", device=True, inline=True ) def line_segment_intersection(pts1, pts2, i, j, temp_pts): A = cuda.local.array((2,), dtype=numba.float32) B = 
cuda.local.array((2,), dtype=numba.float32) C = cuda.local.array((2,), dtype=numba.float32) D = cuda.local.array((2,), dtype=numba.float32) A[0] = pts1[2 * i] A[1] = pts1[2 * i + 1] B[0] = pts1[2 * ((i + 1) % 4)] B[1] = pts1[2 * ((i + 1) % 4) + 1] C[0] = pts2[2 * j] C[1] = pts2[2 * j + 1] D[0] = pts2[2 * ((j + 1) % 4)] D[1] = pts2[2 * ((j + 1) % 4) + 1] BA0 = B[0] - A[0] BA1 = B[1] - A[1] DA0 = D[0] - A[0] CA0 = C[0] - A[0] DA1 = D[1] - A[1] CA1 = C[1] - A[1] acd = DA1 * CA0 > CA1 * DA0 bcd = (D[1] - B[1]) * (C[0] - B[0]) > (C[1] - B[1]) * (D[0] - B[0]) if acd != bcd: abc = CA1 * BA0 > BA1 * CA0 abd = DA1 * BA0 > BA1 * DA0 if abc != abd: DC0 = D[0] - C[0] DC1 = D[1] - C[1] ABBA = A[0] * B[1] - B[0] * A[1] CDDC = C[0] * D[1] - D[0] * C[1] DH = BA1 * DC0 - BA0 * DC1 Dx = ABBA * DC0 - BA0 * CDDC Dy = ABBA * DC1 - BA1 * CDDC temp_pts[0] = Dx / DH temp_pts[1] = Dy / DH return True return False @cuda.jit( "(float32[:], float32[:], int32, int32, float32[:])", device=True, inline=True ) def line_segment_intersection_v1(pts1, pts2, i, j, temp_pts): a = cuda.local.array((2,), dtype=numba.float32) b = cuda.local.array((2,), dtype=numba.float32) c = cuda.local.array((2,), dtype=numba.float32) d = cuda.local.array((2,), dtype=numba.float32) a[0] = pts1[2 * i] a[1] = pts1[2 * i + 1] b[0] = pts1[2 * ((i + 1) % 4)] b[1] = pts1[2 * ((i + 1) % 4) + 1] c[0] = pts2[2 * j] c[1] = pts2[2 * j + 1] d[0] = pts2[2 * ((j + 1) % 4)] d[1] = pts2[2 * ((j + 1) % 4) + 1] area_abc = trangle_area(a, b, c) area_abd = trangle_area(a, b, d) if area_abc * area_abd >= 0: return False area_cda = trangle_area(c, d, a) area_cdb = area_cda + area_abc - area_abd if area_cda * area_cdb >= 0: return False t = area_cda / (area_abd - area_abc) dx = t * (b[0] - a[0]) dy = t * (b[1] - a[1]) temp_pts[0] = a[0] + dx temp_pts[1] = a[1] + dy return True @cuda.jit("(float32, float32, float32[:])", device=True, inline=True) def point_in_quadrilateral(pt_x, pt_y, corners): ab0 = corners[2] - corners[0] ab1 = corners[3] - corners[1] ad0 = corners[6] - corners[0] ad1 = corners[7] - corners[1] ap0 = pt_x - corners[0] ap1 = pt_y - corners[1] abab = ab0 * ab0 + ab1 * ab1 abap = ab0 * ap0 + ab1 * ap1 adad = ad0 * ad0 + ad1 * ad1 adap = ad0 * ap0 + ad1 * ap1 return abab >= abap and abap >= 0 and adad >= adap and adap >= 0 @cuda.jit("(float32[:], float32[:], float32[:])", device=True, inline=True) def quadrilateral_intersection(pts1, pts2, int_pts): num_of_inter = 0 for i in range(4): if point_in_quadrilateral(pts1[2 * i], pts1[2 * i + 1], pts2): int_pts[num_of_inter * 2] = pts1[2 * i] int_pts[num_of_inter * 2 + 1] = pts1[2 * i + 1] num_of_inter += 1 if point_in_quadrilateral(pts2[2 * i], pts2[2 * i + 1], pts1): int_pts[num_of_inter * 2] = pts2[2 * i] int_pts[num_of_inter * 2 + 1] = pts2[2 * i + 1] num_of_inter += 1 temp_pts = cuda.local.array((2,), dtype=numba.float32) for i in range(4): for j in range(4): has_pts = line_segment_intersection(pts1, pts2, i, j, temp_pts) if has_pts: int_pts[num_of_inter * 2] = temp_pts[0] int_pts[num_of_inter * 2 + 1] = temp_pts[1] num_of_inter += 1 return num_of_inter @cuda.jit("(float32[:], float32[:])", device=True, inline=True) def rbbox_to_corners(corners, rbbox): # generate clockwise corners and rotate it clockwise angle = rbbox[4] a_cos = math.cos(angle) a_sin = math.sin(angle) center_x = rbbox[0] center_y = rbbox[1] x_d = rbbox[2] y_d = rbbox[3] corners_x = cuda.local.array((4,), dtype=numba.float32) corners_y = cuda.local.array((4,), dtype=numba.float32) corners_x[0] = -x_d / 2 corners_x[1] = -x_d / 2 
corners_x[2] = x_d / 2 corners_x[3] = x_d / 2 corners_y[0] = -y_d / 2 corners_y[1] = y_d / 2 corners_y[2] = y_d / 2 corners_y[3] = -y_d / 2 for i in range(4): corners[2 * i] = a_cos * corners_x[i] + a_sin * corners_y[i] + center_x corners[2 * i + 1] = -a_sin * corners_x[i] + a_cos * corners_y[i] + center_y @cuda.jit("(float32[:], float32[:])", device=True, inline=True) def inter(rbbox1, rbbox2): corners1 = cuda.local.array((8,), dtype=numba.float32) corners2 = cuda.local.array((8,), dtype=numba.float32) intersection_corners = cuda.local.array((16,), dtype=numba.float32) rbbox_to_corners(corners1, rbbox1) rbbox_to_corners(corners2, rbbox2) num_intersection = quadrilateral_intersection( corners1, corners2, intersection_corners ) sort_vertex_in_convex_polygon(intersection_corners, num_intersection) # print(intersection_corners.reshape([-1, 2])[:num_intersection]) return area(intersection_corners, num_intersection) @cuda.jit("(float32[:], float32[:])", device=True, inline=True) def devRotateIoU(rbox1, rbox2): area1 = rbox1[2] * rbox1[3] area2 = rbox2[2] * rbox2[3] area_inter = inter(rbox1, rbox2) return area_inter / (area1 + area2 - area_inter) @cuda.jit("(int64, float32, float32[:], uint64[:])") def rotate_nms_kernel(n_boxes, nms_overlap_thresh, dev_boxes, dev_mask): threadsPerBlock = 8 * 8 row_start = cuda.blockIdx.y col_start = cuda.blockIdx.x tx = cuda.threadIdx.x row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock) col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock) block_boxes = cuda.shared.array(shape=(64 * 6,), dtype=numba.float32) dev_box_idx = threadsPerBlock * col_start + tx if tx < col_size: block_boxes[tx * 6 + 0] = dev_boxes[dev_box_idx * 6 + 0] block_boxes[tx * 6 + 1] = dev_boxes[dev_box_idx * 6 + 1] block_boxes[tx * 6 + 2] = dev_boxes[dev_box_idx * 6 + 2] block_boxes[tx * 6 + 3] = dev_boxes[dev_box_idx * 6 + 3] block_boxes[tx * 6 + 4] = dev_boxes[dev_box_idx * 6 + 4] block_boxes[tx * 6 + 5] = dev_boxes[dev_box_idx * 6 + 5] cuda.syncthreads() if tx < row_size: cur_box_idx = threadsPerBlock * row_start + tx # cur_box = dev_boxes + cur_box_idx * 5; t = 0 start = 0 if row_start == col_start: start = tx + 1 for i in range(start, col_size): iou = devRotateIoU( dev_boxes[cur_box_idx * 6 : cur_box_idx * 6 + 5], block_boxes[i * 6 : i * 6 + 5], ) # print('iou', iou, cur_box_idx, i) if iou > nms_overlap_thresh: t |= 1 << i col_blocks = (n_boxes) // (threadsPerBlock) + ( (n_boxes) % (threadsPerBlock) > 0 ) dev_mask[cur_box_idx * col_blocks + col_start] = t def rotate_nms_gpu(dets, nms_overlap_thresh, device_id=0): """nms in gpu. WARNING: this function can provide right result but its performance isn't be tested Args: dets ([type]): [description] nms_overlap_thresh ([type]): [description] device_id ([type], optional): Defaults to 0. 
[description] Returns: [type]: [description] """ dets = dets.astype(np.float32) boxes_num = dets.shape[0] keep_out = np.zeros([boxes_num], dtype=np.int32) scores = dets[:, 5] order = scores.argsort()[::-1].astype(np.int32) boxes_host = dets[order, :] threadsPerBlock = 8 * 8 col_blocks = div_up(boxes_num, threadsPerBlock) cuda.select_device(device_id) # mask_host shape: boxes_num * col_blocks * sizeof(np.uint64) mask_host = np.zeros((boxes_num * col_blocks,), dtype=np.uint64) blockspergrid = ( div_up(boxes_num, threadsPerBlock), div_up(boxes_num, threadsPerBlock), ) stream = cuda.stream() with stream.auto_synchronize(): boxes_dev = cuda.to_device(boxes_host.reshape([-1]), stream) mask_dev = cuda.to_device(mask_host, stream) rotate_nms_kernel[blockspergrid, threadsPerBlock, stream]( boxes_num, nms_overlap_thresh, boxes_dev, mask_dev ) mask_dev.copy_to_host(mask_host, stream=stream) num_out = nms_postprocess(keep_out, mask_host, boxes_num) keep = keep_out[:num_out] return list(order[keep]) @cuda.jit("(int64, int64, float32[:], float32[:], float32[:])", fastmath=False) def rotate_iou_kernel(N, K, dev_boxes, dev_query_boxes, dev_iou): threadsPerBlock = 8 * 8 row_start = cuda.blockIdx.x col_start = cuda.blockIdx.y tx = cuda.threadIdx.x row_size = min(N - row_start * threadsPerBlock, threadsPerBlock) col_size = min(K - col_start * threadsPerBlock, threadsPerBlock) block_boxes = cuda.shared.array(shape=(64 * 5,), dtype=numba.float32) block_qboxes = cuda.shared.array(shape=(64 * 5,), dtype=numba.float32) dev_query_box_idx = threadsPerBlock * col_start + tx dev_box_idx = threadsPerBlock * row_start + tx if tx < col_size: block_qboxes[tx * 5 + 0] = dev_query_boxes[dev_query_box_idx * 5 + 0] block_qboxes[tx * 5 + 1] = dev_query_boxes[dev_query_box_idx * 5 + 1] block_qboxes[tx * 5 + 2] = dev_query_boxes[dev_query_box_idx * 5 + 2] block_qboxes[tx * 5 + 3] = dev_query_boxes[dev_query_box_idx * 5 + 3] block_qboxes[tx * 5 + 4] = dev_query_boxes[dev_query_box_idx * 5 + 4] if tx < row_size: block_boxes[tx * 5 + 0] = dev_boxes[dev_box_idx * 5 + 0] block_boxes[tx * 5 + 1] = dev_boxes[dev_box_idx * 5 + 1] block_boxes[tx * 5 + 2] = dev_boxes[dev_box_idx * 5 + 2] block_boxes[tx * 5 + 3] = dev_boxes[dev_box_idx * 5 + 3] block_boxes[tx * 5 + 4] = dev_boxes[dev_box_idx * 5 + 4] cuda.syncthreads() if tx < row_size: for i in range(col_size): offset = ( row_start * threadsPerBlock * K + col_start * threadsPerBlock + tx * K + i ) dev_iou[offset] = devRotateIoU( block_qboxes[i * 5 : i * 5 + 5], block_boxes[tx * 5 : tx * 5 + 5] ) def rotate_iou_gpu(boxes, query_boxes, device_id=0): """rotated box iou running in gpu. 500x faster than cpu version (take 5ms in one example with numba.cuda code). convert from [this project]( https://github.com/hongzhenwang/RRPN-revise/tree/master/lib/rotation). Args: boxes (float tensor: [N, 5]): rbboxes. format: centers, dims, angles(clockwise when positive) query_boxes (float tensor: [K, 5]): [description] device_id (int, optional): Defaults to 0. 
[description] Returns: [type]: [description] """ box_dtype = boxes.dtype boxes = boxes.astype(np.float32) query_boxes = query_boxes.astype(np.float32) N = boxes.shape[0] K = query_boxes.shape[0] iou = np.zeros((N, K), dtype=np.float32) if N == 0 or K == 0: return iou threadsPerBlock = 8 * 8 cuda.select_device(device_id) blockspergrid = (div_up(N, threadsPerBlock), div_up(K, threadsPerBlock)) stream = cuda.stream() with stream.auto_synchronize(): boxes_dev = cuda.to_device(boxes.reshape([-1]), stream) query_boxes_dev = cuda.to_device(query_boxes.reshape([-1]), stream) iou_dev = cuda.to_device(iou.reshape([-1]), stream) rotate_iou_kernel[blockspergrid, threadsPerBlock, stream]( N, K, boxes_dev, query_boxes_dev, iou_dev ) iou_dev.copy_to_host(iou.reshape([-1]), stream=stream) return iou.astype(boxes.dtype) @cuda.jit("(float32[:], float32[:], int32)", device=True, inline=True) def devRotateIoUEval(rbox1, rbox2, criterion=-1): area1 = rbox1[2] * rbox1[3] area2 = rbox2[2] * rbox2[3] area_inter = inter(rbox1, rbox2) if criterion == -1: return area_inter / (area1 + area2 - area_inter) elif criterion == 0: return area_inter / area1 elif criterion == 1: return area_inter / area2 else: return area_inter @cuda.jit("(int64, int64, float32[:], float32[:], float32[:], int32)", fastmath=False) def rotate_iou_kernel_eval(N, K, dev_boxes, dev_query_boxes, dev_iou, criterion=-1): threadsPerBlock = 8 * 8 row_start = cuda.blockIdx.x col_start = cuda.blockIdx.y tx = cuda.threadIdx.x row_size = min(N - row_start * threadsPerBlock, threadsPerBlock) col_size = min(K - col_start * threadsPerBlock, threadsPerBlock) block_boxes = cuda.shared.array(shape=(64 * 5,), dtype=numba.float32) block_qboxes = cuda.shared.array(shape=(64 * 5,), dtype=numba.float32) dev_query_box_idx = threadsPerBlock * col_start + tx dev_box_idx = threadsPerBlock * row_start + tx if tx < col_size: block_qboxes[tx * 5 + 0] = dev_query_boxes[dev_query_box_idx * 5 + 0] block_qboxes[tx * 5 + 1] = dev_query_boxes[dev_query_box_idx * 5 + 1] block_qboxes[tx * 5 + 2] = dev_query_boxes[dev_query_box_idx * 5 + 2] block_qboxes[tx * 5 + 3] = dev_query_boxes[dev_query_box_idx * 5 + 3] block_qboxes[tx * 5 + 4] = dev_query_boxes[dev_query_box_idx * 5 + 4] if tx < row_size: block_boxes[tx * 5 + 0] = dev_boxes[dev_box_idx * 5 + 0] block_boxes[tx * 5 + 1] = dev_boxes[dev_box_idx * 5 + 1] block_boxes[tx * 5 + 2] = dev_boxes[dev_box_idx * 5 + 2] block_boxes[tx * 5 + 3] = dev_boxes[dev_box_idx * 5 + 3] block_boxes[tx * 5 + 4] = dev_boxes[dev_box_idx * 5 + 4] cuda.syncthreads() if tx < row_size: for i in range(col_size): offset = ( row_start * threadsPerBlock * K + col_start * threadsPerBlock + tx * K + i ) dev_iou[offset] = devRotateIoUEval( block_qboxes[i * 5 : i * 5 + 5], block_boxes[tx * 5 : tx * 5 + 5], criterion, ) def rotate_iou_gpu_eval(boxes, query_boxes, criterion=-1, device_id=0): """rotated box iou running in gpu. 500x faster than cpu version (take 5ms in one example with numba.cuda code). convert from [this project]( https://github.com/hongzhenwang/RRPN-revise/tree/master/lib/rotation). Args: boxes (float tensor: [N, 5]): rbboxes. format: centers, dims, angles(clockwise when positive) query_boxes (float tensor: [K, 5]): [description] device_id (int, optional): Defaults to 0. 
[description] Returns: [type]: [description] """ box_dtype = boxes.dtype boxes = boxes.astype(np.float32) query_boxes = query_boxes.astype(np.float32) N = boxes.shape[0] K = query_boxes.shape[0] iou = np.zeros((N, K), dtype=np.float32) if N == 0 or K == 0: return iou threadsPerBlock = 8 * 8 cuda.select_device(device_id) blockspergrid = (div_up(N, threadsPerBlock), div_up(K, threadsPerBlock)) stream = cuda.stream() with stream.auto_synchronize(): boxes_dev = cuda.to_device(boxes.reshape([-1]), stream) query_boxes_dev = cuda.to_device(query_boxes.reshape([-1]), stream) iou_dev = cuda.to_device(iou.reshape([-1]), stream) rotate_iou_kernel_eval[blockspergrid, threadsPerBlock, stream]( N, K, boxes_dev, query_boxes_dev, iou_dev, criterion ) iou_dev.copy_to_host(iou.reshape([-1]), stream=stream) return iou.astype(boxes.dtype)
12,250
362
// Copyright (c) 2018-2020, <NAME>. For more information see 'LICENSE' #pragma once #ifdef FG_ENABLE_VULKAN # include "framework/VR/IVRDevice.h" # include "framework/Window/IWindow.h" # include "framework/Vulkan/VulkanSwapchain.h" namespace FGC { // // VR Device Emulator // class VRDeviceEmulator final : public IVRDevice, public VulkanDeviceFn { // types private: using Listeners_t = HashSet< IVRDeviceEventListener *>; using TimePoint_t = std::chrono::high_resolution_clock::time_point; using SecondsF = std::chrono::duration< float >; struct PerQueue { static constexpr uint MaxFrames = 8; using CmdBuffers_t = StaticArray< VkCommandBuffer, MaxFrames >; using Fences_t = StaticArray< VkFence, MaxFrames >; using Semaphores_t = StaticArray< VkSemaphore, MaxFrames >; VkCommandPool cmdPool = VK_NULL_HANDLE; CmdBuffers_t cmdBuffers; Fences_t fences; Semaphores_t waitSemaphores; Semaphores_t signalSemaphores; uint frame = 0; }; struct HandController { float2 dpad; float2 dpadDelta; bool dpadChanged = false; }; struct ControllerEmulator { HandController left, right; }; class WindowEventListener final : public IWindowEventListener { // variables private: ControllerEmulator _controller; float2 _cameraAngle; float2 _lastMousePos; bool _mousePressed = false; const float _mouseSens = 0.01f; bool _isActive = true; bool _isVisible = true; // methods public: void OnResize (const uint2 &) override; void OnRefresh () override {} void OnDestroy () override; void OnUpdate () override {} void OnKey (StringView key, EKeyAction action) override; void OnMouseMove (const float2 &pos) override; void Update (OUT Mat4_t &pose, INOUT ControllerEmulator &cont); ND_ bool IsActive () const { return _isActive; } ND_ bool IsVisible () const { return _isVisible; } }; using Queues_t = FixedArray< PerQueue, 16 >; // variables private: Listeners_t _listeners; VulkanDeviceFnTable _deviceFnTable; VRCamera _camera; WindowEventListener _wndListener; VkInstance _vkInstance; VkPhysicalDevice _vkPhysicalDevice; VkDevice _vkLogicalDevice; Queues_t _queues; VkSemaphore _lastSignal; WindowPtr _output; VulkanSwapchainPtr _swapchain; ControllerEmulator _controller; TimePoint_t _lastUpdateTime; VRControllers_t _vrControllers; EHmdStatus _hmdStatus = EHmdStatus::PowerOff; BitSet<2> _submitted; bool _isCreated; // methods public: explicit VRDeviceEmulator (WindowPtr); ~VRDeviceEmulator () override; bool Create () override; bool SetVKDevice (InstanceVk_t instance, PhysicalDeviceVk_t physicalDevice, DeviceVk_t logicalDevice) override; void Destroy () override; void AddListener (IVRDeviceEventListener *listener) override; void RemoveListener (IVRDeviceEventListener *listener) override; bool Update () override; void SetupCamera (const float2 &clipPlanes) override; bool Submit (const VRImage &, Eye) override; VRCamera const& GetCamera () const override { return _camera; } VRControllers_t const& GetControllers () const override { return _vrControllers; } EHmdStatus GetHmdStatus () const override { return _hmdStatus; } Array<String> GetRequiredInstanceExtensions () const override; Array<String> GetRequiredDeviceExtensions (InstanceVk_t) const override; uint2 GetRenderTargetDimension () const override; }; } // FGC #endif // FG_ENABLE_VULKAN
1,427
1,932
package cn.springcloud.book.gateway.filter;

import org.springframework.cloud.gateway.filter.GatewayFilterChain;
import org.springframework.cloud.gateway.filter.GlobalFilter;
import org.springframework.core.Ordered;
import org.springframework.stereotype.Component;
import org.springframework.web.server.ServerWebExchange;
import org.springframework.web.util.UriComponentsBuilder;
import reactor.core.publisher.Mono;

import java.net.URI;

import static org.springframework.cloud.gateway.support.ServerWebExchangeUtils.GATEWAY_REQUEST_URL_ATTR;

/**
 * Rewrites https to http after LoadBalancerClientFilter has executed.
 */
@Component
public class HttpSchemeFilter implements GlobalFilter, Ordered {

    private static final int HTTPS_TO_HTTP_FILTER_ORDER = 10101;

    @Override
    public Mono<Void> filter(ServerWebExchange exchange, GatewayFilterChain chain) {
        Object uriObj = exchange.getAttributes().get(GATEWAY_REQUEST_URL_ATTR);
        if (uriObj != null) {
            URI uri = (URI) uriObj;
            uri = this.upgradeConnection(uri, "http");
            exchange.getAttributes().put(GATEWAY_REQUEST_URL_ATTR, uri);
        }
        return chain.filter(exchange);
    }

    private URI upgradeConnection(URI uri, String scheme) {
        UriComponentsBuilder uriComponentsBuilder = UriComponentsBuilder.fromUri(uri).scheme(scheme);
        if (uri.getRawQuery() != null) {
            // When building the URI, UriComponentsBuilder verifies the allowed characters and does not
            // support '+', so we replace it with its equivalent '%20'.
            // See issue https://jira.spring.io/browse/SPR-10172
            uriComponentsBuilder.replaceQuery(uri.getRawQuery().replace("+", "%20"));
        }
        return uriComponentsBuilder.build(true).toUri();
    }

    /**
     * LoadBalancerClientFilter has order 10100, so HttpSchemeFilter is given
     * order 10101 in order to rewrite https to http after
     * LoadBalancerClientFilter has run.
     * @return the filter order
     */
    @Override
    public int getOrder() {
        return HTTPS_TO_HTTP_FILTER_ORDER;
    }
}
809
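The filter above swaps the URI scheme while re-encoding '+' in the raw query. A hedged Python sketch of the same transformation, for illustration only (the function name is mine):

from urllib.parse import urlsplit, urlunsplit

def downgrade_to_http(url: str) -> str:
    # swap the scheme while keeping host, path and query intact,
    # mirroring what the filter does with UriComponentsBuilder
    parts = urlsplit(url)
    query = parts.query.replace("+", "%20") if parts.query else parts.query
    return urlunsplit(("http", parts.netloc, parts.path, query, parts.fragment))

assert downgrade_to_http("https://svc:8443/search?q=a+b") == "http://svc:8443/search?q=a%20b"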
333
<gh_stars>100-1000 module com.fasterxml.jackson.dataformat.toml { requires com.fasterxml.jackson.core; requires com.fasterxml.jackson.databind; exports com.fasterxml.jackson.dataformat.toml; provides com.fasterxml.jackson.core.TokenStreamFactory with com.fasterxml.jackson.dataformat.toml.TomlFactory; provides com.fasterxml.jackson.databind.ObjectMapper with com.fasterxml.jackson.dataformat.toml.TomlMapper; }
187
372
<filename>clients/google-api-services-compute/alpha/1.30.1/com/google/api/services/compute/model/BfdStatus.java /* * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except * in compliance with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express * or implied. See the License for the specific language governing permissions and limitations under * the License. */ /* * This code was generated by https://github.com/googleapis/google-api-java-client-services/ * Modify at your own risk. */ package com.google.api.services.compute.model; /** * Next free: 15 * * <p> This is the Java data model class that specifies how to parse/serialize into the JSON that is * transmitted over HTTP when working with the Compute Engine API. For a detailed explanation see: * <a href="https://developers.google.com/api-client-library/java/google-http-java-client/json">https://developers.google.com/api-client-library/java/google-http-java-client/json</a> * </p> * * @author Google, Inc. */ @SuppressWarnings("javadoc") public final class BfdStatus extends com.google.api.client.json.GenericJson { /** * The BFD session initialization mode for this BGP peer. If set to ACTIVE, the Cloud Router will * initiate the BFD session for this BGP peer. If set to PASSIVE, the Cloud Router will wait for * the peer router to initiate the BFD session for this BGP peer. If set to DISABLED, BFD is * disabled for this BGP peer. * The value may be {@code null}. */ @com.google.api.client.util.Key private java.lang.String bfdSessionInitializationMode; /** * Unix timestamp of the most recent config update. * The value may be {@code null}. */ @com.google.api.client.util.Key @com.google.api.client.json.JsonString private java.lang.Long configUpdateTimestampMicros; /** * Control packet counts for the current BFD session. * The value may be {@code null}. */ @com.google.api.client.util.Key private BfdStatusPacketCounts controlPacketCounts; /** * Inter-packet time interval statistics for control packets. * The value may be {@code null}. */ @com.google.api.client.util.Key private java.util.List<PacketIntervals> controlPacketIntervals; /** * Echo packet counts for the current BFD session. * The value may be {@code null}. */ @com.google.api.client.util.Key private BfdStatusPacketCounts echoPacketCounts; /** * Inter-packet time interval statistics for echo packets. * The value may be {@code null}. */ @com.google.api.client.util.Key private java.util.List<PacketIntervals> echoPacketIntervals; /** * The diagnostic code specifies the local system's reason for the last change in session state. * This allows remote systems to determine the reason that the previous session failed, for * example. These diagnostic codes are specified in section 4.1 of RFC5880 * The value may be {@code null}. */ @com.google.api.client.util.Key private java.lang.String localDiagnostic; /** * The current BFD session state as seen by the transmitting system. These states are specified in * section 4.1 of RFC5880 * The value may be {@code null}. */ @com.google.api.client.util.Key private java.lang.String localState; /** * Negotiated transmit interval for control packets. * The value may be {@code null}. 
*/ @com.google.api.client.util.Key private java.lang.Long negotiatedLocalControlTxIntervalMs; /** * Negotiated transmit interval for echo packets. * The value may be {@code null}. */ @com.google.api.client.util.Key private java.lang.Long negotiatedLocalEchoTxIntervalMs; /** * The most recent Rx control packet for this BFD session. * The value may be {@code null}. */ @com.google.api.client.util.Key private BfdPacket rxPacket; /** * The most recent Tx control packet for this BFD session. * The value may be {@code null}. */ @com.google.api.client.util.Key private BfdPacket txPacket; /** * Session uptime in milliseconds. Value will be 0 if session is not up. * The value may be {@code null}. */ @com.google.api.client.util.Key @com.google.api.client.json.JsonString private java.lang.Long uptimeMs; /** * Indicates if echo mode is currently being used. * The value may be {@code null}. */ @com.google.api.client.util.Key private java.lang.Boolean usingEchoMode; /** * The BFD session initialization mode for this BGP peer. If set to ACTIVE, the Cloud Router will * initiate the BFD session for this BGP peer. If set to PASSIVE, the Cloud Router will wait for * the peer router to initiate the BFD session for this BGP peer. If set to DISABLED, BFD is * disabled for this BGP peer. * @return value or {@code null} for none */ public java.lang.String getBfdSessionInitializationMode() { return bfdSessionInitializationMode; } /** * The BFD session initialization mode for this BGP peer. If set to ACTIVE, the Cloud Router will * initiate the BFD session for this BGP peer. If set to PASSIVE, the Cloud Router will wait for * the peer router to initiate the BFD session for this BGP peer. If set to DISABLED, BFD is * disabled for this BGP peer. * @param bfdSessionInitializationMode bfdSessionInitializationMode or {@code null} for none */ public BfdStatus setBfdSessionInitializationMode(java.lang.String bfdSessionInitializationMode) { this.bfdSessionInitializationMode = bfdSessionInitializationMode; return this; } /** * Unix timestamp of the most recent config update. * @return value or {@code null} for none */ public java.lang.Long getConfigUpdateTimestampMicros() { return configUpdateTimestampMicros; } /** * Unix timestamp of the most recent config update. * @param configUpdateTimestampMicros configUpdateTimestampMicros or {@code null} for none */ public BfdStatus setConfigUpdateTimestampMicros(java.lang.Long configUpdateTimestampMicros) { this.configUpdateTimestampMicros = configUpdateTimestampMicros; return this; } /** * Control packet counts for the current BFD session. * @return value or {@code null} for none */ public BfdStatusPacketCounts getControlPacketCounts() { return controlPacketCounts; } /** * Control packet counts for the current BFD session. * @param controlPacketCounts controlPacketCounts or {@code null} for none */ public BfdStatus setControlPacketCounts(BfdStatusPacketCounts controlPacketCounts) { this.controlPacketCounts = controlPacketCounts; return this; } /** * Inter-packet time interval statistics for control packets. * @return value or {@code null} for none */ public java.util.List<PacketIntervals> getControlPacketIntervals() { return controlPacketIntervals; } /** * Inter-packet time interval statistics for control packets. 
* @param controlPacketIntervals controlPacketIntervals or {@code null} for none */ public BfdStatus setControlPacketIntervals(java.util.List<PacketIntervals> controlPacketIntervals) { this.controlPacketIntervals = controlPacketIntervals; return this; } /** * Echo packet counts for the current BFD session. * @return value or {@code null} for none */ public BfdStatusPacketCounts getEchoPacketCounts() { return echoPacketCounts; } /** * Echo packet counts for the current BFD session. * @param echoPacketCounts echoPacketCounts or {@code null} for none */ public BfdStatus setEchoPacketCounts(BfdStatusPacketCounts echoPacketCounts) { this.echoPacketCounts = echoPacketCounts; return this; } /** * Inter-packet time interval statistics for echo packets. * @return value or {@code null} for none */ public java.util.List<PacketIntervals> getEchoPacketIntervals() { return echoPacketIntervals; } /** * Inter-packet time interval statistics for echo packets. * @param echoPacketIntervals echoPacketIntervals or {@code null} for none */ public BfdStatus setEchoPacketIntervals(java.util.List<PacketIntervals> echoPacketIntervals) { this.echoPacketIntervals = echoPacketIntervals; return this; } /** * The diagnostic code specifies the local system's reason for the last change in session state. * This allows remote systems to determine the reason that the previous session failed, for * example. These diagnostic codes are specified in section 4.1 of RFC5880 * @return value or {@code null} for none */ public java.lang.String getLocalDiagnostic() { return localDiagnostic; } /** * The diagnostic code specifies the local system's reason for the last change in session state. * This allows remote systems to determine the reason that the previous session failed, for * example. These diagnostic codes are specified in section 4.1 of RFC5880 * @param localDiagnostic localDiagnostic or {@code null} for none */ public BfdStatus setLocalDiagnostic(java.lang.String localDiagnostic) { this.localDiagnostic = localDiagnostic; return this; } /** * The current BFD session state as seen by the transmitting system. These states are specified in * section 4.1 of RFC5880 * @return value or {@code null} for none */ public java.lang.String getLocalState() { return localState; } /** * The current BFD session state as seen by the transmitting system. These states are specified in * section 4.1 of RFC5880 * @param localState localState or {@code null} for none */ public BfdStatus setLocalState(java.lang.String localState) { this.localState = localState; return this; } /** * Negotiated transmit interval for control packets. * @return value or {@code null} for none */ public java.lang.Long getNegotiatedLocalControlTxIntervalMs() { return negotiatedLocalControlTxIntervalMs; } /** * Negotiated transmit interval for control packets. * @param negotiatedLocalControlTxIntervalMs negotiatedLocalControlTxIntervalMs or {@code null} for none */ public BfdStatus setNegotiatedLocalControlTxIntervalMs(java.lang.Long negotiatedLocalControlTxIntervalMs) { this.negotiatedLocalControlTxIntervalMs = negotiatedLocalControlTxIntervalMs; return this; } /** * Negotiated transmit interval for echo packets. * @return value or {@code null} for none */ public java.lang.Long getNegotiatedLocalEchoTxIntervalMs() { return negotiatedLocalEchoTxIntervalMs; } /** * Negotiated transmit interval for echo packets. 
* @param negotiatedLocalEchoTxIntervalMs negotiatedLocalEchoTxIntervalMs or {@code null} for none */ public BfdStatus setNegotiatedLocalEchoTxIntervalMs(java.lang.Long negotiatedLocalEchoTxIntervalMs) { this.negotiatedLocalEchoTxIntervalMs = negotiatedLocalEchoTxIntervalMs; return this; } /** * The most recent Rx control packet for this BFD session. * @return value or {@code null} for none */ public BfdPacket getRxPacket() { return rxPacket; } /** * The most recent Rx control packet for this BFD session. * @param rxPacket rxPacket or {@code null} for none */ public BfdStatus setRxPacket(BfdPacket rxPacket) { this.rxPacket = rxPacket; return this; } /** * The most recent Tx control packet for this BFD session. * @return value or {@code null} for none */ public BfdPacket getTxPacket() { return txPacket; } /** * The most recent Tx control packet for this BFD session. * @param txPacket txPacket or {@code null} for none */ public BfdStatus setTxPacket(BfdPacket txPacket) { this.txPacket = txPacket; return this; } /** * Session uptime in milliseconds. Value will be 0 if session is not up. * @return value or {@code null} for none */ public java.lang.Long getUptimeMs() { return uptimeMs; } /** * Session uptime in milliseconds. Value will be 0 if session is not up. * @param uptimeMs uptimeMs or {@code null} for none */ public BfdStatus setUptimeMs(java.lang.Long uptimeMs) { this.uptimeMs = uptimeMs; return this; } /** * Indicates if echo mode is currently being used. * @return value or {@code null} for none */ public java.lang.Boolean getUsingEchoMode() { return usingEchoMode; } /** * Indicates if echo mode is currently being used. * @param usingEchoMode usingEchoMode or {@code null} for none */ public BfdStatus setUsingEchoMode(java.lang.Boolean usingEchoMode) { this.usingEchoMode = usingEchoMode; return this; } @Override public BfdStatus set(String fieldName, Object value) { return (BfdStatus) super.set(fieldName, value); } @Override public BfdStatus clone() { return (BfdStatus) super.clone(); } }
4,171
503
<filename>src/main/java/com/zhaxd/core/mapper/KQuartzDao.java<gh_stars>100-1000 package com.zhaxd.core.mapper; import org.beetl.sql.core.mapper.BaseMapper; import com.zhaxd.core.model.*; public interface KQuartzDao extends BaseMapper<KQuartz> { }
105
348
<reponame>chamberone/Leaflet.PixiOverlay<gh_stars>100-1000 {"nom":"Gelles","circ":"3ème circonscription","dpt":"Puy-de-Dôme","inscrits":756,"abs":354,"votants":402,"blancs":10,"nuls":14,"exp":378,"res":[{"nuance":"UDI","nom":"<NAME>","voix":220},{"nuance":"MDM","nom":"<NAME>","voix":158}]}
123
1,792
<filename>whatsmars-spring/src/main/java/org/hongxi/whatsmars/spring/initializing/InitBean.java package org.hongxi.whatsmars.spring.initializing; import org.springframework.beans.factory.DisposableBean; import org.springframework.beans.factory.InitializingBean; import org.springframework.stereotype.Component; import javax.annotation.PostConstruct; import javax.annotation.PreDestroy; /** * Created by shenhongxi on 2017/11/16. */ @Component public class InitBean implements InitializingBean, DisposableBean { @PostConstruct public void init() { System.out.println("================init"); // before afterPropertiesSet } @Override public void afterPropertiesSet() throws Exception { System.out.println("================afterPropertiesSet"); } @PreDestroy public void clear() { System.out.println("================clear"); // before destroy } @Override public void destroy() throws Exception { System.out.println("================destroy"); } }
343
2,236
<reponame>ws1993/TABAnimated
#import <UIKit/UIKit.h>

typedef NS_ENUM(NSInteger, GradientType) {
    GradientFromTopToBottom = 1,        // top to bottom
    GradientFromLeftToRight,            // left to right
    GradientFromLeftTopToRightBottom,   // top-left to bottom-right
    GradientFromLeftBottomToRightTop    // bottom-left to top-right
};

@interface UIImage (Gradient)

/**
 *  Generate a gradient image from the given colors
 *  @param imageSize size of the image to generate
 *  @param colorArr array of gradient colors
 *  @param percents array of percentage stops for the gradient colors
 *  @param gradientType type of the gradient
 */
- (UIImage *)createImageWithSize:(CGSize)imageSize gradientColors:(NSArray *)colorArr percentage:(NSArray *)percents gradientType:(GradientType)gradientType;

@end
398
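The category above only declares the API. Below is one plausible interpretation of the colorArr/percents pairing, sketched as a single left-to-right scanline in Python; the per-segment semantics of percents (cumulative stop positions) is an assumption, not taken from the source.

def lerp_color(c0, c1, t):
    # linear interpolation between two RGB tuples, t in [0, 1]
    return tuple(round(a + (b - a) * t) for a, b in zip(c0, c1))

def gradient_row(colors, percents, width):
    # one scanline of a left-to-right gradient; percents are assumed to be
    # cumulative stop positions ending at 1.0
    row = []
    for x in range(width):
        t = x / max(width - 1, 1)
        i = next(i for i, p in enumerate(percents) if t <= p)  # segment containing t
        lo = percents[i - 1] if i > 0 else 0.0
        local_t = (t - lo) / (percents[i] - lo) if percents[i] > lo else 0.0
        c0 = colors[i - 1] if i > 0 else colors[0]
        row.append(lerp_color(c0, colors[i], local_t))
    return row

# two stops: red held until 50%, then blending to blue at 100%
print(gradient_row([(255, 0, 0), (0, 0, 255)], [0.5, 1.0], 5))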
370
int test_des(void);
8
575
<filename>chrome/browser/chromeos/policy/affiliation_mixin.cc // Copyright 2020 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/chromeos/policy/affiliation_mixin.h" #include <set> #include <string> #include "chrome/browser/chromeos/policy/affiliation_test_helper.h" #include "chrome/browser/chromeos/policy/device_policy_builder.h" #include "chrome/browser/chromeos/policy/device_policy_cros_browser_test.h" #include "chromeos/dbus/authpolicy/authpolicy_client.h" #include "chromeos/dbus/authpolicy/fake_authpolicy_client.h" #include "chromeos/dbus/session_manager/fake_session_manager_client.h" #include "chromeos/dbus/session_manager/session_manager_client.h" #include "components/account_id/account_id.h" #include "components/policy/core/common/cloud/device_management_service.h" #include "components/policy/core/common/cloud/policy_builder.h" namespace policy { namespace { // If running with `affiliated==true`, the test will use the same // `kAffiliationID` as user and device affiliation ID, which makes the user // affiliated (affiliation IDs overlap). // If running with `affiliated==false`, the test will use `kAffiliationID` as // device and `kAnotherAffiliationID` as user affiliation ID, which makes the // user non-affiliated (affiliation IDs don't overlap). constexpr char kAffiliationID[] = "some-affiliation-id"; constexpr char kAnotherAffiliationID[] = "another-affiliation-id"; constexpr char kAffiliatedUserEmail[] = "<EMAIL>"; constexpr char kAffiliatedUserGaiaId[] = "1029384756"; constexpr char kAffiliatedUserObjGuid[] = "{11111111-1111-1111-1111-111111111111}"; } // namespace AffiliationMixin::AffiliationMixin( InProcessBrowserTestMixinHost* host, DevicePolicyCrosTestHelper* device_policy_cros_test_helper) : InProcessBrowserTestMixin(host), policy_test_helper_(device_policy_cros_test_helper), account_id_(AccountId::FromUserEmailGaiaId(kAffiliatedUserEmail, kAffiliatedUserGaiaId)), user_policy_(std::make_unique<UserPolicyBuilder>()) {} AffiliationMixin::~AffiliationMixin() = default; void AffiliationMixin::SetUpInProcessBrowserTestFixture() { AffiliationTestHelper affiliation_helper = GetAffiliationTestHelper(); std::set<std::string> device_affiliation_ids; device_affiliation_ids.insert(kAffiliationID); ASSERT_NO_FATAL_FAILURE(affiliation_helper.SetDeviceAffiliationIDs( policy_test_helper_, device_affiliation_ids)); policy_test_helper_->InstallOwnerKey(); std::set<std::string> user_affiliation_ids; if (affiliated_) { user_affiliation_ids.insert(kAffiliationID); } else { user_affiliation_ids.insert(kAnotherAffiliationID); } ASSERT_TRUE(user_policy_.get()); ASSERT_NO_FATAL_FAILURE(affiliation_helper.SetUserAffiliationIDs( user_policy_.get(), account_id_, user_affiliation_ids)); } void AffiliationMixin::SetIsForActiveDirectory(bool is_for_active_directory) { if (is_for_active_directory == is_for_active_directory_) return; is_for_active_directory_ = is_for_active_directory; if (is_for_active_directory) { account_id_ = AccountId::AdFromUserEmailObjGuid(kAffiliatedUserEmail, kAffiliatedUserObjGuid); } else { account_id_ = AccountId::FromUserEmailGaiaId(kAffiliatedUserEmail, kAffiliatedUserGaiaId); } } AffiliationTestHelper AffiliationMixin::GetAffiliationTestHelper() const { auto* session_manager_client = chromeos::FakeSessionManagerClient::Get(); CHECK(session_manager_client); if (is_for_active_directory_) { auto* fake_auth_policy_client = 
chromeos::FakeAuthPolicyClient::Get(); CHECK(fake_auth_policy_client); return AffiliationTestHelper::CreateForActiveDirectory( session_manager_client, fake_auth_policy_client); } return AffiliationTestHelper::CreateForCloud(session_manager_client); } } // namespace policy
1,444
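The affiliation rule spelled out in the comment above (a user is affiliated exactly when user and device affiliation IDs overlap) reduces to a set-disjointness check. A small sketch using the constants from the file:

def is_affiliated(user_ids, device_ids):
    # affiliated when the two ID sets share at least one element
    return not set(user_ids).isdisjoint(device_ids)

assert is_affiliated({"some-affiliation-id"}, {"some-affiliation-id"})
assert not is_affiliated({"another-affiliation-id"}, {"some-affiliation-id"})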
1,150
# Code heavily inspired (lifted pretty much verbatim) from https://djangostars.com/blog/how-to-create-and-deploy-a-telegram-bot/ # Changes made by @rmoff to add call out to ksqlDB # # To run this: # # 1. Sign up for ngrok and run it: # ./ngrok http 8080 # 2. Note the provided external URL from ngrok and set it as webhook for your telegram bot # curl -L http://api.telegram.org/botXXXXXYYYYYYYY/setWebHook?url=https://xxxyyy12345.ngrok.io # 3. Run the bot # python pull_bot.py # # Don't forget to also update the bot code for the hardcoded elements # with telegram bot auth token, ksqlDB connection details, etc. import requests from bottle import Bottle, response, request as bottle_request import requests, json,datetime class BotHandlerMixin: BOT_URL = None def get_chat_id(self, data): """ Method to extract chat id from telegram request. """ chat_id = data['message']['chat']['id'] return chat_id def get_message(self, data): """ Method to extract message id from telegram request. """ message_text = data['message']['text'] return message_text def send_message(self, prepared_data): """ Prepared data should be json which includes at least `chat_id` and `text` """ message_url = self.BOT_URL + 'sendMessage' requests.post(message_url, json=prepared_data) class TelegramBot(BotHandlerMixin, Bottle): BOT_URL = 'https://api.telegram.org/botXXXXXXXXYYYYYY/' def __init__(self, *args, **kwargs): super(TelegramBot, self).__init__() self.route('/', callback=self.post_handler, method="POST") def lookup_last_probe_enriched(self,device): ksqldb_url = "http://localhost:8088/query" headers = {'Content-Type':'application/vnd.ksql.v1+json; charset=utf-8'} query={'ksql':'SELECT PROBE_COUNT, FIRST_PROBE, LAST_PROBE, UNIQUE_SSIDS_PROBED, SSIDS_PROBED FROM PCAP_STATS_ENRICHED_01 WHERE ROWKEY = \''+device+'\';'} r = requests.post(ksqldb_url, data=json.dumps(query), headers=headers) if r.status_code==200: result=r.json() if len(result)==2: probe_count=result[1]['row']['columns'][0] probe_first=datetime.datetime.fromtimestamp(float(result[1]['row']['columns'][1])/1000).strftime("%Y-%m-%d %H:%M:%S") probe_last= datetime.datetime.fromtimestamp(float(result[1]['row']['columns'][2])/1000).strftime("%Y-%m-%d %H:%M:%S") unique_ssids=result[1]['row']['columns'][3] probed_ssids=result[1]['row']['columns'][4] return('📡 Wi-Fi probe stats for %s\n\tEarliest probe : %s\n\tLatest probe : %s\n\tProbe count : %d\n\tUnique SSIDs : %s' % (device, probe_first, probe_last, probe_count, probed_ssids)) else: return('🛎 No result found for device %s' % (device)) else: return('❌ Query failed (%s %s)\n%s' % (r.status_code, r.reason, r.text)) def lookup_last_probe(self,device): ksqldb_url = "http://localhost:8088/query" headers = {'Content-Type':'application/vnd.ksql.v1+json; charset=utf-8'} query={'ksql':'SELECT PROBE_COUNT, FIRST_PROBE, LAST_PROBE, UNIQUE_SSIDS_PROBED, SSIDS_PROBED FROM PCAP_STATS_01 WHERE ROWKEY = \''+device+'\';'} r = requests.post(ksqldb_url, data=json.dumps(query), headers=headers) if r.status_code==200: result=r.json() if len(result)==2: probe_count=result[1]['row']['columns'][0] probe_first=result[1]['row']['columns'][1] probe_last=result[1]['row']['columns'][2] unique_ssids=result[1]['row']['columns'][3] probed_ssids=result[1]['row']['columns'][4] return('📡 Wi-Fi probe stats for %s\n\tEarliest probe : %s\n\tLatest probe : %s\n\tProbe count : %d\n\tUnique SSIDs : %d (%s)' % (device, probe_first, probe_last, probe_count, unique_ssids, probed_ssids)) else: return('🛎 No result found for device %s' % (device)) else: return('❌ 
Query failed (%s %s)\n%s' % (r.status_code, r.reason, r.text)) def prepare_data_for_answer(self, data): message = self.get_message(data) print('👉 Received message sent to us:\n\t%s' % (message)) answer = self.lookup_last_probe_enriched(message) print('👈 Returning message back to sender:\n\t%s' % (answer)) chat_id = self.get_chat_id(data) json_data = { "chat_id": chat_id, "text": answer, } return json_data def post_handler(self): data = bottle_request.json answer_data = self.prepare_data_for_answer(data) self.send_message(answer_data) return response if __name__ == '__main__': app = TelegramBot() app.run(host='localhost', port=8080)
2,307
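Step 2 of the setup comments registers the ngrok URL as the bot's webhook via curl; the same call sketched with requests, where the token and ngrok host are the same placeholders used in the original instructions:

import requests

BOT_TOKEN = "XXXXXYYYYYYYY"                 # placeholder, as in the comments above
NGROK_URL = "https://xxxyyy12345.ngrok.io"  # placeholder external URL from ngrok

resp = requests.get(
    f"https://api.telegram.org/bot{BOT_TOKEN}/setWebhook",
    params={"url": NGROK_URL},
)
print(resp.json())  # {"ok": true, ...} once Telegram accepts the webhook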
462
<gh_stars>100-1000 { "appDesc": { "description": "App description.", "message": "Créez et modifiez des feuilles de calcul" }, "appName": { "description": "App name.", "message": "Google Sheets" } }
102
488
// t0173.cc // another from jrvb template<typename foo> class bar;
28
1,334
<filename>mockserver-examples/src/main/java/org/mockserver/examples/proxy/servicebackend/BackEndServiceConfiguration.java package org.mockserver.examples.proxy.servicebackend; import org.mockserver.socket.PortFactory; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; import org.springframework.context.annotation.Profile; import org.springframework.context.annotation.PropertySource; import org.springframework.core.env.Environment; import javax.annotation.Resource; /** * This configuration contains top level beans and any configuration required by filters (as WebMvcConfiguration only loaded within Dispatcher Servlet) * * @author jamesdbloom */ @Configuration @Profile("backend") @PropertySource({"classpath:application.properties"}) public class BackEndServiceConfiguration { @Resource private Environment environment; @Bean public BookServer bookServer() { System.setProperty("bookService.port", "" + PortFactory.findFreePort()); return new BookServer(Integer.parseInt(System.getProperty("bookService.port")), false); } }
327
572
from flask import Flask, abort, jsonify, make_response, request, url_for
from flask_restx import Api, Namespace, Resource, fields

app = Flask(__name__, static_url_path='')
api = Api(
    app,
    'todos',
    description='Operações ligadas as suas tarefas',
)
ns = Namespace('todos', description='Operações ligadas as suas tarefas')
api.add_namespace(ns)

todo = api.model(
    'Todo',
    {
        'id': fields.Integer(
            readonly=True, description='Identificador único da tarefa'
        ),
        'title': fields.String(required=True, description='Nome da Tarefa'),
        'description': fields.String(
            required=True, description='Descrição da tarefa'
        ),
        'done': fields.Boolean(
            required=True, description='A tarefa será concluída?'
        ),
    },
)


@app.errorhandler(400)
def bad_request(error):
    return make_response(jsonify({'error': 'Bad request'}), 400)


@app.errorhandler(404)
def not_found(error):
    return make_response(jsonify({'error': 'Not Found'}), 404)


class TodoDAO(object):
    def __init__(self):
        self.counter = 0
        self.todos = []

    def get(self, id):
        for todo in self.todos:
            if todo['id'] == id:
                return todo
        api.abort(404, "Todo {} doesn't exist".format(id))

    def create(self, data):
        todo = data
        todo['id'] = self.counter = self.counter + 1
        self.todos.append(todo)
        return todo

    def update(self, id, data):
        todo = self.get(id)
        todo.update(data)
        return todo

    def delete(self, id):
        todo = self.get(id)
        self.todos.remove(todo)


DAO = TodoDAO()


@ns.route('/api/tasks')
class TodoList(Resource):
    @ns.marshal_list_with(todo, code=200)
    def get(self):
        return DAO.todos

    @ns.expect(todo)
    @ns.marshal_with(todo, code=201)
    def post(self):
        return DAO.create(api.payload), 201


@ns.route('/api/tasks/<int:task_id>')
class Todo(Resource):
    @ns.marshal_with(todo)
    def get(self, task_id):
        return DAO.get(task_id)

    @ns.marshal_with(todo)
    def put(self, task_id):
        return DAO.update(task_id, api.payload)

    @ns.expect(todo)
    @ns.marshal_with(todo)
    def patch(self, task_id):
        return DAO.update(task_id, api.payload)

    @ns.marshal_with(todo)
    def delete(self, task_id):
        DAO.delete(task_id)
        return '', 204
1,156
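A hedged client-side walkthrough of the todo API above, assuming the default Flask development server on port 5000 (the snippet itself never configures host or port):

import requests

# namespace 'todos' plus the @ns.route path gives /todos/api/tasks
BASE = "http://127.0.0.1:5000/todos/api/tasks"

created = requests.post(BASE, json={
    "title": "write docs",
    "description": "sketch the API",
    "done": False,
}).json()
print(created["id"])                              # id assigned by the DAO counter

print(requests.get(f"{BASE}/{created['id']}").json())
requests.delete(f"{BASE}/{created['id']}")        # 204, task removed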
60,067
<reponame>Hacky-DH/pytorch<gh_stars>1000+
import operator_benchmark as op_bench
import torch

"""Microbenchmarks for channel_shuffle operator."""

# Configs for PT channel_shuffle operator
channel_shuffle_long_configs = op_bench.cross_product_configs(
    batch_size=[4, 8],
    channels_per_group=[32, 64],
    height=[32, 64],
    width=[32, 64],
    groups=[4, 8],
    channel_last=[True, False],
    tags=["long"]
)

channel_shuffle_short_configs = op_bench.config_list(
    attr_names=["batch_size", "channels_per_group", "height", "width", "groups"],
    attrs=[
        [2, 16, 16, 16, 2],
        [2, 32, 32, 32, 2],
        [4, 32, 32, 32, 4],
        [4, 64, 64, 64, 4],
        [8, 64, 64, 64, 8],
        [16, 64, 64, 64, 16],
    ],
    cross_product_configs={
        "channel_last": [True, False],
    },
    tags=["short"]
)


class ChannelShuffleBenchmark(op_bench.TorchBenchmarkBase):
    def init(self, batch_size, channels_per_group, height, width, groups, channel_last):
        channels = channels_per_group * groups
        data_shape = (batch_size, channels, height, width)
        input_data = torch.rand(data_shape)
        if channel_last:
            input_data = input_data.contiguous(memory_format=torch.channels_last)
        self.inputs = {
            "input_data": input_data,
            "groups": groups
        }
        self.set_module_name('channel_shuffle')

    def forward(self, input_data, groups: int):
        return torch.channel_shuffle(input_data, groups)


op_bench.generate_pt_test(channel_shuffle_short_configs + channel_shuffle_long_configs,
                          ChannelShuffleBenchmark)


if __name__ == "__main__":
    op_bench.benchmark_runner.main()
751
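The torch.channel_shuffle op being benchmarked above can be cross-checked against the textbook reshape/transpose formulation (split channels into groups, swap the group and per-group axes, flatten back). A small reference sketch:

import torch

def channel_shuffle_manual(x: torch.Tensor, groups: int) -> torch.Tensor:
    # reference implementation: (n, g, c/g, h, w) -> transpose -> (n, c, h, w)
    n, c, h, w = x.shape
    return (x.reshape(n, groups, c // groups, h, w)
             .transpose(1, 2)
             .reshape(n, c, h, w))

x = torch.rand(2, 8, 4, 4)
assert torch.equal(channel_shuffle_manual(x, 2), torch.channel_shuffle(x, 2))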
646
<gh_stars>100-1000
package pvrms;

import java.io.FileReader;
import java.io.IOException;
import java.text.ParseException;
import java.util.Properties;

import com.opencsv.CSVReader;

import com.toshiba.mwcloud.gs.Collection;
import com.toshiba.mwcloud.gs.GSException;
import com.toshiba.mwcloud.gs.GridStore;
import com.toshiba.mwcloud.gs.GridStoreFactory;
import com.toshiba.mwcloud.gs.RowKey;

// Equipment information
class Equip {
    @RowKey String id;
    String name;
    //Blob spec; // spec information left unused, for simplicity
}

public class SimplePv0 {

    /*
     * Load equipment information from a CSV file
     */
    public static void main(String[] args) throws GSException, ParseException, IOException {

        final String equipColName = "equipment_col";

        if (args.length != 5) {
            System.out.println("Usage:pvrms.SimplePv0 Addr Port ClusterName User Passwd ");
            System.exit(1);
        }

        // Parameter settings for connecting to GridStore
        Properties props = new Properties();
        props.setProperty("notificationAddress", args[0]);
        props.setProperty("notificationPort", args[1]);
        props.setProperty("clusterName", args[2]);
        props.setProperty("user", args[3]);
        props.setProperty("password", args[4]);
        GridStore store = GridStoreFactory.getInstance().getGridStore(props);

        /*
         * Read the CSV file
         *
         * The first row holds the sensor IDs:
         *   sensor name 1, sensor name 2, sensor name 3, ...
         * The remaining rows hold data: measurements from each sensor taken at the same time.
         * Data format: date,time,sensor-1 value,status,sensor-2 value,status, ...
         */
        String dataFileName = "equipName.csv";
        CSVReader reader = new CSVReader(new FileReader(dataFileName));
        String[] nextLine;

        // Create the collection
        Collection<String,Equip> equipCol = store.putCollection(equipColName, Equip.class);

        // Create indexes on the columns; TREE indexes, since the column type is String
        equipCol.createIndex("id");
        equipCol.createIndex("name");

        // Turn off auto-commit mode
        equipCol.setAutoCommit(false);

        // Commit interval
        Long commitInterval = (long) 1;

        // Register values
        Equip equip = new Equip();
        Long cnt = (long) 0;

        byte[] b = new byte[1];
        b[0] = 1;

        while ((nextLine = reader.readNext()) != null) {
            // Register equipment information
            equip.id = nextLine[0];
            equip.name = nextLine[1];
            equipCol.put(equip);
            cnt++;
            if(0 == cnt%commitInterval) {
                // Commit the transaction
                equipCol.commit();
            }
        }
        // Commit the transaction
        equipCol.commit();
        System.out.println("◆ equip_colコンテナを作成し、ROWを"+cnt+"件登録しました。◆");

        // Release resources
        store.close();
        reader.close();
    }
}
1,539
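The commit-interval pattern in the loader above, sketched generically in Python; the names are illustrative and this is not a GridDB API, just the batching idea of committing every N rows so a large CSV load is not one giant transaction.

def load_rows(rows, put, commit, commit_interval=1000):
    # buffer writes and commit every commit_interval rows
    count = 0
    for row in rows:
        put(row)
        count += 1
        if count % commit_interval == 0:
            commit()
    commit()  # flush the final partial batch
    return count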
3,269
<gh_stars>1000+ # Time: O(n) # Space: O(1) class Solution(object): def minFlipsMonoIncr(self, S): """ :type S: str :rtype: int """ flip0, flip1 = 0, 0 for c in S: flip0 += int(c == '1') flip1 = min(flip0, flip1 + int(c == '0')) return flip1
188
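The flip0/flip1 recurrence above, unpacked: flip0 counts the '1's seen so far (the cost of making the whole prefix zeros), and flip1 takes the cheaper of flipping the entire prefix to '0' or keeping a monotone prefix and flipping the current '0'. A sketch with worked cases:

def min_flips(s: str) -> int:
    flip0 = flip1 = 0
    for c in s:
        flip0 += c == '1'                      # cost of an all-zero prefix
        flip1 = min(flip0, flip1 + (c == '0')) # monotone prefix, flip this '0'
    return flip1

assert min_flips("00110") == 1    # flip the trailing '0'
assert min_flips("010110") == 2
assert min_flips("00011000") == 2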
6,098
from __future__ import print_function import sys sys.path.insert(1,"../../../") import h2o from tests import pyunit_utils from h2o.estimators.glrm import H2OGeneralizedLowRankEstimator #Analogous to GLRMTest.java#testSubset #Test based on bug found in glrm_census_large.ipynb def glrm_subset(): acs_orig = h2o.upload_file(path=pyunit_utils.locate("bigdata/laptop/census/ACS_13_5YR_DP02_cleaned.zip"), col_types = (['enum'] + ['numeric']*149)) acs_full = acs_orig.drop("ZCTA5") acs_model = H2OGeneralizedLowRankEstimator(k = 10, transform = 'STANDARDIZE', loss = 'Quadratic', regularization_x = 'Quadratic', regularization_y = 'L1', gamma_x = 0.25, gamma_y = 0.5, max_iterations = 1) acs_model.train(x = acs_full.names, training_frame= acs_full) zcta_arch_x = h2o.get_frame(acs_model._model_json['output']['representation_name']) print (zcta_arch_x) acs_zcta_col = acs_orig["ZCTA5"].asfactor() idx = ((acs_zcta_col == '10065') | # Manhattan, NY (Upper East Side)\n", (acs_zcta_col == '11219') | # Manhattan, NY (East Harlem)\n", (acs_zcta_col == '66753') | # McCune, KS\n", (acs_zcta_col == '84104') | # Salt Lake City, UT\n", (acs_zcta_col == '94086') | # Sunnyvale, CA\n", (acs_zcta_col == '95014')) # Cupertino, CA\n", print(zcta_arch_x[idx,[0,1]]) if __name__ == "__main__": pyunit_utils.standalone_test(glrm_subset) else: glrm_subset()
943
808
<gh_stars>100-1000
/*
 * Copyright DDDplus Authors.
 *
 * Licensed under the Apache License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
 */
package io.github.dddplus.ext;

import io.github.dddplus.api.ApiResult;
import io.github.dddplus.model.IDomainModel;
import io.github.dddplus.api.RequestProfile;

import javax.validation.constraints.NotNull;

/**
 * Extension point for parsing, persisting and rendering the extension attributes of a domain model.
 * <p>
 * <p>The middle platform (CP), acting as the intermediary, controls how extension attributes are passed along and
 * persisted, while the front office (BP), being both the caller of the middle platform and the executor of extension
 * points, interprets the extension attributes and handles the business logic on both ends.</p>
 * <p>Data extensions provide a mechanism for attaching arbitrary data to an interface to avoid bloat in core platform data models.</p>
 * <pre>
 *        +-------------+     +----+      +---------------+
 *        |  BP client  |-----| CP |------| BP extensions |
 *        +-------------+     +----+      +---------------+
 *                               |
 *                          +----------+
 *                          | Database |
 *                          +----------+
 *                               |
 * | id   | order_no       | ... | x1 varchar(100)                                | x2     | x3
 * | ---- | -------------- | --- | ---------------------------------------------- | ------ | ---------
 * | 1    | 22010391388764 |     | 10.9                                           | 12,abc |
 * | 2    | 22010397315689 |     | {"foo":1, "bar":"egg", "baz":{"a":1, "b":2}}   |        | 2020-01-09
 * </pre>
 *
 * @param <Model> the domain model
 */
public interface IModelAttachmentExt<Model extends IDomainModel> extends IDomainExtension {

    /**
     * Parse and process extension attribute data.
     * <p>
     * <p>Applicable scenario: the extension attribute data is passed in through the API.</p>
     *
     * @param source source of the extension attribute data, taken from the request via {@link RequestProfile#ext}
     * @param target target domain model the extension data is handed to
     */
    void explain(@NotNull RequestProfile source, @NotNull Model target);

    /**
     * Parse and process extension attribute data.
     * <p>
     * <p>Applicable scenario: the extension attribute data is fetched from the database and placed into the model by
     * the middle platform; the front-office extension point then processes the weakly typed data in the model, and may
     * also store it back into the model's strongly typed extension container.</p>
     *
     * @param model model whose extension attributes are parsed and processed
     */
    default void explain(@NotNull Model model) {

    }

    /**
     * Render extension attribute data in API responses.
     * <p>
     * <p>Typical scenario: an order detail page needs to convert reserved attribute names such as {@code x1, x2}
     * into key/value pairs with explicit business meaning.</p>
     *
     * @param source source of the extension attribute data
     * @param target target the extension data is rendered into, {@link ApiResult#ext}
     */
    default void render(@NotNull Model source, @NotNull ApiResult target) {

    }
}
1,612
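The Javadoc table above shows opaque JSON blobs stored in reserved columns such as x1; a tiny sketch of the front-office "explain" step on that sample row (the row layout is copied from the table, the parsing logic is mine):

import json

row = {"order_no": "22010397315689",
       "x1": '{"foo":1, "bar":"egg", "baz":{"a":1, "b":2}}',
       "x2": None,
       "x3": "2020-01-09"}

ext = json.loads(row["x1"])   # weakly typed JSON blob -> dict
print(ext["baz"]["b"])        # the front office knows the schema: prints 2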
6,443
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.tomcat.util.http;

import java.net.URI;
import java.net.URISyntaxException;
import java.util.Locale;

import jakarta.servlet.http.HttpServletRequest;

public class RequestUtil {

    private RequestUtil() {
        // Hide default constructor as this is a utility class
    }


    /**
     * Normalize a relative URI path that may have relative values ("/./",
     * "/../", and so on) in it. <strong>WARNING</strong> - This method is
     * useful only for normalizing application-generated paths. It does not
     * try to perform security checks for malicious input.
     *
     * @param path Relative path to be normalized
     *
     * @return The normalized path or <code>null</code> if the path cannot be
     *         normalized
     */
    public static String normalize(String path) {
        return normalize(path, true);
    }


    /**
     * Normalize a relative URI path that may have relative values ("/./",
     * "/../", and so on) in it. <strong>WARNING</strong> - This method is
     * useful only for normalizing application-generated paths. It does not
     * try to perform security checks for malicious input.
     *
     * @param path Relative path to be normalized
     * @param replaceBackSlash Should '\\' be replaced with '/'
     *
     * @return The normalized path or <code>null</code> if the path cannot be
     *         normalized
     */
    public static String normalize(String path, boolean replaceBackSlash) {

        if (path == null) {
            return null;
        }

        // Create a place for the normalized path
        String normalized = path;

        if (replaceBackSlash && normalized.indexOf('\\') >= 0) {
            normalized = normalized.replace('\\', '/');
        }

        // Add a leading "/" if necessary
        if (!normalized.startsWith("/")) {
            normalized = "/" + normalized;
        }

        boolean addedTrailingSlash = false;
        if (normalized.endsWith("/.") || normalized.endsWith("/..")) {
            normalized = normalized + "/";
            addedTrailingSlash = true;
        }

        // Resolve occurrences of "//" in the normalized path
        while (true) {
            int index = normalized.indexOf("//");
            if (index < 0) {
                break;
            }
            normalized = normalized.substring(0, index) + normalized.substring(index + 1);
        }

        // Resolve occurrences of "/./" in the normalized path
        while (true) {
            int index = normalized.indexOf("/./");
            if (index < 0) {
                break;
            }
            normalized = normalized.substring(0, index) + normalized.substring(index + 2);
        }

        // Resolve occurrences of "/../" in the normalized path
        while (true) {
            int index = normalized.indexOf("/../");
            if (index < 0) {
                break;
            }
            if (index == 0) {
                return null;  // Trying to go outside our context
            }
            int index2 = normalized.lastIndexOf('/', index - 1);
            normalized = normalized.substring(0, index2) + normalized.substring(index + 3);
        }

        if (normalized.length() > 1 && addedTrailingSlash) {
            // Remove the trailing '/' we added so that input and output are
            // consistent w.r.t. the presence of the trailing '/'.
normalized = normalized.substring(0, normalized.length() - 1); } // Return the normalized path that we have completed return normalized; } public static boolean isSameOrigin(HttpServletRequest request, String origin) { // Build scheme://host:port from request StringBuilder target = new StringBuilder(); String scheme = request.getScheme(); if (scheme == null) { return false; } else { scheme = scheme.toLowerCase(Locale.ENGLISH); } target.append(scheme); target.append("://"); String host = request.getServerName(); if (host == null) { return false; } target.append(host); int port = request.getServerPort(); // Origin may or may not include the (default) port. // At this point target doesn't include a port. if (target.length() == origin.length()) { // origin and target can only be equal if both are using default // ports. Therefore only append the port to the target if a // non-default port is used. if (("http".equals(scheme) || "ws".equals(scheme)) && port != 80 || ("https".equals(scheme) || "wss".equals(scheme)) && port != 443) { target.append(':'); target.append(port); } } else { // origin and target can only be equal if: // a) origin includes an explicit default port // b) origin is using a non-default port // Either way, add the port to the target so it can be compared target.append(':'); target.append(port); } // Both scheme and host are case-insensitive but the CORS spec states // this check should be case-sensitive return origin.equals(target.toString()); } /** * Checks if a given origin is valid or not. Criteria: * <ul> * <li>If an encoded character is present in origin, it's not valid.</li> * <li>If origin is "null", it's valid.</li> * <li>Origin should be a valid {@link URI}</li> * </ul> * * @param origin The origin URI * @return <code>true</code> if the origin was valid * @see <a href="http://tools.ietf.org/html/rfc952">RFC952</a> */ public static boolean isValidOrigin(String origin) { // Checks for encoded characters. Helps prevent CRLF injection. if (origin.contains("%")) { return false; } // "null" is a valid origin if ("null".equals(origin)) { return true; } // RFC6454, section 4. "If uri-scheme is file, the implementation MAY // return an implementation-defined value.". No limits are placed on // that value so treat all file URIs as valid origins. if (origin.startsWith("file://")) { return true; } URI originURI; try { originURI = new URI(origin); } catch (URISyntaxException e) { return false; } // If scheme for URI is null, return false. Return true otherwise. return originURI.getScheme() != null; } }
2,979
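The collapsing loops in normalize() above transliterate almost line-for-line; here is a hedged Python rendition for eyeballing the algorithm, mirroring the null-on-escape behaviour when "/../" would climb above the root:

from typing import Optional

def normalize(path: str) -> Optional[str]:
    p = path.replace("\\", "/")
    if not p.startswith("/"):
        p = "/" + p
    trailing = p.endswith(("/.", "/.."))
    if trailing:
        p += "/"
    while "//" in p:
        p = p.replace("//", "/")
    while "/./" in p:
        p = p.replace("/./", "/")
    while True:
        i = p.find("/../")
        if i < 0:
            break
        if i == 0:
            return None          # attempt to climb above the root
        j = p.rfind("/", 0, i)
        p = p[:j] + p[i + 3:]    # drop the parent segment and the "/.."
    if len(p) > 1 and trailing:
        p = p[:-1]
    return p

assert normalize("a/b/../c/./d") == "/a/c/d"
assert normalize("/../etc/passwd") is None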
691
import torch
from torch.fft import fftn


def roll_quadrants(data, backwards=False):
    """
    Shift low frequencies to the center of fourier transform, i.e. [-N/2, ..., +N/2] -> [0, ..., N-1]
    Args:
        data: fourier transform, (NxHxW)
        backwards: bool, if True shift high frequencies back to center

    Returns:
        Shifted fourier transform.
    """
    dim = data.ndim - 1

    if dim != 2:
        raise AttributeError(f'Data must be 2d but it is {dim}d.')
    if any(s % 2 == 0 for s in data.shape[1:]):
        raise RuntimeWarning('Roll quadrants for 2d input should only be used with uneven spatial sizes.')

    # for each dimension swap left and right half
    dims = tuple(range(1, dim+1))                   # add one for batch dimension
    shifts = torch.tensor(data.shape[1:]) // 2      #.div(2, rounding_mode='floor')  # N/2 if N even, (N-1)/2 if N odd
    if backwards:
        shifts *= -1
    return data.roll(shifts.tolist(), dims=dims)


def batch_fft(data, normalize=False):
    """
    Compute fourier transform of batch.
    Args:
        data: input tensor, (NxHxW)

    Returns:
        Batch fourier transform of input data.
    """
    dim = data.ndim - 1     # subtract one for batch dimension
    if dim != 2:
        raise AttributeError(f'Data must be 2d but it is {dim}d.')

    dims = tuple(range(1, dim + 1))     # add one for batch dimension

    if normalize:
        norm = 'ortho'
    else:
        norm = 'backward'

    if not torch.is_complex(data):
        data = torch.complex(data, torch.zeros_like(data))
    freq = fftn(data, dim=dims, norm=norm)

    return freq


def azimuthal_average(image, center=None):
    # modified to tensor inputs from https://www.astrobetter.com/blog/2010/03/03/fourier-transforms-of-images-in-python/
    """
    Calculate the azimuthally averaged radial profile.
    Requires low frequencies to be at the center of the image.
    Args:
        image: Batch of 2D images, NxHxW
        center: The [x,y] pixel coordinates used as the center. The default is
             None, which then uses the center of the image (including
             fractional pixels).

    Returns:
        Azimuthal average over the image around the center
    """
    # Check input shapes
    assert center is None or (len(center) == 2), f'Center has to be None or len(center)=2 ' \
                                                 f'(but it is len(center)={len(center)}).'

    # Calculate the indices from the image
    H, W = image.shape[-2:]
    h, w = torch.meshgrid(torch.arange(0, H), torch.arange(0, W))

    if center is None:
        center = torch.tensor([(w.max() - w.min()) / 2.0, (h.max() - h.min()) / 2.0])

    # Compute radius for each pixel wrt center
    r = torch.stack([w-center[0], h-center[1]]).norm(2, 0)

    # Get sorted radii
    r_sorted, ind = r.flatten().sort()
    i_sorted = image.flatten(-2, -1)[..., ind]

    # Get the integer part of the radii (bin size = 1)
    r_int = r_sorted.long()             # attribute to the smaller integer

    # Find all pixels that fall within each radial bin.
    deltar = r_int[1:] - r_int[:-1]     # Assumes all radii represented, computes bin change between subsequent radii
    rind = torch.where(deltar)[0]       # location of changed radius

    # compute number of elements in each bin
    nind = rind + 1                     # number of elements = idx + 1
    nind = torch.cat([torch.tensor([0]), nind, torch.tensor([H*W])])        # add borders
    nr = nind[1:] - nind[:-1]           # number of radius bin, i.e.
counter for bins belonging to each radius # Cumulative sum to figure out sums for each radius bin if H % 2 == 0: raise NotImplementedError('Not sure if implementation correct, please check') rind = torch.cat([torch.tensor([0]), rind, torch.tensor([H * W - 1])]) # add borders else: rind = torch.cat([rind, torch.tensor([H * W - 1])]) # add borders csim = i_sorted.cumsum(-1, dtype=torch.float64) # integrate over all values with smaller radius tbin = csim[..., rind[1:]] - csim[..., rind[:-1]] # add mean tbin = torch.cat([csim[:, 0:1], tbin], 1) radial_prof = tbin / nr.to(tbin.device) # normalize by counted bins return radial_prof def get_spectrum(data, normalize=False): dim = data.ndim - 1 # subtract one for batch dimension if dim != 2: raise AttributeError(f'Data must be 2d but it is {dim}d.') freq = batch_fft(data, normalize=normalize) power_spec = freq.real ** 2 + freq.imag ** 2 N = data.shape[1] if N % 2 == 0: # duplicate value for N/2 so it is put at the end of the spectrum # and is not averaged with the mean value N_2 = N//2 power_spec = torch.cat([power_spec[:, :N_2+1], power_spec[:, N_2:N_2+1], power_spec[:, N_2+1:]], dim=1) power_spec = torch.cat([power_spec[:, :, :N_2+1], power_spec[:, :, N_2:N_2+1], power_spec[:, :, N_2+1:]], dim=2) power_spec = roll_quadrants(power_spec) power_spec = azimuthal_average(power_spec) return power_spec def plot_std(mean, std, x=None, ax=None, **kwargs): import matplotlib.pyplot as plt if ax is None: fig, ax = plt.subplots(1) # plot error margins in same color as line err_kwargs = { 'alpha': 0.3 } if 'c' in kwargs.keys(): err_kwargs['color'] = kwargs['c'] elif 'color' in kwargs.keys(): err_kwargs['color'] = kwargs['color'] if x is None: x = torch.linspace(0, 1, len(mean)) # use normalized x axis ax.plot(x, mean, **kwargs) ax.fill_between(x, mean-std, mean+std, **err_kwargs) return ax
2,392
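azimuthal_average above does its radial binning with sort/cumsum bookkeeping; below is an independent numpy cross-check using bincount (the helper name is mine), which computes the same mean over integer radius bins around the image centre:

import numpy as np

def radial_profile(img: np.ndarray) -> np.ndarray:
    h, w = img.shape
    y, x = np.indices((h, w))
    cy, cx = (h - 1) / 2.0, (w - 1) / 2.0
    r = np.hypot(x - cx, y - cy).astype(int)    # integer radius bin per pixel
    sums = np.bincount(r.ravel(), weights=img.ravel())
    counts = np.bincount(r.ravel())
    return sums / np.maximum(counts, 1)         # mean power per radius bin

spec = np.random.rand(31, 31)
print(radial_profile(spec).shape)               # one value per integer radius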
918
<filename>gobblin-data-management/src/test/java/org/apache/gobblin/data/management/conversion/hive/util/HiveAvroORCQueryGeneratorTest.java /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.data.management.conversion.hive.util; import java.io.IOException; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import org.apache.avro.Schema; import org.apache.hadoop.hive.metastore.api.Table; import org.testng.Assert; import org.testng.annotations.Test; import com.google.common.base.Optional; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; import org.apache.gobblin.data.management.ConversionHiveTestUtils; import org.apache.gobblin.data.management.conversion.hive.query.HiveAvroORCQueryGenerator; import org.apache.gobblin.util.AvroFlattener; import static org.apache.gobblin.data.management.conversion.hive.utils.AvroHiveTypeUtils.generateAvroToHiveColumnMapping; @Test(groups = { "gobblin.data.management.conversion" }) public class HiveAvroORCQueryGeneratorTest { private static String resourceDir = "avroToOrcQueryUtilsTest"; private static Optional<Table> destinationTableMeta = Optional.absent(); private static boolean isEvolutionEnabled = true; private static Optional<Integer> rowLimit = Optional.absent(); /** * Testing DDL generation for schema containing logical types. * DDL comparison doesn't include any spacing and blank. * @throws Exception */ public void testLogicalTypeResolutionWithDDL() throws Exception { String schemaName = "schemaWithLogicalFieldDDL"; Schema schema = ConversionHiveTestUtils.readSchemaFromJsonFile(resourceDir, "schemaWithLogicalField.json"); String q = HiveAvroORCQueryGenerator .generateCreateTableDDL(schema, schemaName, "file:/user/hive/warehouse/" + schemaName, Optional.<String>absent(), Optional.<Map<String, String>>absent(), Optional.<List<String>>absent(), Optional.<Map<String, HiveAvroORCQueryGenerator.COLUMN_SORT_ORDER>>absent(), Optional.<Integer>absent(), Optional.<String>absent(), Optional.<String>absent(), Optional.<String>absent(), null, isEvolutionEnabled, true, destinationTableMeta, new HashMap<String, String>()); /** * This unit has a known flaw: Due to the fact that hive-1.0.1 does not support "Date" as the logical type, * the "date" type is not being recognized by Hive's library when translating Avro schema to * TypeInfo( An TypeDescription equivalent). 
Therefore in schemaWithLogicalField.ddl, for the `nestedLogicalFieldDate` * value in `columns.types` as part of tableProperties, we will use "int" --- the physical type of date instead of "date" */ Assert.assertEquals(q.trim().replaceAll("\\s+",""), ConversionHiveTestUtils.readQueryFromFile(resourceDir, "schemaWithLogicalField.ddl").trim().replaceAll("\\s+","")); } /*** * Test DDL generation for schema structured as: Array within record within array within record * @throws IOException */ @Test public void testArrayWithinRecordWithinArrayWithinRecordDDL() throws IOException { String schemaName = "testArrayWithinRecordWithinArrayWithinRecordDDL"; Schema schema = ConversionHiveTestUtils.readSchemaFromJsonFile(resourceDir, "arrayWithinRecordWithinArrayWithinRecord_nested.json"); String q = HiveAvroORCQueryGenerator .generateCreateTableDDL(schema, schemaName, "file:/user/hive/warehouse/" + schemaName, Optional.<String>absent(), Optional.<Map<String, String>>absent(), Optional.<List<String>>absent(), Optional.<Map<String, HiveAvroORCQueryGenerator.COLUMN_SORT_ORDER>>absent(), Optional.<Integer>absent(), Optional.<String>absent(), Optional.<String>absent(), Optional.<String>absent(), null, isEvolutionEnabled, true, destinationTableMeta, new HashMap<String, String>()); Assert.assertEquals(q, ConversionHiveTestUtils.readQueryFromFile(resourceDir, "arrayWithinRecordWithinArrayWithinRecord_nested.ddl")); } /*** * Test DDL generation for schema structured as: option within option within record * @throws IOException */ @Test public void testOptionWithinOptionWithinRecordDDL() throws IOException { String schemaName = "testOptionWithinOptionWithinRecordDDL"; Schema schema = ConversionHiveTestUtils.readSchemaFromJsonFile(resourceDir, "optionWithinOptionWithinRecord_nested.json"); String q = HiveAvroORCQueryGenerator .generateCreateTableDDL(schema, schemaName, "file:/user/hive/warehouse/" + schemaName, Optional.<String>absent(), Optional.<Map<String, String>>absent(), Optional.<List<String>>absent(), Optional.<Map<String, HiveAvroORCQueryGenerator.COLUMN_SORT_ORDER>>absent(), Optional.<Integer>absent(), Optional.<String>absent(), Optional.<String>absent(), Optional.<String>absent(), null, isEvolutionEnabled, true, destinationTableMeta, new HashMap<String, String>()); Assert.assertEquals(q, ConversionHiveTestUtils.readQueryFromFile(resourceDir, "optionWithinOptionWithinRecord_nested.ddl")); } /*** * Test DDL generation for schema structured as: record within option within record * @throws IOException */ @Test public void testRecordWithinOptionWithinRecordDDL() throws IOException { String schemaName = "testRecordWithinOptionWithinRecordDDL"; Schema schema = ConversionHiveTestUtils.readSchemaFromJsonFile(resourceDir, "recordWithinOptionWithinRecord_nested.json"); String q = HiveAvroORCQueryGenerator .generateCreateTableDDL(schema, schemaName, "file:/user/hive/warehouse/" + schemaName, Optional.<String>absent(), Optional.<Map<String, String>>absent(), Optional.<List<String>>absent(), Optional.<Map<String, HiveAvroORCQueryGenerator.COLUMN_SORT_ORDER>>absent(), Optional.<Integer>absent(), Optional.<String>absent(), Optional.<String>absent(), Optional.<String>absent(), null, isEvolutionEnabled, true, destinationTableMeta, new HashMap<String, String>()); Assert.assertEquals(q.trim(), ConversionHiveTestUtils.readQueryFromFile(resourceDir, "recordWithinOptionWithinRecord_nested.ddl")); } /*** * Test DDL generation for schema structured as: record within record within record * @throws IOException */ @Test public void 
testRecordWithinRecordWithinRecordDDL() throws IOException {
    String schemaName = "testRecordWithinRecordWithinRecordDDL";
    Schema schema = ConversionHiveTestUtils.readSchemaFromJsonFile(resourceDir,
        "recordWithinRecordWithinRecord_nested.json");

    String q = HiveAvroORCQueryGenerator
        .generateCreateTableDDL(schema, schemaName, "file:/user/hive/warehouse/" + schemaName,
            Optional.<String>absent(), Optional.<Map<String, String>>absent(), Optional.<List<String>>absent(),
            Optional.<Map<String, HiveAvroORCQueryGenerator.COLUMN_SORT_ORDER>>absent(), Optional.<Integer>absent(),
            Optional.<String>absent(), Optional.<String>absent(), Optional.<String>absent(), null,
            isEvolutionEnabled, true, destinationTableMeta, new HashMap<String, String>());

    Assert.assertEquals(q.trim(),
        ConversionHiveTestUtils.readQueryFromFile(resourceDir, "recordWithinRecordWithinRecord_nested.ddl"));
  }

  /***
   * Test DDL generation for schema structured as: record within record within record after flattening
   * @throws IOException
   */
  @Test
  public void testRecordWithinRecordWithinRecordFlattenedDDL() throws IOException {
    String schemaName = "testRecordWithinRecordWithinRecordDDL";
    Schema schema = ConversionHiveTestUtils.readSchemaFromJsonFile(resourceDir,
        "recordWithinRecordWithinRecord_nested.json");

    AvroFlattener avroFlattener = new AvroFlattener();
    Schema flattenedSchema = avroFlattener.flatten(schema, true);

    String q = HiveAvroORCQueryGenerator
        .generateCreateTableDDL(flattenedSchema, schemaName, "file:/user/hive/warehouse/" + schemaName,
            Optional.<String>absent(), Optional.<Map<String, String>>absent(), Optional.<List<String>>absent(),
            Optional.<Map<String, HiveAvroORCQueryGenerator.COLUMN_SORT_ORDER>>absent(), Optional.<Integer>absent(),
            Optional.<String>absent(), Optional.<String>absent(), Optional.<String>absent(), null,
            isEvolutionEnabled, true, destinationTableMeta, new HashMap<String, String>());

    Assert.assertEquals(q,
        ConversionHiveTestUtils.readQueryFromFile(resourceDir, "recordWithinRecordWithinRecord_flattened.ddl"));
  }

  /***
   * Test DML generation for schema structured as: record within record within record after flattening
   * @throws IOException
   */
  @Test
  public void testRecordWithinRecordWithinRecordFlattenedDML() throws IOException {
    String schemaName = "testRecordWithinRecordWithinRecordDDL";
    Schema schema = ConversionHiveTestUtils.readSchemaFromJsonFile(resourceDir,
        "recordWithinRecordWithinRecord_nested.json");

    AvroFlattener avroFlattener = new AvroFlattener();
    Schema flattenedSchema = avroFlattener.flatten(schema, true);

    String q = HiveAvroORCQueryGenerator
        .generateTableMappingDML(schema, flattenedSchema, schemaName, schemaName + "_orc",
            Optional.<String>absent(), Optional.<String>absent(), Optional.<Map<String, String>>absent(),
            Optional.<Boolean>absent(), Optional.<Boolean>absent(), isEvolutionEnabled, destinationTableMeta,
            rowLimit);

    Assert.assertEquals(q.trim(),
        ConversionHiveTestUtils.readQueryFromFile(resourceDir, "recordWithinRecordWithinRecord.dml"));
  }

  /***
   * Test multi-partition DDL generation
   * @throws IOException
   */
  @Test
  public void testMultiPartitionDDL() throws IOException {
    String schemaName = "testMultiPartitionDDL";
    Schema schema = ConversionHiveTestUtils.readSchemaFromJsonFile(resourceDir,
        "recordWithinRecordWithinRecord_nested.json");

    AvroFlattener avroFlattener = new AvroFlattener();
    Schema flattenedSchema = avroFlattener.flatten(schema, true);

    Map<String, String> partitionDDLInfo = ImmutableMap.of("datepartition", "string", "id", "int", "country", "string");

    String q = HiveAvroORCQueryGenerator
        .generateCreateTableDDL(flattenedSchema, schemaName, "file:/user/hive/warehouse/" + schemaName,
            Optional.<String>absent(), Optional.of(partitionDDLInfo), Optional.<List<String>>absent(),
            Optional.<Map<String, HiveAvroORCQueryGenerator.COLUMN_SORT_ORDER>>absent(), Optional.<Integer>absent(),
            Optional.<String>absent(), Optional.<String>absent(), Optional.<String>absent(), null,
            isEvolutionEnabled, true, destinationTableMeta, new HashMap<String, String>());

    Assert.assertEquals(q, ConversionHiveTestUtils.readQueryFromFile(resourceDir, "testMultiPartition.ddl"));
  }

  /***
   * Test multi-partition DML generation
   * @throws IOException
   */
  @Test
  public void testMultiPartitionDML() throws IOException {
    String schemaName = "testMultiPartitionDML";
    Schema schema = ConversionHiveTestUtils.readSchemaFromJsonFile(resourceDir,
        "recordWithinRecordWithinRecord_nested.json");

    AvroFlattener avroFlattener = new AvroFlattener();
    Schema flattenedSchema = avroFlattener.flatten(schema, true);

    Map<String, String> partitionDMLInfo = ImmutableMap.of("datepartition", "2016-01-01", "id", "101", "country", "US");

    String q = HiveAvroORCQueryGenerator
        .generateTableMappingDML(schema, flattenedSchema, schemaName, schemaName + "_orc",
            Optional.<String>absent(), Optional.<String>absent(), Optional.of(partitionDMLInfo),
            Optional.<Boolean>absent(), Optional.<Boolean>absent(), isEvolutionEnabled, destinationTableMeta,
            rowLimit);

    Assert.assertEquals(q.trim(), ConversionHiveTestUtils.readQueryFromFile(resourceDir, "testMultiPartition.dml"));
  }

  /***
   * Test that a non-record root schema is rejected with IllegalArgumentException
   * @throws IOException
   */
  @Test(expectedExceptions = IllegalArgumentException.class)
  public void testNonRecordRootSchemaDDL() throws Exception {
    String schemaName = "nonRecordRootSchema";
    Schema schema = Schema.create(Schema.Type.STRING);

    HiveAvroORCQueryGenerator
        .generateCreateTableDDL(schema, schemaName, "file:/user/hive/warehouse/" + schemaName,
            Optional.<String>absent(), Optional.<Map<String, String>>absent(), Optional.<List<String>>absent(),
            Optional.<Map<String, HiveAvroORCQueryGenerator.COLUMN_SORT_ORDER>>absent(), Optional.<Integer>absent(),
            Optional.<String>absent(), Optional.<String>absent(), Optional.<String>absent(), null,
            isEvolutionEnabled, true, destinationTableMeta, new HashMap<String, String>());
  }

  /***
   * Test DML generation with row limit
   * @throws IOException
   */
  @Test
  public void testFlattenedDMLWithRowLimit() throws IOException {
    String schemaName = "testRecordWithinRecordWithinRecordDDL";
    Schema schema = ConversionHiveTestUtils.readSchemaFromJsonFile(resourceDir,
        "recordWithinRecordWithinRecord_nested.json");
    Optional<Integer> rowLimit = Optional.of(1);

    AvroFlattener avroFlattener = new AvroFlattener();
    Schema flattenedSchema = avroFlattener.flatten(schema, true);

    String q = HiveAvroORCQueryGenerator
        .generateTableMappingDML(schema, flattenedSchema, schemaName, schemaName + "_orc",
            Optional.<String>absent(), Optional.<String>absent(), Optional.<Map<String, String>>absent(),
            Optional.<Boolean>absent(), Optional.<Boolean>absent(), isEvolutionEnabled, destinationTableMeta,
            rowLimit);

    Assert.assertEquals(q.trim(), ConversionHiveTestUtils.readQueryFromFile(resourceDir, "flattenedWithRowLimit.dml"));
  }

  @Test
  public void testDropPartitions() throws Exception {
    // Test the multiple partition-spec drop method
    List<Map<String, String>> partitionDMLInfos = Lists.newArrayList();
    partitionDMLInfos.add(ImmutableMap.of("datepartition", "2016-01-01", "sizepartition", "10"));
    partitionDMLInfos.add(ImmutableMap.of("datepartition", "2016-01-02", "sizepartition", "20"));
    partitionDMLInfos.add(ImmutableMap.of("datepartition", "2016-01-03", "sizepartition", "30"));

    List<String> ddl = HiveAvroORCQueryGenerator.generateDropPartitionsDDL("db1", "table1", partitionDMLInfos);
    Assert.assertEquals(ddl.size(), 2);
    Assert.assertEquals(ddl.get(0), "USE db1 \n");
    Assert.assertEquals(ddl.get(1),
        "ALTER TABLE table1 DROP IF EXISTS PARTITION (datepartition='2016-01-01',sizepartition='10'), " +
            "PARTITION (datepartition='2016-01-02',sizepartition='20'), " +
            "PARTITION (datepartition='2016-01-03',sizepartition='30')");

    // An empty partition list should produce no DDL
    Assert.assertEquals(HiveAvroORCQueryGenerator.generateDropPartitionsDDL("db1", "table1",
        Collections.<Map<String, String>>emptyList()), Collections.emptyList());

    // Test the single partition-spec drop method
    Map<String, String> partitionsDMLInfo = ImmutableMap.of("datepartition", "2016-01-01", "sizepartition", "10");
    ddl = HiveAvroORCQueryGenerator.generateDropPartitionsDDL("db1", "table1", partitionsDMLInfo);
    Assert.assertEquals(ddl.size(), 2);
    Assert.assertEquals(ddl.get(0), "USE db1\n");
    Assert.assertEquals(ddl.get(1),
        "ALTER TABLE table1 DROP IF EXISTS PARTITION (`datepartition`='2016-01-01', `sizepartition`='10') ");
  }

  @Test
  public void testCreatePartitionDDL() throws Exception {
    List<String> ddl = HiveAvroORCQueryGenerator.generateCreatePartitionDDL("db1", "table1", "/tmp",
        ImmutableMap.of("datepartition", "2016-01-01", "sizepartition", "10"));
    Assert.assertEquals(ddl.size(), 2);
    Assert.assertEquals(ddl.get(0), "USE db1\n");
    Assert.assertEquals(ddl.get(1),
        "ALTER TABLE `table1` ADD IF NOT EXISTS PARTITION (`datepartition`='2016-01-01', `sizepartition`='10') \n" +
            " LOCATION '/tmp' ");
  }

  @Test
  public void testDropTableDDL() throws Exception {
    String ddl = HiveAvroORCQueryGenerator.generateDropTableDDL("db1", "table1");
    Assert.assertEquals(ddl, "DROP TABLE IF EXISTS `db1`.`table1`");
  }

  @Test
  public void testAvroToHiveTypeMapping() throws Exception {
    // test for record; this record schema is reused in the tests below
    Schema record_1 = Schema.createRecord("record_1", "", "", false, ImmutableList.<Schema.Field>of(
        new Schema.Field("a", Schema.create(Schema.Type.LONG), "", null),
        new Schema.Field("b", Schema.create(Schema.Type.BOOLEAN), "", null)));
    String hiveSchema_1 = generateAvroToHiveColumnMapping(record_1, Optional.absent(), false, "");
    // the backticks are added on purpose so that reserved keywords cannot appear as part of a column name
    String expectedHiveSchema_1 = "struct<`a`:bigint,`b`:boolean>";
    org.junit.Assert.assertEquals(hiveSchema_1, expectedHiveSchema_1);

    // test for union with null (how Avro marks an optional field); the null branch is dropped in the Hive type
    Schema union_1 = Schema.createUnion(Schema.create(Schema.Type.NULL), record_1);
    String hiveSchema_2 = generateAvroToHiveColumnMapping(union_1, Optional.absent(), false, "");
    String expectedHiveSchema_2 = "struct<`a`:bigint,`b`:boolean>";
    org.junit.Assert.assertEquals(hiveSchema_2, expectedHiveSchema_2);

    // test for array
    Schema array_1 = Schema.createArray(record_1);
    String hiveSchema_3 = generateAvroToHiveColumnMapping(array_1, Optional.absent(), false, "");
    String expectedHiveSchema_3 = "array<struct<`a`:bigint,`b`:boolean>>";
    org.junit.Assert.assertEquals(hiveSchema_3, expectedHiveSchema_3);

    // test for map
    Schema map_1 = Schema.createMap(array_1);
    String hiveSchema_4 = generateAvroToHiveColumnMapping(map_1, Optional.absent(), false, "");
    String expectedHiveSchema_4 = "map<string,array<struct<`a`:bigint,`b`:boolean>>>";
    org.junit.Assert.assertEquals(hiveSchema_4, expectedHiveSchema_4);
  }

  @Test
  public void testHiveTypeEscaping() throws Exception {
    String type = "array<struct<singleItems:array<struct<scoredEntity:struct<id:string,score:float," +
        "sourceName:string,sourceModel:string>,scores:struct<fprScore:double,fprUtility:double," +
        "calibratedFprUtility:double,sprScore:double,adjustedSprScore:double,sprUtility:double>," +
        "sponsoredFlag:string,blendingRequestId:string,forExploration:boolean,d2Resource:string," +
        "restliFinder:string,trackingId:binary,aggregation:struct<positionInAggregation:struct<index:int>," +
        "typeOfAggregation:string>,decoratedFeedUpdateData:struct<avoData:struct<actorUrn:string,verbType:" +
        "string,objectUrn:string,objectType:string>,attributedActivityUrn:string,createdTime:bigint,totalLikes:" +
        "bigint,totalComments:bigint,rootActivity:struct<activityUrn:string,avoData:struct<actorUrn:string," +
        "verbType:string,objectUrn:string,objectType:string>>>>>,scores:struct<fprScore:double,fprUtility:double," +
        "calibratedFprUtility:double,sprScore:double,adjustedSprScore:double,sprUtility:double>,position:int>>";
    String expectedEscapedType = "array<struct<`singleItems`:array<struct<`scoredEntity`:struct<`id`:string," +
        "`score`:float,`sourceName`:string,`sourceModel`:string>,`scores`:struct<`fprScore`:double," +
        "`fprUtility`:double,`calibratedFprUtility`:double,`sprScore`:double,`adjustedSprScore`:double," +
        "`sprUtility`:double>,`sponsoredFlag`:string,`blendingRequestId`:string,`forExploration`:boolean," +
        "`d2Resource`:string,`restliFinder`:string,`trackingId`:binary,`aggregation`:struct<`positionInAggregation`" +
        ":struct<`index`:int>,`typeOfAggregation`:string>,`decoratedFeedUpdateData`:struct<`avoData`:" +
        "struct<`actorUrn`:string,`verbType`:string,`objectUrn`:string,`objectType`:string>,`attributedActivityUrn`" +
        ":string,`createdTime`:bigint,`totalLikes`:bigint,`totalComments`:bigint,`rootActivity`:struct<`activityUrn`" +
        ":string,`avoData`:struct<`actorUrn`:string,`verbType`:string,`objectUrn`:string,`objectType`:string>>>>>," +
        "`scores`:struct<`fprScore`:double,`fprUtility`:double,`calibratedFprUtility`:double,`sprScore`:double," +
        "`adjustedSprScore`:double,`sprUtility`:double>,`position`:int>>";

    String actualEscapedType = HiveAvroORCQueryGenerator.escapeHiveType(type);
    Assert.assertEquals(actualEscapedType, expectedEscapedType);
  }

  @Test
  public void testValidTypeEvolution() throws Exception {
    // Check a few evolved types
    Assert.assertTrue(HiveAvroORCQueryGenerator.isTypeEvolved("float", "int"));
    Assert.assertTrue(HiveAvroORCQueryGenerator.isTypeEvolved("double", "float"));
    Assert.assertTrue(HiveAvroORCQueryGenerator.isTypeEvolved("string", "varchar"));
    Assert.assertTrue(HiveAvroORCQueryGenerator.isTypeEvolved("double", "string"));

    // An unchanged type does not count as evolved
    Assert.assertFalse(HiveAvroORCQueryGenerator.isTypeEvolved("int", "int"));
  }

  @Test(expectedExceptions = RuntimeException.class)
  public void testInvalidTypeEvolution() throws Exception {
    // Check for incompatible types
    HiveAvroORCQueryGenerator.isTypeEvolved("boolean", "int");
  }

  @Test
  public void testCreateOrUpdateViewDDL() throws Exception {
    // Check that two queries, Create View and Update View, have been generated
    List<String> ddls = HiveAvroORCQueryGenerator.generateCreateOrUpdateViewDDL("db1", "tbl1", "db2", "view1", true);
    Assert.assertEquals(ddls.size(), 2, "Two queries for Create and Update should have been generated");
    Assert.assertEquals(ddls.get(0), "CREATE VIEW IF NOT EXISTS `db2`.`view1` AS SELECT * FROM `db1`.`tbl1`");
    Assert.assertEquals(ddls.get(1), "ALTER VIEW `db2`.`view1` AS SELECT * FROM `db1`.`tbl1`");

    // Check that only the Create View query has been generated
    ddls = HiveAvroORCQueryGenerator.generateCreateOrUpdateViewDDL("db1", "tbl1", "db2", "view1", false);
    Assert.assertEquals(ddls.size(), 1, "One query for Create only should have been generated");
    Assert.assertEquals(ddls.get(0), "CREATE VIEW IF NOT EXISTS `db2`.`view1` AS SELECT * FROM `db1`.`tbl1`");
  }
}
<filename>opi_emac_osc_client/lib/buttonsgpio.cpp
/**
 * @file buttonsgpio.cpp
 *
 */
/* Copyright (C) 2019-2020 by <NAME> mailto:<EMAIL>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include <stdint.h>
#ifndef NDEBUG
#include <stdio.h>
#endif
#include <cassert>

#include "buttonsgpio.h"
#include "oscclient.h"

#include "board/h3_opi_zero.h"
#include "h3_gpio.h"

#include "debug.h"

#define BUTTON(x)		((m_nButtons >> x) & 0x01)
#define BUTTON_STATE(x)	((m_nButtons & (1U << x)) == (1U << x))

#define BUTTON0_GPIO	GPIO_EXT_13		// PA0
#define BUTTON1_GPIO	GPIO_EXT_11		// PA1
#define BUTTON2_GPIO	GPIO_EXT_22		// PA2
#define BUTTON3_GPIO	GPIO_EXT_15		// PA3

#define INT_MASK		((1U << BUTTON0_GPIO) | (1U << BUTTON1_GPIO) | (1U << BUTTON2_GPIO) | (1U << BUTTON3_GPIO))

#define LED0_GPIO		GPIO_EXT_7		// PA6
#define LED1_GPIO		GPIO_EXT_12		// PA7
#define LED2_GPIO		GPIO_EXT_26		// PA10
#define LED3_GPIO		GPIO_EXT_18		// PA18

ButtonsGpio::ButtonsGpio(OscClient *pOscClient) : m_pOscClient(pOscClient), m_nButtons(0) {
	assert(m_pOscClient != nullptr);
}

bool ButtonsGpio::Start() {
	h3_gpio_fsel(LED0_GPIO, GPIO_FSEL_OUTPUT);
	h3_gpio_fsel(LED1_GPIO, GPIO_FSEL_OUTPUT);
	h3_gpio_fsel(LED2_GPIO, GPIO_FSEL_OUTPUT);
	h3_gpio_fsel(LED3_GPIO, GPIO_FSEL_OUTPUT);

	h3_gpio_fsel(BUTTON0_GPIO, GPIO_FSEL_EINT);
	h3_gpio_fsel(BUTTON1_GPIO, GPIO_FSEL_EINT);
	h3_gpio_fsel(BUTTON2_GPIO, GPIO_FSEL_EINT);
	h3_gpio_fsel(BUTTON3_GPIO, GPIO_FSEL_EINT);

	h3_gpio_pud(BUTTON0_GPIO, GPIO_PULL_UP);
	h3_gpio_pud(BUTTON1_GPIO, GPIO_PULL_UP);
	h3_gpio_pud(BUTTON2_GPIO, GPIO_PULL_UP);
	h3_gpio_pud(BUTTON3_GPIO, GPIO_PULL_UP);

	h3_gpio_int_cfg(BUTTON0_GPIO, GPIO_INT_CFG_NEG_EDGE);
	h3_gpio_int_cfg(BUTTON1_GPIO, GPIO_INT_CFG_NEG_EDGE);
	h3_gpio_int_cfg(BUTTON2_GPIO, GPIO_INT_CFG_NEG_EDGE);
	h3_gpio_int_cfg(BUTTON3_GPIO, GPIO_INT_CFG_NEG_EDGE);

	// Enable the four button interrupts, acknowledge any pending ones, then set the input debounce clock
	H3_PIO_PA_INT->CTL |= INT_MASK;
	H3_PIO_PA_INT->STA = INT_MASK;
	H3_PIO_PA_INT->DEB = (0x0 << 0) | (0x7 << 4);

#ifndef NDEBUG
	printf("H3_PIO_PORTA->PUL0=0x%x ", H3_PIO_PORTA->PUL0);
	debug_print_bits(H3_PIO_PORTA->PUL0);
	printf("H3_PIO_PA_INT->CFG0=0x%x ", H3_PIO_PA_INT->CFG0);
	debug_print_bits(H3_PIO_PA_INT->CFG0);
	printf("H3_PIO_PA_INT->CTL=0x%x ", H3_PIO_PA_INT->CTL);
	debug_print_bits(H3_PIO_PA_INT->CTL);
	printf("H3_PIO_PA_INT->DEB=0x%x ", H3_PIO_PA_INT->DEB);
	debug_print_bits(H3_PIO_PA_INT->DEB);
#endif

	m_nButtonsCount = 4;

	return true;
}

void ButtonsGpio::Stop() {
	h3_gpio_fsel(BUTTON0_GPIO, GPIO_FSEL_DISABLE);
	h3_gpio_fsel(BUTTON1_GPIO, GPIO_FSEL_DISABLE);
	h3_gpio_fsel(BUTTON2_GPIO, GPIO_FSEL_DISABLE);
	h3_gpio_fsel(BUTTON3_GPIO, GPIO_FSEL_DISABLE);

	h3_gpio_fsel(LED0_GPIO, GPIO_FSEL_DISABLE);
	h3_gpio_fsel(LED1_GPIO, GPIO_FSEL_DISABLE);
	h3_gpio_fsel(LED2_GPIO, GPIO_FSEL_DISABLE);
	h3_gpio_fsel(LED3_GPIO, GPIO_FSEL_DISABLE);
}

void ButtonsGpio::Run() {
	m_nButtons = H3_PIO_PA_INT->STA & INT_MASK;

	if (__builtin_expect((m_nButtons != 0), 0)) {
		// Acknowledge all pending button interrupts
		H3_PIO_PA_INT->STA = INT_MASK;

		DEBUG_PRINTF("%d-%d-%d-%d", BUTTON(BUTTON0_GPIO), BUTTON(BUTTON1_GPIO), BUTTON(BUTTON2_GPIO), BUTTON(BUTTON3_GPIO));

		if (BUTTON_STATE(BUTTON0_GPIO)) {
			m_pOscClient->SendCmd(0);
			DEBUG_PUTS("");
		}

		if (BUTTON_STATE(BUTTON1_GPIO)) {
			m_pOscClient->SendCmd(1);
			DEBUG_PUTS("");
		}

		if (BUTTON_STATE(BUTTON2_GPIO)) {
			m_pOscClient->SendCmd(2);
			DEBUG_PUTS("");
		}

		if (BUTTON_STATE(BUTTON3_GPIO)) {
			m_pOscClient->SendCmd(3);
			DEBUG_PUTS("");
		}
	}
}

void ButtonsGpio::SetLed(uint8_t nLed, bool bOn) {
	DEBUG_PRINTF("led%d %s", nLed, bOn ? "On" : "Off");

	switch (nLed) {
	case 0:
		bOn ? h3_gpio_set(LED0_GPIO) : h3_gpio_clr(LED0_GPIO);
		break;
	case 1:
		bOn ? h3_gpio_set(LED1_GPIO) : h3_gpio_clr(LED1_GPIO);
		break;
	case 2:
		bOn ? h3_gpio_set(LED2_GPIO) : h3_gpio_clr(LED2_GPIO);
		break;
	case 3:
		bOn ? h3_gpio_set(LED3_GPIO) : h3_gpio_clr(LED3_GPIO);
		break;
	default:
		break;
	}
}
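/*
 * Illustrative sketch only, not part of the original driver. Assuming the H3 PIO
 * debounce register packs a clock select in bit 0 (0 selects the 32 kHz LOSC) and a
 * pre-scale exponent in bits 6:4 -- treat that field layout as an assumption -- the
 * value written in Start() samples the pins at roughly 32768 Hz / 2^7, i.e. 256 Hz.
 * The helper name is hypothetical.
 */
static constexpr uint32_t DebounceRegisterValue(uint32_t nClockSelect, uint32_t nPreScaleExponent) {
	// Keep each field within its assumed width before shifting it into place
	return ((nClockSelect & 0x1) << 0) | ((nPreScaleExponent & 0x7) << 4);
}

static_assert(DebounceRegisterValue(0x0, 0x7) == ((0x0 << 0) | (0x7 << 4)), "Matches the value written in Start()");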
package org.apereo.cas.configuration.model.support.saml.idp.metadata;

import org.apereo.cas.configuration.model.core.util.EncryptionJwtSigningJwtCryptographyProperties;
import org.apereo.cas.configuration.model.support.couchdb.BaseCouchDbProperties;
import org.apereo.cas.configuration.support.RequiresModule;
import org.apereo.cas.util.crypto.CipherExecutor;

import com.fasterxml.jackson.annotation.JsonFilter;
import lombok.Getter;
import lombok.Setter;
import lombok.experimental.Accessors;
import org.springframework.boot.context.properties.NestedConfigurationProperty;

/**
 * Configuration properties class for SAML metadata based on CouchDB.
 *
 * @author <NAME>
 * @since 6.0.0
 */
@RequiresModule(name = "cas-server-support-saml-idp-metadata-couchdb")
@Getter
@Setter
@Accessors(chain = true)
@JsonFilter("CouchDbSamlMetadataProperties")
public class CouchDbSamlMetadataProperties extends BaseCouchDbProperties {

    private static final long serialVersionUID = 1673956475847790139L;

    /**
     * Whether identity provider metadata artifacts
     * are expected to be found in the database.
     */
    private boolean idpMetadataEnabled;

    /**
     * Crypto settings that sign/encrypt the metadata records.
     */
    @NestedConfigurationProperty
    private EncryptionJwtSigningJwtCryptographyProperties crypto = new EncryptionJwtSigningJwtCryptographyProperties();

    public CouchDbSamlMetadataProperties() {
        setDbName("saml_metadata");
        crypto.getEncryption().setKeySize(CipherExecutor.DEFAULT_STRINGABLE_ENCRYPTION_KEY_SIZE);
        crypto.getSigning().setKeySize(CipherExecutor.DEFAULT_STRINGABLE_SIGNING_KEY_SIZE);
    }
}
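/*
 * Minimal usage sketch, not part of CAS: it only demonstrates the defaults set by the
 * constructor above. It assumes BaseCouchDbProperties exposes a Lombok-generated
 * getDbName() for the name set via setDbName(); the sketch class name is hypothetical.
 */
class CouchDbSamlMetadataPropertiesSketch {
    public static void main(String[] args) {
        CouchDbSamlMetadataProperties properties = new CouchDbSamlMetadataProperties();
        // The constructor points the store at the "saml_metadata" CouchDB database
        System.out.println(properties.getDbName());
        // IdP metadata lookups in the database stay disabled until turned on;
        // the setter is chainable because of @Accessors(chain = true)
        properties.setIdpMetadataEnabled(true);
        System.out.println(properties.isIdpMetadataEnabled());
    }
}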