max_stars_count (int64): 301 to 224k
text (stringlengths): 6 to 1.05M
token_count (int64): 3 to 727k
max_stars_count: 5,169
{ "name": "GDQrCode", "version": "1.0.0", "summary": "基于 CoreImage 生成一个自定义二维码(颜色 背景色 alpha logo)", "homepage": "https://github.com/gdLin600/GDQrCode", "license": "MIT", "authors": { "gdLin600": "<EMAIL>" }, "platforms": { "ios": "7.0" }, "source": { "git": "https://github.com/gdLin600/GDQrCode.git", "tag": "1.0.0" }, "source_files": "GDQrCode/*.{h,m}", "frameworks": [ "UIKit", "AVFoundation" ], "requires_arc": true }
token_count: 259
max_stars_count: 1,319
<reponame>zhangyimi/Research
import ast
import random
from collections import defaultdict

import numpy as np

random.seed(42)


class Dataset(object):
    def __init__(self, data_tag):
        self.data_path = "../process_data/"
        self.utterance = self.file_loader(self.data_path + data_tag + "_utterance.txt")
        self.goal_type = self.file_loader(self.data_path + data_tag + "_type.txt")
        self.goal_entity = self.file_loader(self.data_path + data_tag + "_entity.txt")
        self.bot = self.file_loader(self.data_path + data_tag + "_bot.txt")
        self.label = self.file_loader(self.data_path + data_tag + "_label.txt")
        self.goal_type_graph = np.load(self.data_path + "graph_type_graph.npy")
        self.goal_entity_graph = np.load(self.data_path + "graph_entity_graph.npy")
        self.goal_type_dict = self.get_neighbour_dict(self.goal_type_graph)
        self.goal_entity_dict = self.get_neighbour_dict(self.goal_entity_graph)

    def binary_task_data(self):
        binary_utterance = list()
        binary_goal_type = list()
        binary_label = list()
        for idx in range(len(self.utterance)):
            line_len = len(self.utterance[idx])
            for jdx in range(line_len):
                if self.bot[idx][jdx] == 1:
                    binary_utterance.append(self.utterance[idx][jdx - 1])
                    binary_goal_type.append(self.goal_type[idx][jdx - 1])
                    binary_label.append(self.label[idx][jdx])
        return binary_utterance, binary_goal_type, binary_label

    def next_goal_data(self, undersample=False):
        binary_goal_type = list()
        binary_final_goal_type = list()
        binary_goal_type_label = list()
        binary_goal_type_idx = list()
        binary_goal_entity = list()
        binary_final_goal_entity = list()
        binary_goal_entity_label = list()
        binary_goal_entity_idx = list()
        for idx in range(len(self.goal_type)):
            line_len = len(self.goal_type[idx])
            for jdx in range(line_len):
                if self.bot[idx][jdx] == 1:
                    pre_type_seq = self.goal_type[idx][:jdx]
                    pre_entity_seq = self.goal_entity[idx][:jdx]
                    if len(pre_type_seq) == 0 or len(pre_entity_seq) == 0:
                        continue
                    pre_type_seq, pre_entity_seq = remove_repeat(pre_type_seq, pre_entity_seq)
                    for nb in self.goal_type_dict[pre_type_seq[-1]]:
                        binary_goal_type.append(pre_type_seq + [nb])
                        binary_goal_type_idx.append(idx)
                        binary_final_goal_type.append(self.goal_type[idx][-1])
                        if nb == self.goal_type[idx][jdx]:
                            binary_goal_type_label.append(1)
                        else:
                            binary_goal_type_label.append(0)
                    cnt = 0
                    # Note: the random skip below downsamples negative entity
                    # candidates unconditionally; the `undersample` flag is
                    # accepted but not consulted by this logic.
                    for nb in self.goal_entity_dict[pre_entity_seq[-1]]:
                        if nb == self.goal_entity[idx][jdx]:
                            binary_goal_entity.append(pre_entity_seq + [nb])
                            binary_goal_entity_label.append(1)
                            cnt += 1
                        else:
                            if cnt > 10 and random.random() > 0.2:
                                continue
                            binary_goal_entity.append(pre_entity_seq + [nb])
                            binary_goal_entity_label.append(0)
                            cnt += 1
                        binary_goal_entity_idx.append(idx)
                        binary_final_goal_entity.append(self.goal_entity[idx][-1])
        return (binary_goal_type, binary_goal_type_label, binary_goal_type_idx,
                binary_goal_entity, binary_goal_entity_label, binary_goal_entity_idx,
                binary_final_goal_type, binary_final_goal_entity)

    def get_neighbour_dict(self, graph):
        graph_dict = defaultdict(list)
        for idx, line in enumerate(graph):
            for jdx, num in enumerate(line):
                if num == 1:
                    graph_dict[idx].append(jdx)
        return graph_dict

    def file_loader(self, file_path):
        # The files store Python literals (lists of lists); ast.literal_eval
        # is a safer equivalent of the original eval(f.read()) for such data.
        with open(file_path, "r") as f:
            return ast.literal_eval(f.read())


def remove_repeat(goal_seq, kg_seq):
    assert len(goal_seq) == len(kg_seq)
    new_goal_seq, new_kg_seq = list(), list()
    for idx, (a, b) in enumerate(zip(goal_seq, kg_seq)):
        # Drop an element only when both the goal and KG entries repeat
        # their immediate predecessor.
        if idx > 0 and a == goal_seq[idx - 1] and b == kg_seq[idx - 1]:
            continue
        new_goal_seq.append(a)
        new_kg_seq.append(b)
    return new_goal_seq, new_kg_seq


def file_saver(file_path, obj):
    with open(file_path, "w") as f:
        f.write(str(obj))


def get_data(data_tag, undersample=False):
    data = Dataset(data_tag)
    binary_utterance, binary_goal_type, binary_label = data.binary_task_data()
    (next_goal_type, next_goal_type_label, next_goal_type_idx,
     next_goal_entity, next_goal_entity_label, next_goal_entity_idx,
     final_goal_type, final_goal_entity) = data.next_goal_data(undersample=undersample)
    print("Binary Jump Classification...")
    print("Sample Number: %d, Jump Number: %d, Jump Rate: %.2f" % (
        len(binary_utterance), np.sum(binary_label),
        float(np.sum(binary_label)) / len(binary_utterance)))
    print("Next Goal Type Prediction...")
    print("Sample Number: %d, True Number: %d, True Rate: %.2f" % (
        len(next_goal_type), np.sum(next_goal_type_label),
        float(np.sum(next_goal_type_label)) / len(next_goal_type)))
    print("Next Goal Entity Prediction...")
    print("Sample Number: %d, True Number: %d, True Rate: %.2f\n" % (
        len(next_goal_entity), np.sum(next_goal_entity_label),
        float(np.sum(next_goal_entity_label)) / len(next_goal_entity)))
    save_path = "../train_data/"
    file_saver(save_path + data_tag + "_binary_utterance.txt", binary_utterance)
    file_saver(save_path + data_tag + "_binary_goal_type.txt", binary_goal_type)
    file_saver(save_path + data_tag + "_binary_label.txt", binary_label)
    file_saver(save_path + data_tag + "_next_goal_type.txt", next_goal_type)
    file_saver(save_path + data_tag + "_next_goal_type_idx.txt", next_goal_type_idx)
    file_saver(save_path + data_tag + "_next_goal_type_label.txt", next_goal_type_label)
    file_saver(save_path + data_tag + "_next_goal_entity.txt", next_goal_entity)
    file_saver(save_path + data_tag + "_next_goal_entity_idx.txt", next_goal_entity_idx)
    file_saver(save_path + data_tag + "_next_goal_entity_label.txt", next_goal_entity_label)
    file_saver(save_path + data_tag + "_final_goal_type.txt", final_goal_type)
    file_saver(save_path + data_tag + "_final_goal_entity.txt", final_goal_entity)


if __name__ == "__main__":
    # get_data writes its outputs to disk and returns nothing.
    get_data(data_tag="train", undersample=True)
    get_data(data_tag="val")
token_count: 3,600
max_stars_count: 2,164
<gh_stars>1000+
//
//  LOTAnimationCache.h
//  Lottie
//
//  Created by <NAME> on 1/9/17.
//  Copyright © 2017 <NAME>. All rights reserved.
//

#import <Foundation/Foundation.h>

NS_ASSUME_NONNULL_BEGIN

@class LOTComposition;

@interface LOTAnimationCache : NSObject

/// Global Cache
+ (instancetype)sharedCache;

/// Adds animation to the cache
- (void)addAnimation:(LOTComposition *)animation forKey:(NSString *)key;

/// Returns animation from cache.
- (LOTComposition * _Nullable)animationForKey:(NSString *)key;

/// Removes a specific animation from the cache
- (void)removeAnimationForKey:(NSString *)key;

/// Clears Everything from the Cache
- (void)clearCache;

/// Disables Caching Animation Model Objects
- (void)disableCaching;

@end

NS_ASSUME_NONNULL_END
token_count: 255
max_stars_count: 1,682
<gh_stars>1000+ /* Copyright (c) 2021 LinkedIn Corp. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package com.linkedin.r2.transport.http.client; import com.linkedin.common.callback.Callback; import com.linkedin.common.callback.FutureCallback; import com.linkedin.common.util.None; import com.linkedin.util.clock.Clock; import com.linkedin.util.clock.SettableClock; import com.linkedin.util.clock.SystemClock; import java.time.temporal.ChronoUnit; import java.util.ArrayList; import java.util.NoSuchElementException; import java.util.concurrent.BrokenBarrierException; import java.util.concurrent.CyclicBarrier; import java.util.stream.IntStream; import org.junit.Assert; import org.testng.annotations.Test; public class TestEvictingCircularBuffer { private static final int TEST_TIMEOUT = 3000; private static final int TEST_CAPACITY = 5; private static final int TEST_TTL = 5; private static final ChronoUnit TEST_TTL_UNIT = ChronoUnit.SECONDS; private static final SettableClock TEST_CLOCK = new SettableClock(); @Test(timeOut = TEST_TIMEOUT) public void testGettersAfterInstantiateSimple() { EvictingCircularBuffer buffer = new EvictingCircularBuffer(TEST_CAPACITY, TEST_TTL, TEST_TTL_UNIT, SystemClock.instance()); Assert.assertEquals(buffer.getCapacity(), TEST_CAPACITY); Assert.assertEquals(buffer.getTtl().getSeconds(), TEST_TTL); } @Test(timeOut = TEST_TIMEOUT) public void testCreatePutGetRepeatInOrder() { Callback<None> callback = new FutureCallback<>(); Callback<None> callbackAlso = new FutureCallback<>(); EvictingCircularBuffer buffer = getBuffer(); buffer.put(callback); Assert.assertSame(buffer.get(), callback); Assert.assertSame(buffer.get(), callback); buffer.put(callbackAlso); Assert.assertSame(buffer.get(), callbackAlso); Assert.assertSame(buffer.get(), callback); Assert.assertSame(buffer.get(), callbackAlso); } @Test(timeOut = TEST_TIMEOUT) public void testTtlPurge() { Callback<None> callback = new FutureCallback<>(); EvictingCircularBuffer buffer = getBuffer(); buffer.put(callback); Assert.assertSame(buffer.get(), callback); TEST_CLOCK.addDuration(5001); try { buffer.get(); } catch (NoSuchElementException ex) { // get } } @Test(timeOut = TEST_TIMEOUT) public void testParallelPutGet() { CyclicBarrier floodgate = new CyclicBarrier(9); Callback<None> callback = new FutureCallback<>(); EvictingCircularBuffer buffer = getBuffer(); buffer.put(callback); for (int i = 0; i < 4; i++) { new Thread(() -> { try { floodgate.await(); } catch (InterruptedException | BrokenBarrierException ignored) {} buffer.put(new FutureCallback<>()); }).start(); } for (int i = 0; i < 5; i++) { new Thread(() -> { try { floodgate.await(); } catch (InterruptedException | BrokenBarrierException ignored) {} buffer.get(); }).start(); } ArrayList<Callback<None>> results = new ArrayList<>(); IntStream.range(0, 5).forEach(x -> results.add(buffer.get())); Assert.assertTrue(results.contains(callback)); } @Test(timeOut = TEST_TIMEOUT) public void testSetCapacityAfterCreate() { EvictingCircularBuffer buffer = getBuffer(); buffer.put(new FutureCallback<>()); 
buffer.setCapacity(9001); try { buffer.get(); } catch (NoSuchElementException ex) { // buffer clears after resize by design } } @Test(timeOut = TEST_TIMEOUT) public void testSetTtlAfterCreate() { EvictingCircularBuffer buffer = getBuffer(); Callback<None> callback = new FutureCallback<>(); buffer.put(callback); buffer.setTtl(9001, ChronoUnit.MILLIS); TEST_CLOCK.addDuration(8000); Assert.assertSame(buffer.get(), callback); TEST_CLOCK.addDuration(1002); try { buffer.get(); } catch (NoSuchElementException ex) { // expired ttl } } @Test(timeOut = TEST_TIMEOUT) public void testIllegalTtlAndCapacityArguments() { EvictingCircularBuffer buffer = getBuffer(); try { buffer.setTtl(0, TEST_TTL_UNIT); } catch (IllegalArgumentException ex) { // TTL can't be less than 1. } try { buffer.setTtl(1, null); } catch (IllegalArgumentException ex) { // TTL unit can't be null } try { buffer.setCapacity(0); } catch (IllegalArgumentException ex) { // we can always do puts on EvictingCircularBuffer, so capacity should never be less than 1. } } public static EvictingCircularBuffer getBuffer() { return getBuffer(TEST_CLOCK); } public static EvictingCircularBuffer getBuffer(Clock clock) { return new EvictingCircularBuffer(TEST_CAPACITY, TEST_TTL, TEST_TTL_UNIT, clock); } }
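A minimal usage sketch of the class under test, grounded only in the constructor and the put/get/setCapacity behavior the test above exercises; it assumes the buffer class and the LinkedIn callback/clock types are on the classpath:

import java.time.temporal.ChronoUnit;
import java.util.NoSuchElementException;

import com.linkedin.common.callback.Callback;
import com.linkedin.common.callback.FutureCallback;
import com.linkedin.common.util.None;
import com.linkedin.util.clock.SystemClock;

public class EvictingCircularBufferSketch {
  public static void main(String[] args) {
    // 5 slots, 5-second TTL, wall-clock time; mirrors getBuffer() above.
    EvictingCircularBuffer buffer =
        new EvictingCircularBuffer(5, 5, ChronoUnit.SECONDS, SystemClock.instance());

    Callback<None> callback = new FutureCallback<>();
    buffer.put(callback);
    Callback<None> got = buffer.get(); // reads round-robin over live entries
    System.out.println(got == callback); // true

    buffer.setCapacity(9001); // resizing clears the buffer by design
    try {
      buffer.get();
    } catch (NoSuchElementException expected) {
      System.out.println("empty after resize (or after the TTL expires)");
    }
  }
}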
token_count: 2,028
max_stars_count: 30,785
<filename>jadx-plugins/jadx-plugins-api/src/main/java/jadx/api/plugins/input/data/IMethodHandle.java
package jadx.api.plugins.input.data;

public interface IMethodHandle {
	MethodHandleType getType();

	IFieldRef getFieldRef();

	IMethodRef getMethodRef();

	void load();
}
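For illustration, a hypothetical implementing stub (the class name and null placeholders are invented; MethodHandleType, IFieldRef, and IMethodRef are the sibling types the interface references):

package jadx.api.plugins.input.data;

// Hypothetical stub modeling method handles only; field handles return null.
public class SimpleMethodHandle implements IMethodHandle {
	private final MethodHandleType type;
	private final IMethodRef methodRef;

	public SimpleMethodHandle(MethodHandleType type, IMethodRef methodRef) {
		this.type = type;
		this.methodRef = methodRef;
	}

	@Override
	public MethodHandleType getType() {
		return type;
	}

	@Override
	public IFieldRef getFieldRef() {
		return null; // this stub carries no field reference
	}

	@Override
	public IMethodRef getMethodRef() {
		return methodRef;
	}

	@Override
	public void load() {
		// nothing to load lazily in this stub
	}
}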
token_count: 96
max_stars_count: 347
<filename>backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/IPTablesDeprecationNotifier.java package org.ovirt.engine.core.bll; import java.util.concurrent.TimeUnit; import javax.annotation.PostConstruct; import javax.enterprise.concurrent.ManagedScheduledExecutorService; import javax.inject.Inject; import javax.inject.Singleton; import org.ovirt.engine.core.common.AuditLogType; import org.ovirt.engine.core.common.BackendService; import org.ovirt.engine.core.common.businessentities.Cluster; import org.ovirt.engine.core.common.network.FirewallType; import org.ovirt.engine.core.compat.Version; import org.ovirt.engine.core.dal.dbbroker.auditloghandling.AuditLogDirector; import org.ovirt.engine.core.dal.dbbroker.auditloghandling.AuditLogable; import org.ovirt.engine.core.dal.dbbroker.auditloghandling.AuditLogableImpl; import org.ovirt.engine.core.dao.ClusterDao; import org.ovirt.engine.core.utils.threadpool.ThreadPools; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @Singleton public class IPTablesDeprecationNotifier implements BackendService { private static final Logger log = LoggerFactory.getLogger(IPTablesDeprecationNotifier.class); @Inject private ClusterDao clusterDao; @Inject private AuditLogDirector auditLogDirector; @Inject @ThreadPools(ThreadPools.ThreadPoolType.EngineScheduledThreadPool) private ManagedScheduledExecutorService executor; @PostConstruct private void init() { log.info("Start initializing {}", getClass().getSimpleName()); executor.scheduleWithFixedDelay(this::checkClustersWithIPTables, 0, 30, TimeUnit.DAYS); log.info("Finished initializing {}", getClass().getSimpleName()); } private void checkClustersWithIPTables() { clusterDao.getAll().stream() .filter(cluster -> cluster.getFirewallType() == FirewallType.IPTABLES) .forEach(cluster -> logWarning(cluster)); } private void logWarning(Cluster cluster) { AuditLogable auditLog = new AuditLogableImpl(); auditLog.setClusterId(cluster.getId()); auditLog.setClusterName(cluster.getName()); auditLog.addCustomValue("DeprecatingVersion", Version.v4_2.toString()); // we cannot add 4.3 to Version enum until 4.2 release is branched auditLog.addCustomValue("RemovingVersion", "4.3"); auditLogDirector.log(auditLog, AuditLogType.DEPRECATED_IPTABLES_FIREWALL); } }
token_count: 969
max_stars_count: 777
/* * Artificial Intelligence for Humans * Volume 1: Fundamental Algorithms * Java Version * http://www.aifh.org * http://www.jeffheaton.com * * Code repository: * https://github.com/jeffheaton/aifh * Copyright 2013 by <NAME> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * For more information on Heaton Research copyrights, licenses * and trademarks visit: * http://www.heatonresearch.com/copyright */ package com.heatonresearch.aifh.kmeans; import com.heatonresearch.aifh.general.data.BasicData; import org.junit.Test; import static junit.framework.Assert.assertEquals; /** * Test the cluster class. */ public class TestCluster { @Test public void testDimensions() { final Cluster cluster = new Cluster(3); assertEquals(true, cluster.toString().length() > 0); assertEquals(3, cluster.getDimensions()); } @Test public void testCenter() { final Cluster cluster = new Cluster(3); final double[] ob1 = {2.0, 10.0, 100.0}; final double[] ob2 = {4.0, 20.0, 200.0}; final double[] ob3 = {6.0, 30.0, 300.0}; cluster.getObservations().add(new BasicData(ob1)); cluster.getObservations().add(new BasicData(ob2)); cluster.getObservations().add(new BasicData(ob3)); assertEquals(3, cluster.getObservations().size()); cluster.calculateCenter(); assertEquals(4.0, cluster.getCenter()[0], 0.00001); assertEquals(20.0, cluster.getCenter()[1], 0.00001); assertEquals(200.0, cluster.getCenter()[2], 0.00001); } }
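A short sketch of the Cluster API exactly as the test above exercises it: observations are appended, then calculateCenter() averages each dimension.

import com.heatonresearch.aifh.general.data.BasicData;
import com.heatonresearch.aifh.kmeans.Cluster;

public class ClusterSketch {
    public static void main(String[] args) {
        final Cluster cluster = new Cluster(2); // 2-dimensional observations
        cluster.getObservations().add(new BasicData(new double[] {1.0, 3.0}));
        cluster.getObservations().add(new BasicData(new double[] {3.0, 5.0}));
        cluster.calculateCenter();
        // The center is the per-dimension mean: [2.0, 4.0]
        System.out.println(cluster.getCenter()[0] + ", " + cluster.getCenter()[1]);
    }
}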
token_count: 736
max_stars_count: 679
/**************************************************************
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 *
 *************************************************************/

#ifndef SC_UNDOOLK_HXX
#define SC_UNDOOLK_HXX

#include <tools/solar.h>

class SdrUndoAction;
class ScDocument;

SdrUndoAction* GetSdrUndoAction(ScDocument* pDoc);
void DoSdrUndoAction(SdrUndoAction* pUndoAction, ScDocument* pDoc);
void RedoSdrUndoAction(SdrUndoAction* pUndoAction);
void DeleteSdrUndoAction(SdrUndoAction* pUndoAction);
void EnableDrawAdjust(ScDocument* pDoc, sal_Bool bEnable);

#endif
token_count: 394
max_stars_count: 790
from django.contrib.gis.db import models class City3D(models.Model): name = models.CharField(max_length=30) point = models.PointField(dim=3) objects = models.GeoManager() def __unicode__(self): return self.name class Interstate2D(models.Model): name = models.CharField(max_length=30) line = models.LineStringField(srid=4269) objects = models.GeoManager() def __unicode__(self): return self.name class Interstate3D(models.Model): name = models.CharField(max_length=30) line = models.LineStringField(dim=3, srid=4269) objects = models.GeoManager() def __unicode__(self): return self.name class InterstateProj2D(models.Model): name = models.CharField(max_length=30) line = models.LineStringField(srid=32140) objects = models.GeoManager() def __unicode__(self): return self.name class InterstateProj3D(models.Model): name = models.CharField(max_length=30) line = models.LineStringField(dim=3, srid=32140) objects = models.GeoManager() def __unicode__(self): return self.name class Polygon2D(models.Model): name = models.CharField(max_length=30) poly = models.PolygonField(srid=32140) objects = models.GeoManager() def __unicode__(self): return self.name class Polygon3D(models.Model): name = models.CharField(max_length=30) poly = models.PolygonField(dim=3, srid=32140) objects = models.GeoManager() def __unicode__(self): return self.name class Point2D(models.Model): point = models.PointField() objects = models.GeoManager() class Point3D(models.Model): point = models.PointField(dim=3) objects = models.GeoManager() class MultiPoint3D(models.Model): mpoint = models.MultiPointField(dim=3) objects = models.GeoManager()
token_count: 726
max_stars_count: 839
<reponame>AnEmortalKid/cxf /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.cxf.systest.jaxrs; import java.io.InputStream; import java.net.URL; import java.net.URLConnection; import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Map; import javax.ws.rs.core.GenericType; import javax.ws.rs.core.MediaType; import com.fasterxml.jackson.jaxrs.json.JacksonJsonProvider; import org.apache.cxf.BusFactory; import org.apache.cxf.helpers.IOUtils; import org.apache.cxf.jaxrs.client.JAXRSClientFactory; import org.apache.cxf.jaxrs.client.WebClient; import org.apache.cxf.jaxrs.ext.multipart.Attachment; import org.apache.cxf.jaxrs.model.AbstractResourceInfo; import org.apache.cxf.jaxrs.provider.JAXBElementProvider; import org.apache.cxf.testutil.common.AbstractBusClientServerTestBase; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; public class JAXRSClientServerResourceJacksonSpringProviderTest extends AbstractBusClientServerTestBase { public static final String PORT = BookServerResourceJacksonSpringProviders.PORT; @BeforeClass public static void startServers() throws Exception { AbstractResourceInfo.clearAllMaps(); assertTrue("server did not launch correctly", launchServer(BookServerResourceJacksonSpringProviders.class, true)); createStaticBus(); BusFactory.getDefaultBus().setProperty("skip.default.json.provider.registration", true); } @AfterClass public static void afterClass() throws Exception { BusFactory.getDefaultBus().getProperties().remove("skip.default.json.provider.registration"); } @Test public void testGetBook123() throws Exception { String endpointAddress = "http://localhost:" + PORT + "/webapp/store1/bookstore/books/123"; URL url = new URL(endpointAddress); URLConnection connect = url.openConnection(); connect.addRequestProperty("Accept", "application/json"); InputStream in = connect.getInputStream(); assertNotNull(in); assertEquals("Jackson output not correct", "{\"class\":\"org.apache.cxf.systest.jaxrs.Book\",\"name\":\"CXF in Action\",\"id\":123}", getStringFromInputStream(in).trim()); } @Test public void testGetSuperBookProxy() throws Exception { String endpointAddress = "http://localhost:" + PORT + "/webapp/store2"; BookStoreSpring proxy = JAXRSClientFactory.create(endpointAddress, BookStoreSpring.class, Collections.singletonList(new JacksonJsonProvider())); SuperBook book = proxy.getSuperBookJson(); assertEquals(999L, book.getId()); } @Test public void testMultipart() throws Exception { String endpointAddress = "http://localhost:" + PORT + "/webapp/multipart"; MultipartStore proxy = 
JAXRSClientFactory.create(endpointAddress, MultipartStore.class, Collections.singletonList(new JacksonJsonProvider())); Book json = new Book("json", 1L); InputStream is1 = getClass().getResourceAsStream("/org/apache/cxf/systest/jaxrs/resources/java.jpg"); Map<String, Object> attachments = proxy.addBookJsonImageStream(json, is1); assertEquals(2, attachments.size()); Book json2 = ((Attachment)attachments.get("application/json")).getObject(Book.class); assertEquals("json", json2.getName()); assertEquals(1L, json2.getId()); InputStream is2 = ((Attachment)attachments.get("application/octet-stream")).getObject(InputStream.class); byte[] image1 = IOUtils.readBytesFromStream( getClass().getResourceAsStream("/org/apache/cxf/systest/jaxrs/resources/java.jpg")); byte[] image2 = IOUtils.readBytesFromStream(is2); assertArrayEquals(image1, image2); } @Test public void testGetSuperBookCollectionProxy() throws Exception { String endpointAddress = "http://localhost:" + PORT + "/webapp/store2"; BookStoreSpring proxy = JAXRSClientFactory.create(endpointAddress, BookStoreSpring.class, Collections.singletonList(new JacksonJsonProvider())); List<SuperBook> books = proxy.getSuperBookCollectionJson(); assertEquals(999L, books.get(0).getId()); } @Test public void testEchoSuperBookCollectionProxy() throws Exception { String endpointAddress = "http://localhost:" + PORT + "/webapp/store2"; BookStoreSpring proxy = JAXRSClientFactory.create(endpointAddress, BookStoreSpring.class, Collections.singletonList(new JacksonJsonProvider())); WebClient.getConfig(proxy).getHttpConduit().getClient().setReceiveTimeout(10000000L); List<SuperBook> books = proxy.echoSuperBookCollectionJson(Collections.singletonList(new SuperBook("Super", 124L, true))); assertEquals(124L, books.get(0).getId()); assertTrue(books.get(0).isSuperBook()); } @Test public void testEchoSuperBookProxy() throws Exception { String endpointAddress = "http://localhost:" + PORT + "/webapp/store2"; BookStoreSpring proxy = JAXRSClientFactory.create(endpointAddress, BookStoreSpring.class, Collections.singletonList(new JacksonJsonProvider())); SuperBook book = proxy.echoSuperBookJson(new SuperBook("Super", 124L, true)); assertEquals(124L, book.getId()); assertTrue(book.isSuperBook()); } @Test public void testEchoGenericSuperBookCollectionProxy() throws Exception { String endpointAddress = "http://localhost:" + PORT + "/webapp/custombus/genericstore"; GenericBookStoreSpring proxy = JAXRSClientFactory.create(endpointAddress, GenericBookStoreSpring.class, Collections.singletonList(new JacksonJsonProvider())); List<SuperBook> books = proxy.echoSuperBookCollectionJson(Collections.singletonList(new SuperBook("Super", 124L, true))); assertEquals(124L, books.get(0).getId()); assertTrue(books.get(0).isSuperBook()); } @Test public void testEchoGenericSuperBookProxy() throws Exception { String endpointAddress = "http://localhost:" + PORT + "/webapp/custombus/genericstore"; GenericBookStoreSpring proxy = JAXRSClientFactory.create(endpointAddress, GenericBookStoreSpring.class, Collections.singletonList(new JacksonJsonProvider())); WebClient.getConfig(proxy).getHttpConduit().getClient().setReceiveTimeout(1000000000L); SuperBook book = proxy.echoSuperBookJson(new SuperBook("Super", 124L, true)); assertEquals(124L, book.getId()); assertTrue(book.isSuperBook()); } @Test public void testGetGenericSuperBookInt1() throws Exception { String endpointAddress = "http://localhost:" + PORT + "/webapp/genericstoreInt1/int/books/superbook"; WebClient wc = WebClient.create(endpointAddress, 
Collections.singletonList(new JacksonJsonProvider())); WebClient.getConfig(wc).getHttpConduit().getClient().setReceiveTimeout(1000000000L); GenericType<List<SuperBook>> genericResponseType = new GenericType<List<SuperBook>>() { }; List<SuperBook> books = wc.get(genericResponseType); assertEquals(1, books.size()); assertEquals(111L, books.get(0).getId()); } @Test public void testGetGenericSuperBookInt2() throws Exception { String endpointAddress = "http://localhost:" + PORT + "/webapp/genericstoreInt2"; GenericBookServiceInterface proxy = JAXRSClientFactory.create(endpointAddress, GenericBookServiceInterface.class, Collections.singletonList(new JacksonJsonProvider())); WebClient.getConfig(proxy).getHttpConduit().getClient().setReceiveTimeout(1000000000L); List<SuperBook> books = proxy.getSuperBook(); assertEquals(1, books.size()); assertEquals(111L, books.get(0).getId()); } @Test public void testEchoGenericSuperBookProxy2Json() throws Exception { String endpointAddress = "http://localhost:" + PORT + "/webapp/genericstore2"; GenericBookStoreSpring2 proxy = JAXRSClientFactory.create(endpointAddress, GenericBookStoreSpring2.class, Collections.singletonList(new JacksonJsonProvider())); WebClient.getConfig(proxy).getHttpConduit().getClient().setReceiveTimeout(1000000000L); WebClient.client(proxy).type("application/json").accept("application/json"); SuperBook book = proxy.echoSuperBook(new SuperBook("Super", 124L, true)); assertEquals(124L, book.getId()); assertTrue(book.isSuperBook()); } @Test public void testEchoGenericSuperBookProxy2JsonType() throws Exception { String endpointAddress = "http://localhost:" + PORT + "/webapp/genericstore2type"; GenericBookStoreSpring2 proxy = JAXRSClientFactory.create(endpointAddress, GenericBookStoreSpring2.class, Collections.singletonList(new JacksonJsonProvider())); WebClient.getConfig(proxy).getHttpConduit().getClient().setReceiveTimeout(1000000000L); WebClient.client(proxy).type("application/json").accept("application/json"); SuperBook2 book = proxy.echoSuperBookType(new SuperBook2("Super", 124L, true)); assertEquals(124L, book.getId()); assertTrue(book.isSuperBook()); } @Test public void testEchoGenericSuperBookProxy2Xml() throws Exception { String endpointAddress = "http://localhost:" + PORT + "/webapp/genericstore2"; JAXBElementProvider<Object> jaxbProvider = new JAXBElementProvider<>(); jaxbProvider.setXmlRootAsJaxbElement(true); jaxbProvider.setMarshallAsJaxbElement(true); GenericBookStoreSpring2 proxy = JAXRSClientFactory.create(endpointAddress, GenericBookStoreSpring2.class, Collections.singletonList(jaxbProvider)); WebClient.getConfig(proxy).getHttpConduit().getClient().setReceiveTimeout(1000000000L); WebClient.client(proxy).type("application/xml").accept("application/xml"); SuperBook book = proxy.echoSuperBook(new SuperBook("Super", 124L, true)); assertEquals(124L, book.getId()); assertTrue(book.isSuperBook()); } @Test public void testEchoGenericSuperBookProxy2XmlType() throws Exception { String endpointAddress = "http://localhost:" + PORT + "/webapp/genericstore2type"; JAXBElementProvider<Object> jaxbProvider = new JAXBElementProvider<>(); jaxbProvider.setMarshallAsJaxbElement(true); jaxbProvider.setUnmarshallAsJaxbElement(true); GenericBookStoreSpring2 proxy = JAXRSClientFactory.create(endpointAddress, GenericBookStoreSpring2.class, Collections.singletonList(jaxbProvider)); WebClient.getConfig(proxy).getHttpConduit().getClient().setReceiveTimeout(1000000000L); WebClient.client(proxy).type("application/xml").accept("application/xml"); SuperBook2 
book = proxy.echoSuperBookType(new SuperBook2("Super", 124L, true)); assertEquals(124L, book.getId()); assertTrue(book.isSuperBook()); } @Test public void testEchoGenericSuperBookCollectionProxy2Json() throws Exception { String endpointAddress = "http://localhost:" + PORT + "/webapp/genericstore2"; GenericBookStoreSpring2 proxy = JAXRSClientFactory.create(endpointAddress, GenericBookStoreSpring2.class, Collections.singletonList(new JacksonJsonProvider())); WebClient.client(proxy).type("application/json").accept("application/json"); List<SuperBook> books = proxy.echoSuperBookCollection(Collections.singletonList(new SuperBook("Super", 124L, true))); assertEquals(124L, books.get(0).getId()); assertTrue(books.get(0).isSuperBook()); } @Test public void testEchoGenericSuperBookCollectionProxy2JsonType() throws Exception { String endpointAddress = "http://localhost:" + PORT + "/webapp/genericstore2type"; GenericBookStoreSpring2 proxy = JAXRSClientFactory.create(endpointAddress, GenericBookStoreSpring2.class, Collections.singletonList(new JacksonJsonProvider())); WebClient.client(proxy).type("application/json").accept("application/json"); List<SuperBook2> books = proxy.echoSuperBookTypeCollection(Collections.singletonList(new SuperBook2("Super", 124L, true))); assertEquals(124L, books.get(0).getId()); assertTrue(books.get(0).isSuperBook()); } @Test public void testEchoGenericSuperBookCollectionProxy2Xml() throws Exception { String endpointAddress = "http://localhost:" + PORT + "/webapp/genericstore2"; JAXBElementProvider<Object> jaxbProvider = new JAXBElementProvider<>(); jaxbProvider.setMarshallAsJaxbElement(true); jaxbProvider.setUnmarshallAsJaxbElement(true); GenericBookStoreSpring2 proxy = JAXRSClientFactory.create(endpointAddress, GenericBookStoreSpring2.class, Collections.singletonList(jaxbProvider)); WebClient.client(proxy).type("application/xml").accept("application/xml"); WebClient.getConfig(proxy).getHttpConduit().getClient().setReceiveTimeout(1000000000L); List<SuperBook> books = proxy.echoSuperBookCollection(Collections.singletonList(new SuperBook("Super", 124L, true))); assertEquals(124L, books.get(0).getId()); assertTrue(books.get(0).isSuperBook()); } @Test public void testEchoGenericSuperBookCollectionProxy2XmlType() throws Exception { String endpointAddress = "http://localhost:" + PORT + "/webapp/genericstore2type"; JAXBElementProvider<Object> jaxbProvider = new JAXBElementProvider<>(); jaxbProvider.setMarshallAsJaxbElement(true); jaxbProvider.setUnmarshallAsJaxbElement(true); GenericBookStoreSpring2 proxy = JAXRSClientFactory.create(endpointAddress, GenericBookStoreSpring2.class, Collections.singletonList(jaxbProvider)); WebClient.client(proxy).type("application/xml").accept("application/xml"); WebClient.getConfig(proxy).getHttpConduit().getClient().setReceiveTimeout(1000000000L); List<SuperBook2> books = proxy.echoSuperBookTypeCollection(Collections.singletonList(new SuperBook2("Super", 124L, true))); assertEquals(124L, books.get(0).getId()); assertTrue(books.get(0).isSuperBook()); } @Test public void testEchoGenericSuperBookWebClient() throws Exception { String endpointAddress = "http://localhost:" + PORT + "/webapp/custombus/genericstore/books/superbook"; WebClient wc = WebClient.create(endpointAddress, Collections.singletonList(new JacksonJsonProvider())); wc.accept(MediaType.APPLICATION_JSON).type(MediaType.APPLICATION_JSON); SuperBook book = wc.post(new SuperBook("Super", 124L, true), SuperBook.class); assertEquals(124L, book.getId()); assertTrue(book.isSuperBook()); } @Test 
public void testEchoGenericSuperBookWebClientXml() throws Exception { String endpointAddress = "http://localhost:" + PORT + "/webapp/custombus/genericstore/books/superbook"; WebClient wc = WebClient.create(endpointAddress); wc.accept(MediaType.APPLICATION_XML).type(MediaType.APPLICATION_XML); SuperBook book = wc.post(new SuperBook("Super", 124L, true), SuperBook.class); assertEquals(124L, book.getId()); assertTrue(book.isSuperBook()); } @Test public void testEchoGenericSuperBookCollectionWebClient() throws Exception { String endpointAddress = "http://localhost:" + PORT + "/webapp/custombus/genericstore/books/superbooks"; WebClient wc = WebClient.create(endpointAddress, Collections.singletonList(new JacksonJsonProvider())); WebClient.getConfig(wc).getHttpConduit().getClient().setReceiveTimeout(100000000L); wc.accept(MediaType.APPLICATION_JSON).type(MediaType.APPLICATION_JSON); Collection<? extends SuperBook> books = wc.postAndGetCollection(Collections.singletonList(new SuperBook("Super", 124L, true)), SuperBook.class, SuperBook.class); SuperBook book = books.iterator().next(); assertEquals(124L, book.getId()); assertTrue(book.isSuperBook()); } @Test public void testGetGenericSuperBookCollectionWebClient() throws Exception { String endpointAddress = "http://localhost:" + PORT + "/webapp/custombus/genericstore/books/superbooks2"; WebClient wc = WebClient.create(endpointAddress, Collections.singletonList(new JacksonJsonProvider())); WebClient.getConfig(wc).getHttpConduit().getClient().setReceiveTimeout(100000000L); wc.accept(MediaType.APPLICATION_JSON); List<SuperBook> books = wc.get(new GenericType<List<SuperBook>>() { }); SuperBook book = books.iterator().next(); assertEquals(124L, book.getId()); assertTrue(book.isSuperBook()); } @Test public void testEchoGenericSuperBookCollectionWebClientXml() throws Exception { String endpointAddress = "http://localhost:" + PORT + "/webapp/custombus/genericstore/books/superbooks"; WebClient wc = WebClient.create(endpointAddress); wc.accept(MediaType.APPLICATION_XML).type(MediaType.APPLICATION_XML); Collection<? extends SuperBook> books = wc.postAndGetCollection(Collections.singletonList(new SuperBook("Super", 124L, true)), SuperBook.class, SuperBook.class); SuperBook book = books.iterator().next(); assertEquals(124L, book.getId()); assertTrue(book.isSuperBook()); } @Test public void testGetCollectionOfBooks() throws Exception { String endpointAddress = "http://localhost:" + PORT + "/webapp/store1/bookstore/collections"; WebClient wc = WebClient.create(endpointAddress, Collections.singletonList(new JacksonJsonProvider())); wc.accept("application/json"); Collection<? extends Book> collection = wc.getCollection(Book.class); assertEquals(1, collection.size()); Book book = collection.iterator().next(); assertEquals(123L, book.getId()); } @Test public void testGetCollectionOfSuperBooks() throws Exception { String endpointAddress = "http://localhost:" + PORT + "/webapp/store2/books/superbooks"; WebClient wc = WebClient.create(endpointAddress, Collections.singletonList(new JacksonJsonProvider())); wc.accept("application/json"); Collection<? extends Book> collection = wc.getCollection(Book.class); assertEquals(1, collection.size()); Book book = collection.iterator().next(); assertEquals(999L, book.getId()); } private String getStringFromInputStream(InputStream in) throws Exception { return IOUtils.toString(in); } }
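The tests above repeat one client pattern; the sketch below distills it, assuming the fixture classes (BookStoreSpring, SuperBook, the server) from this test package are available and the server is running:

import java.util.Collections;

import javax.ws.rs.core.MediaType;

import com.fasterxml.jackson.jaxrs.json.JacksonJsonProvider;

import org.apache.cxf.jaxrs.client.JAXRSClientFactory;
import org.apache.cxf.jaxrs.client.WebClient;

public class JacksonClientSketch {
    public static void main(String[] args) {
        String base = "http://localhost:" + BookServerResourceJacksonSpringProviders.PORT;

        // Typed proxy: calls on the annotated interface become HTTP requests,
        // with Jackson serializing the JSON bodies.
        BookStoreSpring proxy = JAXRSClientFactory.create(base + "/webapp/store2",
            BookStoreSpring.class, Collections.singletonList(new JacksonJsonProvider()));
        SuperBook book = proxy.getSuperBookJson();

        // Untyped client against the same endpoint family.
        WebClient wc = WebClient.create(base + "/webapp/store2/books/superbooks",
            Collections.singletonList(new JacksonJsonProvider()));
        wc.accept(MediaType.APPLICATION_JSON);

        System.out.println(book.getId());
    }
}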
token_count: 7,712
max_stars_count: 1,233
/*
 * Copyright 2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.mantisrx.runtime.command;

import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.zip.ZipEntry;
import java.util.zip.ZipOutputStream;

public class CreateZipFile implements Command {

    private final File jobJarFile;
    private final File jobDescriptor;
    private final File zipfileName;

    public CreateZipFile(File zipfileName, File jobJarFile, File jobDescriptor) {
        this.zipfileName = zipfileName;
        this.jobJarFile = jobJarFile;
        this.jobDescriptor = jobDescriptor;
    }

    private void readBytesFromFile(File file, ZipOutputStream os) throws CommandException {
        // try-with-resources replaces the original manual close, which could
        // raise a NullPointerException when the stream failed to open.
        try (BufferedInputStream is =
                 new BufferedInputStream(Files.newInputStream(Paths.get(file.toURI())))) {
            byte[] in = new byte[1024];
            int bytesRead;
            while ((bytesRead = is.read(in)) > 0) {
                os.write(in, 0, bytesRead);
            }
        } catch (IOException e) {
            throw new CommandException(e);
        }
    }

    @Override
    public void execute() throws CommandException {
        try (ZipOutputStream out =
                 new ZipOutputStream(new BufferedOutputStream(new FileOutputStream(zipfileName)))) {
            // One entry for the job jar, one for its descriptor.
            out.putNextEntry(new ZipEntry(jobJarFile.getName()));
            readBytesFromFile(jobJarFile, out);
            out.closeEntry();

            out.putNextEntry(new ZipEntry(jobDescriptor.getName()));
            readBytesFromFile(jobDescriptor, out);
            out.closeEntry();
        } catch (IOException e) {
            throw new CommandException(e);
        }
    }
}
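A usage sketch of the command, with hypothetical file names; it bundles the job jar and its descriptor into a single zip:

import java.io.File;

public class CreateZipFileSketch {
    public static void main(String[] args) throws CommandException {
        // Hypothetical paths: package the two job artifacts for upload.
        File zip = new File("my-job.zip");
        File jar = new File("my-job.jar");
        File descriptor = new File("my-job.json");
        new CreateZipFile(zip, jar, descriptor).execute();
    }
}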
token_count: 1,230
max_stars_count: 1,652
package com.ctrip.xpipe.redis.proxy.handler; import com.ctrip.xpipe.api.endpoint.Endpoint; import com.ctrip.xpipe.api.pool.SimpleObjectPool; import com.ctrip.xpipe.netty.commands.NettyClient; import com.ctrip.xpipe.redis.core.protocal.protocal.SimpleStringParser; import com.ctrip.xpipe.redis.core.proxy.PROXY_OPTION; import com.ctrip.xpipe.redis.core.proxy.endpoint.DefaultProxyEndpoint; import com.ctrip.xpipe.redis.proxy.handler.response.ProxyPingHandler; import com.ctrip.xpipe.redis.proxy.integrate.AbstractProxyIntegrationTest; import com.ctrip.xpipe.redis.proxy.resource.ResourceManager; import com.ctrip.xpipe.redis.proxy.resource.TestResourceManager; import com.ctrip.xpipe.simpleserver.Server; import io.netty.buffer.ByteBuf; import io.netty.channel.Channel; import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; import java.net.InetSocketAddress; import java.util.concurrent.atomic.AtomicReference; import static org.mockito.Matchers.any; import static org.mockito.Mockito.*; public class ProxyPingHandlerTest extends AbstractProxyIntegrationTest { private ProxyPingHandler handler; private ResourceManager manager; @Before public void before() { manager = new TestResourceManager(); handler = new ProxyPingHandler(manager); } @Test public void testGetOption() { Assert.assertEquals(PROXY_OPTION.PING, handler.getOption()); } @Test public void testDoHandle() { Channel channel = mock(Channel.class); InetSocketAddress addr = new InetSocketAddress("127.0.0.1", randomPort()); when(channel.localAddress()).thenReturn(addr); AtomicReference<String> actual = new AtomicReference<>(); doAnswer(new Answer() { @Override public Object answer(InvocationOnMock invocation) throws Throwable { actual.set(new SimpleStringParser().read(invocation.getArgumentAt(0, ByteBuf.class)).getPayload()); return null; } }).when(channel).writeAndFlush(any(ByteBuf.class)); handler.handle(channel, new String[]{"PING"}); String expected = "PROXY PONG 127.0.0.1:" + addr.getPort(); Assert.assertEquals(expected, actual.get()); } @Test public void testDoHandleWithForwardPing() throws Exception { int port = randomPort(); Server server = startServer(port, "+PROXY PONG 127.0.0.1:" + port + "\r\n"); Channel channel = mock(Channel.class); AtomicReference<String> actual = new AtomicReference<>(); doAnswer(new Answer() { @Override public Object answer(InvocationOnMock invocation) throws Throwable { actual.set(new SimpleStringParser().read(invocation.getArgumentAt(0, ByteBuf.class)).getPayload()); return null; } }).when(channel).writeAndFlush(any(ByteBuf.class)); Endpoint key = new DefaultProxyEndpoint("TCP://127.0.0.1:" + server.getPort()); SimpleObjectPool<NettyClient> pool = manager.getKeyedObjectPool().getKeyPool(key); NettyClient client = pool.borrowObject(); pool.returnObject(client); handler.handle(channel, new String[]{"PING", "TCP://127.0.0.1:" + server.getPort()}); String expected = String.format("PROXY PONG 127.0.0.1:%d 127.0.0.1:%d", server.getPort(), server.getPort()); waitConditionUntilTimeOut(()->actual.get() != null, 1000); logger.info("[receive] {}", actual.get()); Assert.assertTrue(actual.get().startsWith(expected)); server.stop(); } }
token_count: 1,427
max_stars_count: 973
{"remainingRequest":"/Users/leonard.gonsalves/aws/Flask-Scaffold/app/templates/static/node_modules/@angular-devkit/build-optimizer/src/build-optimizer/webpack-loader.js??ref--8-1!/Users/leonard.gonsalves/aws/Flask-Scaffold/app/templates/static/node_modules/core-js/modules/es6.math.hypot.js","dependencies":[{"path":"/Users/leonard.gonsalves/aws/Flask-Scaffold/app/templates/static/node_modules/core-js/modules/es6.math.hypot.js","mtime":499162500000},{"path":"/Users/leonard.gonsalves/aws/Flask-Scaffold/app/templates/static/node_modules/cache-loader/dist/cjs.js","mtime":499162500000},{"path":"/Users/leonard.gonsalves/aws/Flask-Scaffold/app/templates/static/node_modules/@angular-devkit/build-optimizer/src/build-optimizer/webpack-loader.js","mtime":1528256844000}],"contextDependencies":[],"result":["// 172.16.17.32 Math.hypot([value1[, value2[, … ]]])\nvar $export = require('./_export');\nvar abs = Math.abs;\n\n$export($export.S, 'Math', {\n hypot: function hypot(value1, value2) { // eslint-disable-line no-unused-vars\n var sum = 0;\n var i = 0;\n var aLen = arguments.length;\n var larg = 0;\n var arg, div;\n while (i < aLen) {\n arg = abs(arguments[i++]);\n if (larg < arg) {\n div = larg / arg;\n sum = sum * div * div + 1;\n larg = arg;\n } else if (arg > 0) {\n div = arg / larg;\n sum += div * div;\n } else sum += arg;\n }\n return larg === Infinity ? Infinity : larg * Math.sqrt(sum);\n }\n});\n",null]}
token_count: 626
max_stars_count: 12,278
<filename>ReactNativeFrontend/ios/Pods/boost/boost/poly_collection/detail/is_invocable.hpp<gh_stars>1000+ /* Copyright 2016-2017 <NAME>. * Distributed under the Boost Software License, Version 1.0. * (See accompanying file LICENSE_1_0.txt or copy at * http://www.boost.org/LICENSE_1_0.txt) * * See http://www.boost.org/libs/poly_collection for library home page. */ #ifndef BOOST_POLY_COLLECTION_DETAIL_IS_INVOCABLE_HPP #define BOOST_POLY_COLLECTION_DETAIL_IS_INVOCABLE_HPP #if defined(_MSC_VER) #pragma once #endif #include <functional> #include <type_traits> /* technique explained at * http://bannalia.blogspot.com/2016/09/compile-time-checking-existence-of.html */ namespace boost{ namespace poly_collection{ namespace detail{ namespace is_invocable_fallback{ template <typename F,typename... Args> struct is_invocable: std::is_constructible< std::function<void(Args...)>, std::reference_wrapper<typename std::remove_reference<F>::type> > {}; template <typename R,typename F,typename... Args> struct is_invocable_r: std::is_constructible< std::function<R(Args...)>, std::reference_wrapper<typename std::remove_reference<F>::type> > {}; struct hook{}; }}}} namespace std{ template<> struct is_void< ::boost::poly_collection::detail::is_invocable_fallback::hook>: std::false_type { template<typename F,typename... Args> static constexpr bool is_invocable_f() { using namespace ::boost::poly_collection::detail::is_invocable_fallback; return is_invocable<F,Args...>::value; } template<typename R,typename F,typename... Args> static constexpr bool is_invocable_r_f() { using namespace ::boost::poly_collection::detail::is_invocable_fallback; return is_invocable_r<R,F,Args...>::value; } }; } /* namespace std */ namespace boost{ namespace poly_collection{ namespace detail{ template<typename F,typename... Args> struct is_invocable:std::integral_constant< bool, std::is_void<is_invocable_fallback::hook>::template is_invocable_f<F,Args...>() >{}; template<typename R,typename F,typename... Args> struct is_invocable_r:std::integral_constant< bool, std::is_void<is_invocable_fallback::hook>::template is_invocable_r_f<R,F,Args...>() >{}; } /* namespace poly_collection::detail */ } /* namespace poly_collection */ } /* namespace boost */ #endif
token_count: 906
max_stars_count: 777
<reponame>google-ar/chromium
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef MOJO_PUBLIC_CPP_BINDINGS_LIB_UNION_ACCESSOR_H_
#define MOJO_PUBLIC_CPP_BINDINGS_LIB_UNION_ACCESSOR_H_

namespace mojo {
namespace internal {

// When serializing and deserializing Unions, it is necessary to access
// the private fields and methods of the Union. This allows us to do that
// without leaking those same fields and methods in the Union interface.
// All Union wrappers are friends of this class allowing such access.
template <typename U>
class UnionAccessor {
 public:
  explicit UnionAccessor(U* u) : u_(u) {}

  typename U::Union_* data() { return &(u_->data_); }

  typename U::Tag* tag() { return &(u_->tag_); }

  void SwitchActive(typename U::Tag new_tag) { u_->SwitchActive(new_tag); }

 private:
  U* u_;
};

}  // namespace internal
}  // namespace mojo

#endif  // MOJO_PUBLIC_CPP_BINDINGS_LIB_UNION_ACCESSOR_H_
token_count: 347
max_stars_count: 421
// <Snippet1> #using <system.dll> #using <system.messaging.dll> using namespace System; using namespace System::Messaging; /// <summary> /// Provides a container class for the example. /// </summary> ref class MyNewQueue { public: //************************************************* // Sends a message to a queue. //************************************************* void SendMessageTransactional() { // Connect to a queue on the local computer. MessageQueue^ myQueue = gcnew MessageQueue( ".\\myTransactionalQueue" ); // Send a message to the queue. if ( myQueue->Transactional == true ) { // Create a transaction. MessageQueueTransaction^ myTransaction = gcnew MessageQueueTransaction; // Begin the transaction. myTransaction->Begin(); // Send the message. myQueue->Send( "My Message Data.", myTransaction ); // Commit the transaction. myTransaction->Commit(); } return; } //************************************************* // Receives a message containing an Order. //************************************************* void ReceiveMessageTransactional() { // Connect to a transactional queue on the local computer. MessageQueue^ myQueue = gcnew MessageQueue( ".\\myTransactionalQueue" ); // Set the formatter. array<Type^>^p = gcnew array<Type^>(1); p[ 0 ] = String::typeid; myQueue->Formatter = gcnew XmlMessageFormatter( p ); // Create a transaction. MessageQueueTransaction^ myTransaction = gcnew MessageQueueTransaction; try { // Begin the transaction. myTransaction->Begin(); // Receive the message. Message^ myMessage = myQueue->Receive( myTransaction ); String^ myOrder = static_cast<String^>(myMessage->Body); // Display message information. Console::WriteLine( myOrder ); // Commit the transaction. myTransaction->Commit(); } catch ( MessageQueueException^ e ) { // Handle nontransactional queues. if ( e->MessageQueueErrorCode == MessageQueueErrorCode::TransactionUsage ) { Console::WriteLine( "Queue is not transactional." ); } // Else catch other sources of a MessageQueueException. // Roll back the transaction. myTransaction->Abort(); } // Catch other exceptions as necessary, such as // InvalidOperationException, thrown when the formatter // cannot deserialize the message. return; } }; //************************************************* // Provides an entry point into the application. // // This example sends and receives a message from // a transactional queue. //************************************************* int main() { // Create a new instance of the class. MyNewQueue^ myNewQueue = gcnew MyNewQueue; // Send a message to a queue. myNewQueue->SendMessageTransactional(); // Receive a message from a queue. myNewQueue->ReceiveMessageTransactional(); return 0; } // </Snippet1>
token_count: 1,212
max_stars_count: 364
package ca.uhn.fhir.jpa.cache; import ca.uhn.fhir.context.FhirContext; import ca.uhn.fhir.jpa.cache.config.RegisteredResourceListenerFactoryConfig; import ca.uhn.fhir.jpa.model.sched.ISchedulerService; import ca.uhn.fhir.jpa.searchparam.SearchParameterMap; import ca.uhn.fhir.jpa.searchparam.matcher.InMemoryMatchResult; import ca.uhn.fhir.jpa.searchparam.matcher.InMemoryResourceMatcher; import ca.uhn.fhir.jpa.searchparam.matcher.SearchParamMatcher; import ca.uhn.fhir.parser.DataFormatException; import com.google.common.collect.Lists; import org.apache.commons.lang3.time.DateUtils; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.boot.test.mock.mockito.MockBean; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; import org.springframework.context.annotation.Import; import org.springframework.test.context.junit.jupiter.SpringExtension; import java.util.HashSet; import java.util.List; import java.util.Set; import java.util.stream.Collectors; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.hasSize; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @ExtendWith(SpringExtension.class) class ResourceChangeListenerRegistryImplTest { private static final FhirContext ourFhirContext = FhirContext.forR4(); public static final String PATIENT_RESOURCE_NAME = "Patient"; public static final String OBSERVATION_RESOURCE_NAME = "Observation"; private static final long TEST_REFRESH_INTERVAL_MS = DateUtils.MILLIS_PER_HOUR; @Autowired ResourceChangeListenerRegistryImpl myResourceChangeListenerRegistry; @Autowired ResourceChangeListenerCacheFactory myResourceChangeListenerCacheFactory; @MockBean private ISchedulerService mySchedulerService; @MockBean private IResourceVersionSvc myResourceVersionSvc; @MockBean private ResourceChangeListenerCacheRefresherImpl myResourceChangeListenerCacheRefresher; @MockBean private InMemoryResourceMatcher myInMemoryResourceMatcher; @MockBean private SearchParamMatcher mySearchParamMatcher; private final IResourceChangeListener myTestListener = mock(IResourceChangeListener.class); private static final SearchParameterMap ourMap = SearchParameterMap.newSynchronous(); @Configuration @Import(RegisteredResourceListenerFactoryConfig.class) static class SpringContext { @Bean public IResourceChangeListenerRegistry resourceChangeListenerRegistry() { return new ResourceChangeListenerRegistryImpl(); } @Bean public FhirContext fhirContext() { return ourFhirContext; } } @BeforeEach public void before() { Set<IResourceChangeListenerCache> entries = new HashSet<>(); IResourceChangeListenerCache cache = myResourceChangeListenerCacheFactory.create(PATIENT_RESOURCE_NAME, ourMap, myTestListener, TEST_REFRESH_INTERVAL_MS); entries.add(cache); when(myInMemoryResourceMatcher.canBeEvaluatedInMemory(any(), any())).thenReturn(InMemoryMatchResult.successfulMatch()); } @Test public void addingListenerForNonResourceFails() { try { myResourceChangeListenerRegistry.registerResourceResourceChangeListener("Foo", 
ourMap, myTestListener, TEST_REFRESH_INTERVAL_MS); fail(); } catch (DataFormatException e) { assertEquals("Unknown resource name \"Foo\" (this name is not known in FHIR version \"R4\")", e.getMessage()); } } @Test public void addingNonInMemorySearchParamFails() { try { mockInMemorySupported(InMemoryMatchResult.unsupportedFromReason("TEST REASON")); myResourceChangeListenerRegistry.registerResourceResourceChangeListener(PATIENT_RESOURCE_NAME, ourMap, myTestListener, TEST_REFRESH_INTERVAL_MS); fail(); } catch (IllegalArgumentException e) { assertEquals("SearchParameterMap SearchParameterMap[] cannot be evaluated in-memory: TEST REASON. Only search parameter maps that can be evaluated in-memory may be registered.", e.getMessage()); } } private void mockInMemorySupported(InMemoryMatchResult theTheInMemoryMatchResult) { when(myInMemoryResourceMatcher.canBeEvaluatedInMemory(ourMap, ourFhirContext.getResourceDefinition(PATIENT_RESOURCE_NAME))).thenReturn(theTheInMemoryMatchResult); } @AfterEach public void after() { myResourceChangeListenerRegistry.clearListenersForUnitTest(); ResourceChangeListenerCache.setNowForUnitTests(null); } @Test public void registerUnregister() { IResourceChangeListener listener1 = mock(IResourceChangeListener.class); myResourceChangeListenerRegistry.registerResourceResourceChangeListener(PATIENT_RESOURCE_NAME, ourMap, listener1, TEST_REFRESH_INTERVAL_MS); myResourceChangeListenerRegistry.registerResourceResourceChangeListener(OBSERVATION_RESOURCE_NAME, ourMap, listener1, TEST_REFRESH_INTERVAL_MS); when(mySearchParamMatcher.match(any(), any())).thenReturn(InMemoryMatchResult.successfulMatch()); assertEquals(2, myResourceChangeListenerRegistry.size()); IResourceChangeListener listener2 = mock(IResourceChangeListener.class); myResourceChangeListenerRegistry.registerResourceResourceChangeListener(PATIENT_RESOURCE_NAME, ourMap, listener2, TEST_REFRESH_INTERVAL_MS); assertEquals(3, myResourceChangeListenerRegistry.size()); List<ResourceChangeListenerCache> entries = Lists.newArrayList(myResourceChangeListenerRegistry.iterator()); assertThat(entries, hasSize(3)); List<IResourceChangeListener> listeners = entries.stream().map(ResourceChangeListenerCache::getResourceChangeListener).collect(Collectors.toList()); assertThat(listeners, contains(listener1, listener1, listener2)); List<String> resourceNames = entries.stream().map(IResourceChangeListenerCache::getResourceName).collect(Collectors.toList()); assertThat(resourceNames, contains(PATIENT_RESOURCE_NAME, OBSERVATION_RESOURCE_NAME, PATIENT_RESOURCE_NAME)); IResourceChangeListenerCache firstcache = entries.iterator().next(); // We made a copy assertTrue(ourMap != firstcache.getSearchParameterMap()); myResourceChangeListenerRegistry.unregisterResourceResourceChangeListener(listener1); assertEquals(1, myResourceChangeListenerRegistry.size()); ResourceChangeListenerCache cache = myResourceChangeListenerRegistry.iterator().next(); assertEquals(PATIENT_RESOURCE_NAME, cache.getResourceName()); assertEquals(listener2, cache.getResourceChangeListener()); myResourceChangeListenerRegistry.unregisterResourceResourceChangeListener(listener2); assertEquals(0, myResourceChangeListenerRegistry.size()); } }
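The registration contract this test verifies, distilled into a short sketch; the registry and listener types come from the surrounding package, and the map must be evaluable in-memory or registration throws:

import ca.uhn.fhir.jpa.searchparam.SearchParameterMap;
import org.apache.commons.lang3.time.DateUtils;

public class ListenerRegistrationSketch {
	static void register(IResourceChangeListenerRegistry registry,
						 IResourceChangeListener listener) {
		SearchParameterMap map = SearchParameterMap.newSynchronous();
		// Registers a cache entry that polls for Patient changes hourly.
		registry.registerResourceResourceChangeListener(
			"Patient", map, listener, DateUtils.MILLIS_PER_HOUR);
		// ...later, stop receiving change events:
		registry.unregisterResourceResourceChangeListener(listener);
	}
}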
token_count: 2,150
max_stars_count: 5,411
<reponame>antonand03/hello-my-friend<filename>code/components/rage-formats-x/tests/Convert.cpp<gh_stars>1000+ /* * This file is part of the CitizenFX project - http://citizen.re/ * * See LICENSE and MENTIONS in the root of the source tree for information * regarding licensing. */ #include "StdInc.h" #define RAGE_FORMATS_GAME ny #define RAGE_FORMATS_GAME_NY #include <gtaDrawable.h> #include <phBound.h> #undef RAGE_FORMATS_GAME_NY #define RAGE_FORMATS_GAME five #define RAGE_FORMATS_GAME_FIVE #include <gtaDrawable.h> #include <phBound.h> #include <convert/gtaDrawable_ny_five.h> #include <convert/phBound_ny_five.h> rage::ny::BlockMap* UnwrapRSC5(const wchar_t* fileName); void ConvertDrawableInternal(const wchar_t* fileName) { rage::ny::BlockMap* bm = UnwrapRSC5(fileName); if (!bm) { trace("couldn't open input file...\n"); return; } std::wstring fileExt = std::wstring(wcsrchr(fileName, L'.')); rage::ny::pgStreamManager::SetBlockInfo(bm); auto bm2 = rage::five::pgStreamManager::BeginPacking(); int fileVersion = 0; if (fileExt == L".wbn") { trace("converting bound...\n"); auto bound = (rage::ny::datOwner<rage::ny::phBound>*)bm->blocks[0].data; rage::convert<rage::five::phBound*>(bound->GetChild()); fileVersion = 43; } else if (fileExt == L".wdr") { trace("converting drawable...\n"); auto drawable = (rage::ny::gtaDrawable*)bm->blocks[0].data; rage::convert<rage::five::gtaDrawable*>(drawable); fileVersion = 165; } else if (fileExt == L".wtd") { trace("converting txd...\n"); auto txd = (rage::ny::pgDictionary<rage::ny::grcTexturePC>*)bm->blocks[0].data; rage::convert<rage::five::pgDictionary<rage::five::grcTexturePC>*>(txd); fileVersion = 13; } else { trace("unknown file extension...\n"); return; } rage::five::pgStreamManager::EndPacking(); std::wstring outFileName(fileName); outFileName = outFileName.substr(0, outFileName.length() - 3) + L"y" + fileExt.substr(2); FILE* f = _wfopen(outFileName.c_str(), L"wb"); if (!f) { trace("... couldn't open output file for writing.\n"); return; } size_t outputSize = 0; bm2->Save(fileVersion, [&] (const void* d, size_t s) { fwrite(d, 1, s, f); outputSize += s; }); trace("written successfully - compressed size %d\n", outputSize); fclose(f); for (int i = 0; i < bm->physicalLen + bm->virtualLen; i++) { delete bm->blocks[i].data; } delete bm; } void ConvertDrawable(const wchar_t* from) { ConvertDrawableInternal(from); #if 0 FILE* f = fopen("Y:/dev/ydr/stat_hilberty01.wdr.sys", "rb"); fseek(f, 0, SEEK_END); long len = ftell(f); fseek(f, 0, SEEK_SET); char* buffer = new char[len]; fread(buffer, 1, len, f); fclose(f); long vlen = len; f = fopen("Y:/dev/ydr/stat_hilberty01.wdr.gfx", "rb"); fseek(f, 0, SEEK_END); len = ftell(f); fseek(f, 0, SEEK_SET); char* buffer2 = new char[len]; fread(buffer2, 1, len, f); fclose(f); rage::ny::BlockMap bm; bm.virtualLen = 1; bm.physicalLen = 1; bm.blocks[0].data = buffer; bm.blocks[0].offset = 0; bm.blocks[0].size = vlen; bm.blocks[1].data = buffer2; bm.blocks[1].offset = 0; bm.blocks[1].size = len; rage::ny::pgStreamManager::SetBlockInfo(&bm); rage::ny::gtaDrawable* drawable = (rage::ny::gtaDrawable*)buffer; //ddrawable->Resolve(&bm); //rage::ny::pgStreamManager::BeginPacking(&bm); auto bm2 = rage::five::pgStreamManager::BeginPacking(); auto cdrawable = rage::convert<rage::five::gtaDrawable*>(drawable); rage::five::pgStreamManager::EndPacking(); f = fopen("Y:\\common\\lovely.ydr", "wb"); bm2->Save(165, [&] (const void* d, size_t s) { fwrite(d, 1, s, f); }); fclose(f); #endif }
1,715
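ConvertDrawableInternal above dispatches on the input extension: each RSC5 type gets a fixed output file version (43 for .wbn bounds, 165 for .wdr drawables, 13 for .wtd texture dictionaries), and the output name swaps the leading "w" of the extension for a "y" (.wdr -> .ydr). A minimal Python sketch of just that dispatch and filename rule, with the version table copied from the branches above:

# Illustrative sketch, not part of the original tool.
FILE_VERSIONS = {".wbn": 43, ".wdr": 165, ".wtd": 13}

def output_name_and_version(file_name):
    ext = file_name[file_name.rfind("."):]
    if ext not in FILE_VERSIONS:
        raise ValueError("unknown file extension: " + ext)
    # Drop the old extension, keep the dot, replace 'w' with 'y':
    out_name = file_name[:-len(ext)] + ".y" + ext[2:]
    return out_name, FILE_VERSIONS[ext]

# output_name_and_version("stat_hilberty01.wdr") -> ("stat_hilberty01.ydr", 165)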
347
package org.ovirt.engine.core.vdsbroker.irsbroker; import java.util.ArrayList; import java.util.List; import java.util.Set; import org.ovirt.engine.core.common.businessentities.VDS; import org.ovirt.engine.core.common.businessentities.VDSDomainsData; import org.ovirt.engine.core.compat.Guid; public interface IrsProxy { void dispose(); List<Guid> obtainDomainsReportedAsProblematic(List<VDSDomainsData> vdsDomainsData); void clearVdsFromCache(Guid vdsId, String vdsName); void updateVdsDomainsData(VDS vds, final ArrayList<VDSDomainsData> data); boolean getHasVdssForSpmSelection(); IIrsServer getIrsProxy(); void runInControlledConcurrency(Runnable codeblock); boolean failover(); Guid getCurrentVdsId(); void setCurrentVdsId(Guid value); Guid getPreferredHostId(); void setPreferredHostId(Guid preferredHostId); Set<Guid> getTriedVdssList(); void clearPoolTimers(); void clearCache(); String getIsoDirectory(); void setFencedIrs(Guid fencedIrs); void resetIrs(); }
387
437
import random from abc import ABC, abstractmethod class PayloadScheduler(ABC): """Returns batches from multiple payloads in some order for MTL training""" def __init__(self, model, payloads, split, **kwargs): pass @abstractmethod def get_batches(self, payloads, split, **kwargs): """Returns batches from all payloads in some order until one 'epoch' is reached Args: payloads: a list of Payloads split: only Payloads belonging to this split will be returned Yields: batch: a tuple of (X_batch_dict, Y_batch_dict) payload_name: the name of the payload returned labels_to_tasks: a dict indicating which task each label set belongs to For now, an epoch is defined as one full pass through all datasets. This is required because of assumptions currently made in the logger and training loop about the number of batches that will be seen per epoch. """ pass class ProportionalScheduler(PayloadScheduler): """Returns batches proportional to the fraction of the total number of batches""" def get_batches(self, payloads, split, **kwargs): # First filter to only those payloads belonging to the given split payloads = [p for p in payloads if p.split == split] data_loaders = [iter(p.data_loader) for p in payloads] batch_counts = [len(p.data_loader) for p in payloads] batch_assignments = [] for payload_idx in range(len(payloads)): batch_assignments.extend([payload_idx] * batch_counts[payload_idx]) random.shuffle(batch_assignments) for payload_idx in batch_assignments: batch = next(data_loaders[payload_idx]) payload = payloads[payload_idx] yield (batch, payload.name, payload.labels_to_tasks)
711
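The ProportionalScheduler above builds one index entry per batch, tags it with its payload, and shuffles the whole list, so each payload is drawn in proportion to its share of the total batch count while every batch is still seen exactly once per epoch. A standalone sketch of that interleaving logic, with plain counts standing in for the payload data loaders (the function name here is illustrative, not part of the original API):

import random

def proportional_order(batch_counts):
    # Payload i contributes batch_counts[i] entries, so it is drawn
    # with probability proportional to its share of all batches.
    assignments = []
    for payload_idx, count in enumerate(batch_counts):
        assignments.extend([payload_idx] * count)
    random.shuffle(assignments)
    return assignments

# Three payloads with 4, 2, and 1 batches -> a 7-step epoch such as
# [0, 2, 0, 1, 0, 1, 0]; every batch appears exactly once.
print(proportional_order([4, 2, 1]))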
364
#include "iris_rtc_engine.h" #include <flutter/method_channel.h> #include <flutter/standard_method_codec.h> class CallApiMethodCallHandler { public: CallApiMethodCallHandler(agora::iris::rtc::IrisRtcEngine *engine); ~CallApiMethodCallHandler(); virtual void HandleMethodCall(const flutter::MethodCall<flutter::EncodableValue> &method_call, std::unique_ptr<flutter::MethodResult<flutter::EncodableValue>> result); virtual int32_t CallApi(int32_t api_type, const char *params, char *result); virtual int32_t CallApi(int32_t api_type, const char *params, void *buffer, char *result); virtual std::string CallApiError(int32_t ret); protected: agora::iris::rtc::IrisRtcEngine *irisRtcEngine_; };
330
577
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ IRSA Query Tool =============== This module contains various methods for querying the IRSA Catalog Query Service(CatQuery). """ from astropy import config as _config class Conf(_config.ConfigNamespace): """ Configuration parameters for `astroquery.ipac.irsa`. """ server = _config.ConfigItem( 'https://irsa.ipac.caltech.edu/cgi-bin/Gator/nph-query', 'Name of the IRSA mirror to use.') gator_list_catalogs = _config.ConfigItem( 'https://irsa.ipac.caltech.edu/cgi-bin/Gator/nph-scan', 'URL from which to list all the public catalogs in IRSA.') row_limit = _config.ConfigItem( 500, 'Maximum number of rows to retrieve in result') timeout = _config.ConfigItem( 60, 'Time limit for connecting to the IRSA server.') conf = Conf() from .core import Irsa, IrsaClass __all__ = ['Irsa', 'IrsaClass', 'Conf', 'conf', ]
393
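The ConfigItem values above are module-level defaults; astropy's config machinery lets callers override them persistently or temporarily at runtime. A short usage sketch, assuming astroquery is installed (the override values shown are illustrative):

from astroquery.ipac.irsa import conf

conf.row_limit = 1000  # raise the default cap of 500 rows
with conf.set_temp("timeout", 120):  # temporary override, restored on exit
    pass  # queries issued here would use the 120 s timeout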
1,968
import rumps rumps.debug_mode(True) @rumps.clicked('Print Something') def print_something(_): rumps.alert(message='something', ok='YES!', cancel='NO!') @rumps.clicked('On/Off Test') def on_off_test(_): print_button = app.menu['Print Something'] if print_button.callback is None: print_button.set_callback(print_something) else: print_button.set_callback(None) @rumps.clicked('Clean Quit') def clean_up_before_quit(_): print('execute clean up code') rumps.quit_application() app = rumps.App('<NAME>', menu=['Print Something', 'On/Off Test', 'Clean Quit'], quit_button=None) app.run()
238
852
/*! \brief Implementation of methods of TTClusterAlgorithm_neighbor * \details Here, in the source file, the methods which do depend * on the specific type <T> that can fit the template. * * \author <NAME> * \date 2013, Jul 15 * */ #include "L1Trigger/TrackTrigger/interface/TTClusterAlgorithm_neighbor.h" /// Clustering operations /// Specialize template for Phase2TrackerDigis template <> void TTClusterAlgorithm_neighbor<Ref_Phase2TrackerDigi_>::Cluster( std::vector<std::vector<Ref_Phase2TrackerDigi_> >& output, const std::vector<Ref_Phase2TrackerDigi_>& input) const { /// Prepare output output.clear(); /// Loop over all input hits and delete /// them once clustered std::vector<bool> used(input.size(), false); for (unsigned int i = 0; i < input.size(); i++) { if (used[i]) continue; std::vector<Ref_Phase2TrackerDigi_> cluster; cluster.push_back(input[i]); used[i] = true; if (i < input.size() - 1) { addNeighbors(cluster, input, i + 1, used); } output.push_back(cluster); } /// End of iteration } /// End of Clustering Operations /// Check if the hit is a neighbour /// Specialize template for Phase2TrackerDigis template <> bool TTClusterAlgorithm_neighbor<Ref_Phase2TrackerDigi_>::isANeighbor(const Ref_Phase2TrackerDigi_& center, const Ref_Phase2TrackerDigi_& mayNeigh) const { unsigned int rowdist = std::abs((int)(center->row()) - (int)(mayNeigh->row())); unsigned int coldist = std::abs((int)(center->column()) - (int)(mayNeigh->column())); return rowdist <= 1 && coldist <= 1; } /// Add neighbours to the cluster /// Specialize template for Phase2TrackerDigis template <> void TTClusterAlgorithm_neighbor<Ref_Phase2TrackerDigi_>::addNeighbors(std::vector<Ref_Phase2TrackerDigi_>& cluster, const std::vector<Ref_Phase2TrackerDigi_>& input, unsigned int startVal, std::vector<bool>& used) const { /// This following line is necessary to ensure the /// iterators afterward remain valid. cluster.reserve(input.size()); typename std::vector<Ref_Phase2TrackerDigi_>::iterator clusIter; /// Loop over hits for (clusIter = cluster.begin(); clusIter < cluster.end(); clusIter++) { /// Loop over candidate neighbours for (unsigned int i = startVal; i < input.size(); i++) { /// Is it really a neighbour? if (isANeighbor(*clusIter, input[i])) { cluster.push_back(input[i]); used[i] = true; } } /// End of loop over candidate neighbours } /// End of loop over hits }
1,141
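The clustering pass above grows each cluster greedily: every hit appended to the cluster is itself scanned against the remaining hits for 8-connected neighbours (row and column each differing by at most one), so transitively connected hits end up in one cluster. A sketch of the same algorithm in Python, with hits as (row, column) tuples; this mirrors the logic, not the CMSSW types:

def is_neighbor(a, b):
    # 8-connectivity: row and column each differ by at most one pixel.
    return abs(a[0] - b[0]) <= 1 and abs(a[1] - b[1]) <= 1

def cluster(hits):
    used = [False] * len(hits)
    clusters = []
    for i, seed in enumerate(hits):
        if used[i]:
            continue
        current, used[i] = [seed], True
        # Grow the cluster: members appended during the loop are
        # themselves visited and scanned for further neighbours.
        for member in current:
            for j in range(i + 1, len(hits)):
                if not used[j] and is_neighbor(member, hits[j]):
                    current.append(hits[j])
                    used[j] = True
        clusters.append(current)
    return clusters

# cluster([(0, 0), (0, 1), (5, 5)]) -> [[(0, 0), (0, 1)], [(5, 5)]]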
1,008
<filename>Template/convert_js_to_python.py ''' Convert Google Earth Engine JavaScript to Python script. To convert one Earth Engine JavaScript to Python script: js_to_python(in_file_path, out_file_path) To convert all Earth Engine JavaScripts in a folder recursively: js_to_python_dir(in_dir, out_dir) ''' # Authors: Dr. <NAME> (https://wetlands.io) # License: MIT import os import glob import random import string import argparse import subprocess from pathlib import Path from collections import deque def random_string(string_length=3): """Generate a random string of fixed length. Args: stringLength (int, optional): Fixed length. Defaults to 3. Returns: str: A random string """ # random.seed(1001) letters = string.ascii_lowercase return ''.join(random.choice(letters) for i in range(string_length)) def find_matching_bracket(lines, start_line_index, start_char_index, matching_char='{'): """Find the position of the matching closing bracket from a list of lines. Args: lines (list): The input list of lines. start_line_index (int): The line index where the starting bracket is located. start_char_index (int): The position index of the starting bracket. matching_char (str, optional): The starting bracket to search for. Defaults to '{'. Returns: matching_line_index (int): The line index where the matching closing bracket is located. matching_char_index (int): The position index of the matching closing bracket. """ matching_line_index = -1 matching_char_index = -1 matching_chars = { '{': '}', '(': ')', '[': ']' } if matching_char not in matching_chars.keys(): print("The matching character must be one of the following: {}".format( ', '.join(matching_chars.keys()))) return matching_line_index, matching_char_index # Create a deque to use it as a stack. d = deque() for line_index in range(start_line_index, len(lines)): line = lines[line_index] # deal with the line where the starting bracket is located. if line_index == start_line_index: line = lines[line_index][start_char_index:] for index, item in enumerate(line): # Pops a starting bracket for each closing bracket if item == matching_chars[matching_char]: d.popleft() # Push all starting brackets elif item == matching_char: d.append(matching_char) # If deque becomes empty if not d: matching_line_index = line_index if line_index == start_line_index: matching_char_index = start_char_index + index else: matching_char_index = index return matching_line_index, matching_char_index return matching_line_index, matching_char_index # extract parameters and wrap them with single/double quotes if needed. def format_params(line, sep=':'): """Format keys in a dictionary and adds quotes to the keys. For example, {min: 0, max: 10} will result in ('min': 0, 'max': 10) Args: line (str): A string. sep (str, optional): Separator. Defaults to ':'. 
Returns: [str]: A string with keys quoted """ # print(line) new_line = line prefix = "" suffix = "" if line.strip().startswith('for'): # skip for loop return line # find all occurrences of a substring def find_all(a_str, sub): start = 0 while True: start = a_str.find(sub, start) if start == -1: return yield start start += len(sub) # use start += 1 to find overlapping matches indices = list(find_all(line, sep)) count = len(indices) if "{" in line: bracket_index = line.index("{") if bracket_index < indices[0]: prefix = line[:bracket_index+1] line = line[bracket_index+1:] if count > 0: items = line.split(sep) if count == 1: for i in range(0, count): item = items[i].strip() if ('"' not in item) and ("'" not in item): new_item = "'" + item + "'" items[i] = items[i] .replace(item, new_item) new_line = ':'.join(items) elif count > 1: for i in range(0, count): item = items[i] if ',' in item: subitems = item.split(',') subitem = subitems[-1] if ('"' not in subitem) and ("'" not in subitem): new_subitem = "'" + subitem.strip() + "'" subitems[-1] = subitems[-1].replace( subitem, new_subitem) items[i] = ', '.join(subitems) else: if ('"' not in item) and ("'" not in item): new_item = "'" + item.strip() + "'" padding = len(item) - len(item.strip()) items[i] = " " * padding + item.replace(item, new_item) new_line = ':'.join(items) return prefix + new_line def use_math(lines): """Check if an Earth Engine uses Math library Args: lines (list): An Earth Engine JavaScript. Returns: [bool]: Returns True if the script contains 'Math.'. For example 'Math.PI', 'Math.pow' """ math_import = False for line in lines: if 'Math.' in line: math_import = True return math_import def convert_for_loop(line): """Convert JavaScript for loop to Python for loop. Args: line (str): Input JavaScript for loop Returns: str: Converted Python for loop. 
""" new_line = '' if 'var ' in line: line = line.replace('var ', '') start_index = line.index('(') end_index = line.index(')') prefix = line[:(start_index)] suffix = line[(end_index + 1):] params = line[(start_index + 1): end_index] if ' in ' in params and params.count(';') == 0: new_line = prefix + '{}:'.format(params) + suffix return new_line items = params.split('=') param_name = items[0].strip() items = params.split(';') subitems = [] for item in items: subitems.append(item.split(' ')[-1]) start = subitems[0] end = subitems[1] step = subitems[2] if '++' in step: step = 1 elif '--' in step: step = -1 prefix = line[:(start_index)] suffix = line[(end_index + 1):] new_line = prefix + '{} in range({}, {}, {}):'.format(param_name, start, end, step) + suffix return new_line def check_map_functions(input_lines): """Extract Earth Engine map function Args: input_lines (list): List of Earth Engine JavaScrips Returns: list: Output JavaScript with map function """ output_lines = [] for index, line in enumerate(input_lines): if ('.map(function' in line) or ('.map (function') in line: bracket_index = line.index("{") matching_line_index, matching_char_index = find_matching_bracket(input_lines, index, bracket_index) func_start_index = line.index('function') func_name = 'func_' + random_string() func_header = line[func_start_index:].replace('function', 'function ' + func_name) output_lines.append('\n') output_lines.append(func_header) for sub_index, tmp_line in enumerate(input_lines[index+1: matching_line_index]): output_lines.append(tmp_line) input_lines[index+1+sub_index] = '' header_line = line[:func_start_index] + func_name header_line = header_line.rstrip() func_footer = input_lines[matching_line_index][:matching_char_index+1] output_lines.append(func_footer) footer_line = input_lines[matching_line_index][matching_char_index+1:].strip() if footer_line == ')' or footer_line == ');': header_line = header_line + footer_line footer_line = '' input_lines[matching_line_index] = footer_line output_lines.append(header_line) output_lines.append(footer_line) else: output_lines.append(line) return output_lines # Convert GEE JavaScripts to Python def js_to_python(in_file, out_file=None, use_qgis=True, github_repo=None): """Convert an Earth Engine JavaScript to Python script. Args: in_file (str): File path of the input JavaScript. out_file (str, optional): File path of the output Python script. Defaults to None. use_qgis (bool, optional): Whether to add "from ee_plugin import Map \n" to the output script. Defaults to True. github_repo (str, optional): GitHub repo url. Defaults to None. 
Returns: list : Python script """ if out_file is None: out_file = in_file.replace(".js", ".py") root_dir = os.path.dirname(os.path.abspath(__file__)) if not os.path.isfile(in_file): in_file = os.path.join(root_dir, in_file) if not os.path.isfile(out_file): out_file = os.path.join(root_dir, out_file) is_python = False add_github_url = False qgis_import_str = '' if use_qgis: qgis_import_str = "from ee_plugin import Map \n" github_url = "" if github_repo is not None: github_url = "# GitHub URL: " + github_repo + in_file + "\n\n" math_import = False math_import_str = "" lines = [] with open(in_file) as f: lines = f.readlines() math_import = use_math(lines) for line in lines: line = line.strip() if line == 'import ee': is_python = True if math_import: math_import_str = "import math\n" output = "" if is_python: # only update the GitHub URL if it is already a GEE Python script output = github_url + ''.join(map(str, lines)) else: # deal with JavaScript header = github_url + "import ee \n" + qgis_import_str + math_import_str function_defs = [] output = header + "\n" with open(in_file) as f: lines = f.readlines() print('Processing {}'.format(in_file)) lines = check_map_functions(lines) for index, line in enumerate(lines): if ('/* color' in line) and ('*/' in line): line = line[:line.index('/*')].lstrip() + line[(line.index('*/')+2):] if ("= function" in line) or ("=function" in line) or line.strip().startswith("function"): bracket_index = line.index("{") matching_line_index, matching_char_index = find_matching_bracket( lines, index, bracket_index) line = line[:bracket_index] + line[bracket_index+1:] if matching_line_index == index: line = line[:matching_char_index] + \ line[matching_char_index+1:] else: tmp_line = lines[matching_line_index] lines[matching_line_index] = tmp_line[:matching_char_index] + \ tmp_line[matching_char_index+1:] line = line.replace(" = function", "").replace( "=function", '').replace("function ", '') line = " " * (len(line) - len(line.lstrip())) + "def " + line.strip() + ":" elif "{" in line: bracket_index = line.index("{") matching_line_index, matching_char_index = find_matching_bracket( lines, index, bracket_index) if (matching_line_index == index) and (':' in line): pass elif ('for (' in line) or ('for(' in line): line = convert_for_loop(line) lines[index] = line bracket_index = line.index("{") matching_line_index, matching_char_index = find_matching_bracket(lines, index, bracket_index) tmp_line = lines[matching_line_index] lines[matching_line_index] = tmp_line[:matching_char_index] + tmp_line[matching_char_index+1:] line = line.replace('{', '') if line is None: line = '' line = line.replace("//", "#") line = line.replace("var ", "", 1) line = line.replace("/*", '#') line = line.replace("*/", '#') line = line.replace("true", "True").replace("false", "False") line = line.replace("null", "{}") line = line.replace(".or", ".Or") line = line.replace(".and", '.And') line = line.replace(".not", '.Not') line = line.replace('visualize({', 'visualize(**{') line = line.replace('Math.PI', 'math.pi') line = line.replace('Math.', 'math.') line = line.replace('= new', '=') line = line.rstrip() if line.endswith("+"): line = line + " \\" elif line.endswith(";"): line = line[:-1] if line.lstrip().startswith('*'): line = line.replace('*', '#') if (":" in line) and (not line.strip().startswith("#")) and (not line.strip().startswith('def')) and (not line.strip().startswith(".")): line = format_params(line) if index < (len(lines) - 1) and line.lstrip().startswith("#") and 
lines[index+1].lstrip().startswith("."): line = '' if line.lstrip().startswith("."): if "#" in line: line = line[:line.index("#")] output = output.rstrip() + " " + "\\" + "\n" + line + "\n" else: output += line + "\n" out_dir = os.path.dirname(out_file) if not os.path.exists(out_dir): os.makedirs(out_dir) with open(out_file, 'w') as f: f.write(output) return output def js_to_python_dir(in_dir, out_dir=None, use_qgis=True, github_repo=None): """Convert all Earth Engine JavaScripts in a folder recursively to Python scripts Args: in_dir (str): The input folder containing Earth Engine JavaScripts. out_dir (str, optional): The output folder containing Earth Engine Python scripts. Defaults to None. use_qgis (bool, optional): Whether to add "from ee_plugin import Map \n" to the output script. Defaults to True. github_repo (str, optional): GitHub repo url. Defaults to None. """ if out_dir is None: out_dir = in_dir for in_file in Path(in_dir).rglob('*.js'): out_file = os.path.splitext(in_file)[0] + ".py" out_file = out_file.replace(in_dir, out_dir) js_to_python(in_file, out_file, use_qgis, github_repo) # print("Ouput Python script folder: {}".format(out_dir)) # def dict_key_str(line): # keys = """asFloat bands bestEffort bias collection color connectedness crs eeObject eightConnected format gain gamma # geometry groupField groupName image iterations kernel labelBand leftField magnitude max maxDistance # maxOffset maxPixels maxSize minBucketWidth min name normalize opacity palette patchWidth # radius reducer referenceImage region rightField scale selectors shown sigma size source # strokeWidth threshold units visParams width""".split() # for key in keys: # if ":" in line and key in line: # line = line.replace(key + ":", "'" + key + "':") # return line def remove_qgis_import(in_file): """Remove 'from ee_plugin import Map' from an Earth Engine Python script. Args: in_file (str): Input file path of the Python script. Returns: list: List of lines 'from ee_plugin import Map' removed. """ start_index = 0 with open(in_file) as f: lines = f.readlines() for index, line in enumerate(lines): if 'from ee_plugin import Map' in line: start_index = index i = 1 while True: line_tmp = lines[start_index + i].strip() if line_tmp != '': return lines[start_index + i:] else: i = i + 1 def template_header(in_template): """Extract header from the notebook template. Args: in_template (str): Input notebook template file path. Returns: list: List of lines. """ header = [] template_lines = [] header_end_index = 0 with open(in_template) as f: template_lines = f.readlines() for index, line in enumerate(template_lines): if '## Add Earth Engine Python script' in line: header_end_index = index + 5 header = template_lines[:header_end_index] return header def template_footer(in_template): """Extract footer from the notebook template. Args: in_template (str): Input notebook template file path. Returns: list: List of lines. """ footer = [] template_lines = [] footer_start_index = 0 with open(in_template) as f: template_lines = f.readlines() for index, line in enumerate(template_lines): if '## Display Earth Engine data layers' in line: footer_start_index = index - 3 footer = ['\n'] + template_lines[footer_start_index:] return footer def py_to_ipynb(in_file, template_file, out_file=None, github_username=None, github_repo=None): """Convert Earth Engine Python script to Jupyter notebook. Args: in_file (str): Input Earth Engine Python script. template_file (str): Input Jupyter notebook template. 
out_file (str, optional)): Output Jupyter notebook. github_username (str, optional): GitHub username. Defaults to None. github_repo (str, optional): GitHub repo name. Defaults to None. """ if out_file is None: out_file = in_file.replace('.py', '.ipynb') out_py_file = in_file.replace(".py", "_nb.py") content = remove_qgis_import(in_file) header = template_header(template_file) footer = template_footer(template_file) if (github_username is not None) and (github_repo is not None): out_py_path = str(in_file).split('/') index = out_py_path.index(github_repo) out_py_relative_path = '/'.join(out_py_path[index+1:]) out_ipynb_relative_path = out_py_relative_path.replace('.py', '.ipynb') new_header = [] for line in header: line = line.replace('giswqs', github_username) line = line.replace('earthengine-py-notebooks', github_repo) line = line.replace('Template/template.ipynb', out_ipynb_relative_path) new_header.append(line) header = new_header if content != None: out_text = header + content + footer else: out_text = header + footer if not os.path.exists(os.path.dirname(out_py_file)): os.makedirs(os.path.dirname(out_py_file)) with open(out_py_file, 'w') as f: f.writelines(out_text) try: command = 'ipynb-py-convert ' + out_py_file + ' ' + out_file print(os.popen(command).read().rstrip()) except: print('Please install ipynb-py-convert using the following command:\n') print('pip install ipynb-py-convert') os.remove(out_py_file) def py_to_ipynb_dir(in_dir, template_file, out_dir=None, github_username=None, github_repo=None): """Convert Earth Engine Python scripts in a folder recursively to Jupyter notebooks. Args: in_dir (str): Input folder containing Earth Engine Python scripts. template_file (str): Input jupyter notebook template file. out_dir str, optional): Ouput folder. Defaults to None. github_username (str, optional): GitHub username. Defaults to None. github_repo (str, optional): GitHub repo name. Defaults to None. """ files = list(Path(in_dir).rglob('*.py')) if out_dir is None: out_dir = in_dir for file in files: in_file = str(file) out_file = in_file.replace(in_dir, out_dir).replace('.py', '.ipynb') py_to_ipynb(in_file, template_file, out_file, github_username, github_repo) def execute_notebook(in_file): """Execute a Jupyter notebook and save output cells Args: in_file (str): Input Jupyter notebook. """ command = 'jupyter nbconvert --to notebook --execute ' + in_file + ' --inplace' print(os.popen(command).read().rstrip()) def execute_notebook_dir(in_dir): """Execute all Jupyter notebooks in the given directory recursively and save output cells. Args: in_dir (str): Input folder containing notebooks. """ files = list(Path(in_dir).rglob('*.ipynb')) count = len(files) if files is not None: for index, file in enumerate(files): in_file = str(file) print('Processing {}/{} ...'.format(index+1, count)) execute_notebook(in_file) if __name__ == '__main__': ## Convert an Earth Engine JavaScript to Python script. root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) in_file_path = os.path.join(root_dir, "JavaScripts/Image/NormalizedDifference.js") # change this path to your JavaScript file out_file_path = os.path.splitext(in_file_path)[0] + ".py" js_to_python(in_file_path, out_file_path) print("Python script saved at: {}".format(out_file_path)) # Convert all Earth Engine JavaScripts in a folder recursively to Python scripts. 
in_dir = os.path.join(root_dir, "JavaScripts") out_dir = os.path.join(root_dir, "JavaScripts") js_to_python_dir(in_dir, out_dir, use_qgis=True) print("Python scripts saved at: {}".format(out_dir)) # Convert an Earth Engine Python script to Jupyter notebook. in_template =os.path.join(root_dir, 'Template/template.py') in_file = os.path.join(root_dir, 'JavaScripts/NormalizedDifference.py') out_file = in_file.replace('.py', '.ipynb') py_to_ipynb(in_file, in_template, out_file, 'giswqs', 'earthengine-py-notebooks') # Convert all Earth Engine Python scripts in a folder recursively to Jupyter notebooks. in_dir = os.path.join(root_dir, 'JavaScripts') py_to_ipynb_dir(in_dir, in_template, github_username='giswqs', github_repo='earthengine-py-notebooks') # Execute all Jupyter notebooks in a folder recursively and save the output cells. execute_notebook_dir(in_dir) # parser = argparse.ArgumentParser() # parser.add_argument('--input', type=str, # help="Path to the input JavaScript file") # parser.add_argument('--output', type=str, # help="Path to the output Python file") # args = parser.parse_args() # js_to_python(args.input, args.output)
10,767
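The find_matching_bracket routine above walks the text with a deque used as a stack: each opening bracket pushes, each closing bracket pops, and the first position where the stack empties is the match. A condensed single-string version of the same idea (simplified from the original's list-of-lines interface; the function name is illustrative):

from collections import deque

def matching_index(text, start, open_ch="{"):
    close_ch = {"{": "}", "(": ")", "[": "]"}[open_ch]
    stack = deque()
    for i in range(start, len(text)):
        if text[i] == open_ch:
            stack.append(open_ch)
        elif text[i] == close_ch:
            stack.popleft()  # mirrors the original's popleft usage
            if not stack:
                return i
    return -1

# matching_index("f(a, g(b))", 1, "(") -> 9

Since every pushed element is identical, the deque is effectively a nesting-depth counter; a plain integer would do the same job, but the sketch keeps the deque to stay close to the original.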
451
from django.dispatch import Signal hook_event = Signal(providing_args=['action', 'instance']) raw_hook_event = Signal(providing_args=['event_name', 'payload', 'user']) hook_sent_event = Signal(providing_args=['payload', 'instance', 'hook'])
81
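A compatibility note on the snippet above: the providing_args argument was purely documentary, was deprecated in Django 3.0, and was removed in Django 4.0, where passing it raises a TypeError. On modern Django the same signals are declared without it (a sketch keeping the original names):

from django.dispatch import Signal

# providing_args was never enforced; senders still pass action,
# instance, payload, etc. as keyword arguments to send().
hook_event = Signal()
raw_hook_event = Signal()
hook_sent_event = Signal()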
543
package com.riiablo.engine.server.component; import com.artemis.Component; import com.artemis.annotations.PooledWeaver; @PooledWeaver public class Running extends Component {}
53
575
// Copyright 2015 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef ASH_SYSTEM_NETWORK_VPN_LIST_VIEW_H_ #define ASH_SYSTEM_NETWORK_VPN_LIST_VIEW_H_ #include <map> #include <string> #include "ash/system/network/network_state_list_detailed_view.h" #include "ash/system/network/vpn_list.h" #include "base/macros.h" #include "base/memory/weak_ptr.h" #include "chromeos/services/network_config/public/mojom/cros_network_config.mojom-forward.h" class PrefRegistrySimple; namespace views { class View; } namespace ash { namespace tray { // A list of VPN providers and networks that shows VPN providers and networks in // a hierarchical layout, allowing the user to see at a glance which provider a // network belongs to. The only exception is the currently connected or // connecting network, which is detached from its provider and moved to the top. // If there is a connected network, a disconnect button is shown next to its // name. // // Disconnected networks are arranged in shill's priority order within each // provider and the providers are arranged in the order of their highest // priority network. Clicking on a disconnected network triggers a connection // attempt. Clicking on the currently connected or connecting network shows its // configuration dialog. Clicking on a provider shows the provider's "add // network" dialog. class VPNListView : public NetworkStateListDetailedView, public VpnList::Observer { public: using VpnProviderPtr = chromeos::network_config::mojom::VpnProviderPtr; VPNListView(DetailedViewDelegate* delegate, LoginStatus login); ~VPNListView() override; // Make following functions publicly accessible for VPNListNetworkEntry. using NetworkStateListDetailedView::SetupConnectedScrollListItem; using NetworkStateListDetailedView::SetupConnectingScrollListItem; // NetworkStateListDetailedView: void UpdateNetworkList() override; bool IsNetworkEntry(views::View* view, std::string* guid) const override; // VpnList::Observer: void OnVpnProvidersChanged() override; // See Shell::RegisterProfilePrefs(). static void RegisterProfilePrefs(PrefRegistrySimple* registry); // views::View: const char* GetClassName() const override; private: using NetworkStateList = std::vector<chromeos::network_config::mojom::NetworkStatePropertiesPtr>; void OnGetNetworkStateList(NetworkStateList networks); // Adds a network to the list. void AddNetwork( const chromeos::network_config::mojom::NetworkStateProperties* network); // Adds the VPN provider identified by |vpn_provider| to the list, along with // any networks that may belong to this provider. Takes ownership of // |vpn_provider|. void AddProviderAndNetworks(VpnProviderPtr vpn_provider, const NetworkStateList& networks); // Finds VPN provider from |providers| that matches given |network|. Then adds // the VPN provider along with any networks that belong to this provider. Will // also remove the match from |providers| to avoid showing duplicate provider // entry in VPN list view. // Returns true if finds a match, returns false otherwise. bool ProcessProviderForNetwork( const chromeos::network_config::mojom::NetworkStateProperties* network, const NetworkStateList& networks, std::vector<VpnProviderPtr>* providers); // Adds all available VPN providers and networks to the list. void AddProvidersAndNetworks(const NetworkStateList& networks); // A mapping from each VPN provider's list entry to the provider. 
std::map<const views::View* const, VpnProviderPtr> provider_view_map_; // A mapping from each network's list entry to the network's guid. std::map<const views::View* const, std::string> network_view_guid_map_; // Whether the list is currently empty (i.e., the next entry added will become // the topmost entry). bool list_empty_ = true; base::WeakPtrFactory<VPNListView> weak_ptr_factory_{this}; DISALLOW_COPY_AND_ASSIGN(VPNListView); }; } // namespace tray } // namespace ash #endif // ASH_SYSTEM_NETWORK_VPN_LIST_VIEW_H_
1,215
1,127
// Copyright (C) 2022 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #include <gtest/gtest.h> #include <common_test_utils/graph_comparator.hpp> TEST(GraphComparatorTests, AllEnablePositiveCheck) { FunctionsComparator comparator(FunctionsComparator::no_default()); std::shared_ptr<ov::Model> function, function_ref; { auto input = std::make_shared<ov::opset8::Parameter>(ngraph::element::i64, ov::Shape{1}); auto constant = ov::opset8::Constant::create(ngraph::element::i64, {1}, {0}); auto add = std::make_shared<ov::opset8::Add>(input, constant); function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add}, ngraph::ParameterVector{ input }); function = ov::clone_model(*function_ref); } comparator.enable(FunctionsComparator::NAMES) .enable(FunctionsComparator::NODES) .enable(FunctionsComparator::CONST_VALUES) .enable(FunctionsComparator::PRECISIONS) .enable(FunctionsComparator::ATTRIBUTES) .enable(FunctionsComparator::RUNTIME_KEYS) .enable(FunctionsComparator::TENSOR_NAMES); auto res = comparator.compare(function, function_ref); ASSERT_TRUE(res.valid) << res.message; } TEST(GraphComparatorTests, CheckbyDefault) { FunctionsComparator comparator(FunctionsComparator::with_default()); std::shared_ptr<ov::Model> function, function_ref; { auto input = std::make_shared<ov::opset8::Parameter>(ngraph::element::i64, ov::Shape{3}); auto input2 = std::make_shared<ov::opset8::Parameter>(ngraph::element::i64, ov::Shape{3}); auto add = std::make_shared<ov::opset8::Add>(input, input2); function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{ input, input2 }); } { auto input = std::make_shared<ov::opset8::Parameter>(ngraph::element::i64, ov::Shape{3}); auto constant = ov::opset8::Constant::create(ngraph::element::i64, {3}, {12}); auto add = std::make_shared<ov::opset8::Add>(input, constant); function = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{ input }); } auto res = comparator.compare(function, function_ref); ASSERT_FALSE(res.valid) << res.message; } TEST(GraphComparatorTests, CheckResultsNumber) { FunctionsComparator comparator(FunctionsComparator::with_default()); std::shared_ptr<ov::Model> function, function_ref; { auto input = std::make_shared<ov::opset8::Parameter>(ngraph::element::i64, ov::Shape{3}); auto input2 = std::make_shared<ov::opset8::Parameter>(ngraph::element::i64, ov::Shape{3}); auto add = std::make_shared<ov::opset8::Add>(input, input2); function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{ input, input2 }); } { auto input = std::make_shared<ov::opset8::Parameter>(ngraph::element::i64, ov::Shape{3}); auto constant = ov::opset8::Constant::create(ngraph::element::i64, {3}, {12}); auto add = std::make_shared<ov::opset8::Add>(input, constant); auto result1 = std::make_shared<ov::opset8::Result>(constant); auto result2 = std::make_shared<ov::opset8::Result>(add); function = std::make_shared<ngraph::Function>(ngraph::ResultVector{ result1, result2 }, ngraph::ParameterVector{ input }); } auto res = comparator.compare(function, function_ref); ASSERT_FALSE(res.valid) << res.message; } TEST(GraphComparatorTests, NamesCheckPositive) { FunctionsComparator comparator(FunctionsComparator::no_default()); std::shared_ptr<ov::Model> function, function_ref; { auto input = std::make_shared<ov::opset8::Parameter>(ngraph::element::i64, ov::Shape{1}); input->set_friendly_name("new_name1"); auto constant = 
ov::opset8::Constant::create(ngraph::element::i64, {1}, {0}); constant->set_friendly_name("new_name2"); auto add = std::make_shared<ov::opset8::Add>(input, constant); add->set_friendly_name("new_name3"); function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{ input }); } { auto input = std::make_shared<ov::opset8::Parameter>(ngraph::element::i64, ov::Shape{1}); input->set_friendly_name("new_name1"); auto constant = ov::opset8::Constant::create(ngraph::element::i64, {1}, {0}); constant->set_friendly_name("new_name2"); auto add = std::make_shared<ov::opset8::Add>(input, constant); add->set_friendly_name("new_name3"); function = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{ input }); } comparator.enable(FunctionsComparator::NAMES) .enable(FunctionsComparator::NODES); auto res = comparator.compare(function, function_ref); ASSERT_TRUE(res.valid) << res.message; } TEST(GraphComparatorTests, NamesCheckNegative) { FunctionsComparator comparator(FunctionsComparator::no_default()); std::shared_ptr<ov::Model> function, function_ref; { auto input = std::make_shared<ov::opset8::Parameter>(ngraph::element::i64, ov::Shape{1}); input->set_friendly_name("new_name1"); auto constant = ov::opset8::Constant::create(ngraph::element::i64, {1}, {0}); constant->set_friendly_name("new_name2"); auto add = std::make_shared<ov::opset8::Add>(input, constant); add->set_friendly_name("new_name3"); function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{ input }); } { auto input = std::make_shared<ov::opset8::Parameter>(ngraph::element::i64, ov::Shape{1}); input->set_friendly_name("new_name1"); auto constant = ov::opset8::Constant::create(ngraph::element::i64, {1}, {0}); constant->set_friendly_name("new_name2"); auto add = std::make_shared<ov::opset8::Add>(input, constant); add->set_friendly_name("new_name3_different"); function = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{ input }); } comparator.enable(FunctionsComparator::NAMES) .enable(FunctionsComparator::NODES); auto res = comparator.compare(function, function_ref); ASSERT_FALSE(res.valid) << res.message; } TEST(GraphComparatorTests, ConstCheckWithoutEnable) { FunctionsComparator comparator(FunctionsComparator::no_default()); std::shared_ptr<ov::Model> function, function_ref; { auto input = std::make_shared<ov::opset8::Parameter>(ngraph::element::i64, ov::Shape{3}); auto constant = ov::opset8::Constant::create(ngraph::element::i64, {3}, {0}); auto add = std::make_shared<ov::opset8::Add>(input, constant); function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{ input }); } { auto input = std::make_shared<ov::opset8::Parameter>(ngraph::element::i64, ov::Shape{3}); auto constant = ov::opset8::Constant::create(ngraph::element::i64, {3}, {12}); auto add = std::make_shared<ov::opset8::Add>(input, constant); function = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{ input }); } comparator.enable(FunctionsComparator::NODES); auto res = comparator.compare(function, function_ref); ASSERT_TRUE(res.valid) << res.message; } TEST(GraphComparatorTests, ConstCheckNegative) { FunctionsComparator comparator(FunctionsComparator::no_default()); std::shared_ptr<ov::Model> function, function_ref; { auto input = std::make_shared<ov::opset8::Parameter>(ngraph::element::i64, ov::Shape{3}); auto constant = 
ov::opset8::Constant::create(ngraph::element::i64, {3}, {0}); auto add = std::make_shared<ov::opset8::Add>(input, constant); function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{ input }); } { auto input = std::make_shared<ov::opset8::Parameter>(ngraph::element::i64, ov::Shape{3}); auto constant = ov::opset8::Constant::create(ngraph::element::i64, {3}, {12}); auto add = std::make_shared<ov::opset8::Add>(input, constant); function = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{ input }); } comparator.enable(FunctionsComparator::CONST_VALUES) .enable(FunctionsComparator::NODES); auto res = comparator.compare(function, function_ref); ASSERT_FALSE(res.valid) << res.message; } TEST(GraphComparatorTests, TensorNamesCheckNegative) { FunctionsComparator comparator(FunctionsComparator::no_default()); std::shared_ptr<ov::Model> function, function_ref; { auto input = std::make_shared<ov::opset8::Parameter>(ngraph::element::i64, ov::Shape{1}); auto constant = ov::opset8::Constant::create(ngraph::element::i64, {1}, {0}); auto add = std::make_shared<ov::opset8::Add>(input, constant); function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{ input }); function = ov::clone_model(*function_ref); add->get_input_tensor(0).set_names({"new_name"}); } comparator.enable(FunctionsComparator::TENSOR_NAMES) .enable(FunctionsComparator::NODES); auto res = comparator.compare(function, function_ref); ASSERT_FALSE(res.valid) << res.message; } TEST(GraphComparatorTests, TensorNamesCheckWithoutEnable) { FunctionsComparator comparator(FunctionsComparator::no_default()); std::shared_ptr<ov::Model> function, function_ref; { auto input = std::make_shared<ov::opset8::Parameter>(ngraph::element::i64, ov::Shape{1}); auto constant = ov::opset8::Constant::create(ngraph::element::i64, {1}, {0}); auto add = std::make_shared<ov::opset8::Add>(input, constant); function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{ input }); function = ov::clone_model(*function_ref); add->get_input_tensor(0).set_names({"new_name"}); } comparator.enable(FunctionsComparator::NODES); auto res = comparator.compare(function, function_ref); ASSERT_TRUE(res.valid) << res.message; } TEST(GraphComparatorTests, CheckAttributesNegative) { FunctionsComparator comparator(FunctionsComparator::no_default()); std::shared_ptr<ov::Model> function, function_ref; { auto input = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::Shape{ 1, 3, 12, 12 }); auto const_weights = ov::opset8::Constant::create(ov::element::f16, ov::Shape{ 1, 3, 3, 3 }, { 1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9 }); auto convert_ins1 = std::make_shared<ov::opset8::Convert>(const_weights, ov::element::f32); auto conv = std::make_shared<ov::opset8::Convolution>(input, convert_ins1, ov::Strides{ 1, 1 }, ov::CoordinateDiff{ 1, 1 }, ov::CoordinateDiff{ 1, 1 }, ov::Strides{ 1, 1 }); function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{ conv }, ngraph::ParameterVector{ input }); } { auto input = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::Shape{ 1, 3, 12, 12 }); auto const_weights = ov::opset8::Constant::create(ov::element::f16, ov::Shape{ 1, 3, 3, 3 }, { 1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9 }); auto convert_ins1 = std::make_shared<ov::opset8::Convert>(const_weights, ov::element::f32); auto conv = 
std::make_shared<ov::opset8::Convolution>(input, convert_ins1, ov::Strides{ 1, 1 }, ov::CoordinateDiff{ 0, 0 }, ov::CoordinateDiff{ 0, 0 }, ov::Strides{ 1, 1 }); function = std::make_shared<ngraph::Function>(ngraph::NodeVector{ conv }, ngraph::ParameterVector{ input }); } comparator.enable(FunctionsComparator::ATTRIBUTES) .enable(FunctionsComparator::NODES); auto res = comparator.compare(function, function_ref); ASSERT_FALSE(res.valid) << res.message; } TEST(GraphComparatorTests, CheckPrecisionsNegative) { FunctionsComparator comparator(FunctionsComparator::no_default()); std::shared_ptr<ov::Model> function, function_ref; { auto input = std::make_shared<ov::opset8::Parameter>(ngraph::element::i64, ov::Shape{3}); auto constant = ov::opset8::Constant::create(ngraph::element::i64, {3}, {0}); auto add = std::make_shared<ov::opset8::Add>(input, constant); function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{ input }); } { auto input = std::make_shared<ov::opset8::Parameter>(ngraph::element::f32, ov::Shape{3}); auto constant = ov::opset8::Constant::create(ngraph::element::f32, {3}, {0}); auto add = std::make_shared<ov::opset8::Add>(input, constant); function = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{ input }); } comparator.enable(FunctionsComparator::PRECISIONS) .enable(FunctionsComparator::NODES); auto res = comparator.compare(function, function_ref); ASSERT_FALSE(res.valid) << res.message; } TEST(GraphComparatorTests, CheckPrecisionsWithoutEnable) { FunctionsComparator comparator(FunctionsComparator::no_default()); std::shared_ptr<ov::Model> function, function_ref; { auto input = std::make_shared<ov::opset8::Parameter>(ngraph::element::i64, ov::Shape{3}); auto constant = ov::opset8::Constant::create(ngraph::element::i64, {3}, {0}); auto add = std::make_shared<ov::opset8::Add>(input, constant); function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{ input }); } { auto input = std::make_shared<ov::opset8::Parameter>(ngraph::element::f32, ov::Shape{3}); auto constant = ov::opset8::Constant::create(ngraph::element::f32, {3}, {0}); auto add = std::make_shared<ov::opset8::Add>(input, constant); function = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{ input }); } comparator.enable(FunctionsComparator::NODES); auto res = comparator.compare(function, function_ref); ASSERT_TRUE(res.valid) << res.message; } TEST(GraphComparatorTests, CheckRTInfo) { FunctionsComparator comparator(FunctionsComparator::no_default()); std::shared_ptr<ov::Model> function, function_ref; { auto input = std::make_shared<ov::opset8::Parameter>(ngraph::element::i64, ov::Shape{3}); auto constant = ov::opset8::Constant::create(ngraph::element::i64, {3}, {0}); auto add = std::make_shared<ov::opset8::Add>(input, constant); add->get_rt_info()["my_info"] = 42; function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{ input }); } { auto input = std::make_shared<ov::opset8::Parameter>(ngraph::element::i64, ov::Shape{3}); auto constant = ov::opset8::Constant::create(ngraph::element::i64, {3}, {0}); auto add = std::make_shared<ov::opset8::Add>(input, constant); function = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{ input }); } comparator.enable(FunctionsComparator::RUNTIME_KEYS) .enable(FunctionsComparator::NODES); auto res = comparator.compare(function, function_ref); 
ASSERT_FALSE(res.valid) << res.message; } TEST(GraphComparatorTests, CheckRTInfoReverse) { FunctionsComparator comparator(FunctionsComparator::no_default()); std::shared_ptr<ov::Model> function, function_ref; { auto input = std::make_shared<ov::opset8::Parameter>(ngraph::element::i64, ov::Shape{3}); auto constant = ov::opset8::Constant::create(ngraph::element::i64, {3}, {0}); auto add = std::make_shared<ov::opset8::Add>(input, constant); function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{ input }); } { auto input = std::make_shared<ov::opset8::Parameter>(ngraph::element::i64, ov::Shape{3}); auto constant = ov::opset8::Constant::create(ngraph::element::i64, {3}, {0}); auto add = std::make_shared<ov::opset8::Add>(input, constant); add->get_rt_info()["my_info"] = 42; function = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{ input }); } comparator.enable(FunctionsComparator::RUNTIME_KEYS) .enable(FunctionsComparator::NODES); auto res = comparator.compare(function, function_ref); ASSERT_TRUE(res.valid) << res.message; } TEST(GraphComparatorTests, CheckRTInfoInput) { FunctionsComparator comparator(FunctionsComparator::no_default()); std::shared_ptr<ov::Model> function, function_ref; { auto input = std::make_shared<ov::opset8::Parameter>(ngraph::element::i64, ov::Shape{3}); auto constant = ov::opset8::Constant::create(ngraph::element::i64, {3}, {0}); auto add = std::make_shared<ov::opset8::Add>(input, constant); add->input(0).get_rt_info()["my_info"] = 42; function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{ input }); } { auto input = std::make_shared<ov::opset8::Parameter>(ngraph::element::i64, ov::Shape{3}); auto constant = ov::opset8::Constant::create(ngraph::element::i64, {3}, {0}); auto add = std::make_shared<ov::opset8::Add>(input, constant); function = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{ input }); } comparator.enable(FunctionsComparator::RUNTIME_KEYS) .enable(FunctionsComparator::NODES); auto res = comparator.compare(function, function_ref); ASSERT_FALSE(res.valid) << res.message; } TEST(GraphComparatorTests, CheckRTInfoOutput) { FunctionsComparator comparator(FunctionsComparator::no_default()); std::shared_ptr<ov::Model> function, function_ref; { auto input = std::make_shared<ov::opset8::Parameter>(ngraph::element::i64, ov::Shape{3}); auto constant = ov::opset8::Constant::create(ngraph::element::i64, {3}, {0}); auto add = std::make_shared<ov::opset8::Add>(input, constant); add->output(0).get_rt_info()["my_info"] = 42; function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{ input }); } { auto input = std::make_shared<ov::opset8::Parameter>(ngraph::element::i64, ov::Shape{3}); auto constant = ov::opset8::Constant::create(ngraph::element::i64, {3}, {0}); auto add = std::make_shared<ov::opset8::Add>(input, constant); function = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{ input }); } comparator.enable(FunctionsComparator::RUNTIME_KEYS) .enable(FunctionsComparator::NODES); auto res = comparator.compare(function, function_ref); ASSERT_FALSE(res.valid) << res.message; } TEST(GraphComparatorTests, CheckTensorIteratorPositive) { FunctionsComparator comparator(FunctionsComparator::no_default()); std::shared_ptr<ov::Model> function, function_ref; { auto X = std::make_shared<ov::opset8::Parameter>(ngraph::element::f32, 
ngraph::Shape{2, 1, 16}); auto Y = std::make_shared<ov::opset8::Parameter>(ngraph::element::f32, ngraph::Shape{1, 128}); auto Xi = std::make_shared<ov::opset8::Parameter>(ngraph::element::f32, ngraph::Shape{1, 1, 16}); auto Yi = std::make_shared<ov::opset8::Parameter>(ngraph::element::f32, ngraph::Shape{1, 128}); // Body auto axis = ov::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{}, {0}); auto squeeze = std::make_shared<ov::opset8::Squeeze>(Xi, axis); auto w_val = std::vector<float>(384*16, 0); auto r_val = std::vector<float>(384*128, 0); auto b_val = std::vector<float>(384, 0); auto W = ov::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{384, 16}, w_val); auto R = ov::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{384, 128}, r_val); auto B = ov::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{384}, b_val); auto gru_cell = std::make_shared<ov::opset8::GRUCell>(squeeze, Yi, W, R, B, 128); auto res_1 = std::make_shared<ov::opset8::Result>(gru_cell); auto unsqueeze = std::make_shared<ov::opset8::Unsqueeze>(gru_cell, axis); auto res_2 = std::make_shared<ov::opset8::Result>(unsqueeze); auto body = std::make_shared<ngraph::Function>(ngraph::OutputVector{res_1, res_2}, ngraph::ParameterVector{Xi, Yi}); auto tensor_iterator = std::make_shared<ov::opset8::TensorIterator>(); tensor_iterator->set_body(body); tensor_iterator->set_sliced_input(Xi, X, 0, 1, 1, -1, 0); tensor_iterator->set_merged_input(Yi, Y, res_1); auto out0 = tensor_iterator->get_iter_value(res_1, -1); auto out1 = tensor_iterator->get_concatenated_slices(res_2, 0, 1, 1, -1, 0); auto res_ti_1 = std::make_shared<ov::opset8::Result>(tensor_iterator->output(1)); function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{res_ti_1}, ngraph::ParameterVector{X, Y}); function = ov::clone_model(*function_ref); } comparator.enable(FunctionsComparator::NODES); auto res = comparator.compare(function, function_ref); ASSERT_TRUE(res.valid) << res.message; } TEST(GraphComparatorTests, CheckLoopPositive) { FunctionsComparator comparator(FunctionsComparator::no_default()); std::shared_ptr<ov::Model> function, function_ref; { auto X = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::PartialShape::dynamic()); auto Y = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::PartialShape::dynamic()); auto M = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::PartialShape::dynamic()); // Set up the cell body, a function from (Xi, Yi) -> (Zo) // Body parameters auto Xi = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::PartialShape::dynamic()); auto Yi = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::PartialShape::dynamic()); auto M_body = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::PartialShape::dynamic()); auto body_condition = std::make_shared<ov::opset8::Constant>(ov::element::boolean, ov::Shape{1}, true); auto trip_count = std::make_shared<ov::opset8::Constant>(ngraph::element::i64, ov::Shape{1}, 3); auto exec_condition = std::make_shared<ov::opset8::Constant>(ngraph::element::boolean, ov::Shape{1}, true); // Body auto sum = std::make_shared<ov::opset8::Add>(Xi, Yi); auto Zo = std::make_shared<ov::opset8::Multiply>(sum, M_body); auto body = std::make_shared<ov::Model>(ov::OutputVector{body_condition, Zo}, ov::ParameterVector{Xi, Yi, M_body}); auto loop = std::make_shared<ov::opset8::Loop>(trip_count, exec_condition); loop->set_function(body); loop->set_invariant_input(Xi, X); loop->set_invariant_input(Yi, Y); 
loop->set_merged_input(M_body, M, Zo); loop->set_special_body_ports(ov::opset8::Loop::SpecialBodyPorts{-1, 0}); // Output is last Zo auto result = std::make_shared<ov::opset8::Result>(loop->get_iter_value(Zo, -1)); function_ref = std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{X, Y, M}); function = ov::clone_model(*function_ref); } comparator.enable(FunctionsComparator::NODES); auto res = comparator.compare(function, function_ref); ASSERT_TRUE(res.valid) << res.message; } TEST(GraphComparatorTests, CheckSinksPositive) { FunctionsComparator comparator(FunctionsComparator::no_default()); std::shared_ptr<ov::Model> function, function_ref; { auto arg = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::Shape{1, 1}); auto init_const = ov::opset8::Constant::create(ov::element::f32, ov::Shape{1, 1}, {0}); const std::string variable_name("variable0"); auto variable = std::make_shared<ngraph::Variable>(ngraph::VariableInfo{ov::PartialShape::dynamic(), ov::element::dynamic, variable_name}); auto read = std::make_shared<ov::opset8::ReadValue>(init_const, variable); auto read2 = std::make_shared<ov::opset8::ReadValue>(init_const, variable); auto add = std::make_shared<ov::opset8::Add>(arg, read); auto add2 = std::make_shared<ov::opset8::Add>(arg, read2); auto assign = std::make_shared<ov::opset8::Assign>(add, variable); auto assign2 = std::make_shared<ov::opset8::Assign>(add, variable); auto res = std::make_shared<ov::opset8::Result>(add); auto res2 = std::make_shared<ov::opset8::Result>(add2); function_ref = std::make_shared<ov::Model>(ov::ResultVector({res, res2}), ov::SinkVector({assign, assign2}), ov::ParameterVector({arg})); function = ov::clone_model(*function_ref); } comparator.enable(FunctionsComparator::NODES); auto res = comparator.compare(function, function_ref); ASSERT_TRUE(res.valid) << res.message; } TEST(GraphComparatorTests, CheckSinksNegative) { FunctionsComparator comparator(FunctionsComparator::no_default()); std::shared_ptr<ov::Model> function, function_ref; { auto arg = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::Shape{1, 1}); auto init_const = ov::opset8::Constant::create(ov::element::f32, ov::Shape{1, 1}, {0}); const std::string variable_name("variable0"); auto variable = std::make_shared<ngraph::Variable>(ngraph::VariableInfo{ov::PartialShape::dynamic(), ov::element::dynamic, variable_name}); auto read = std::make_shared<ov::opset8::ReadValue>(init_const, variable); auto read2 = std::make_shared<ov::opset8::ReadValue>(init_const, variable); auto add = std::make_shared<ov::opset8::Add>(arg, read); auto add2 = std::make_shared<ov::opset8::Add>(arg, read2); auto assign = std::make_shared<ov::opset8::Assign>(add, variable); auto assign2 = std::make_shared<ov::opset8::Assign>(add, variable); auto res = std::make_shared<ov::opset8::Result>(add); auto res2 = std::make_shared<ov::opset8::Result>(add2); function_ref = std::make_shared<ov::Model>(ov::ResultVector({res, res2}), ov::SinkVector({assign, assign2}), ov::ParameterVector({arg})); } { auto arg = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::Shape{1, 1}); auto init_const = ov::opset8::Constant::create(ov::element::f32, ov::Shape{1, 1}, {0}); const std::string variable_name("variable_different"); auto variable = std::make_shared<ngraph::Variable>(ngraph::VariableInfo{ov::PartialShape::dynamic(), ov::element::dynamic, variable_name}); auto read = std::make_shared<ov::opset8::ReadValue>(init_const, variable); auto read2 = 
std::make_shared<ov::opset8::ReadValue>(init_const, variable); auto add = std::make_shared<ov::opset8::Add>(arg, read); auto add2 = std::make_shared<ov::opset8::Add>(arg, read2); auto assign = std::make_shared<ov::opset8::Assign>(add, variable); auto assign2 = std::make_shared<ov::opset8::Assign>(add, variable); auto res = std::make_shared<ov::opset8::Result>(add); auto res2 = std::make_shared<ov::opset8::Result>(add2); function = std::make_shared<ov::Model>(ov::ResultVector({res, res2}), ov::SinkVector({assign, assign2}), ov::ParameterVector({arg})); } comparator.enable(FunctionsComparator::NODES); auto res = comparator.compare(function, function_ref); ASSERT_FALSE(res.valid) << res.message; } TEST(GraphComparatorTests, DisableCheck) { FunctionsComparator comparator(FunctionsComparator::no_default()); std::shared_ptr<ov::Model> function, function_ref; { auto input = std::make_shared<ov::opset8::Parameter>(ngraph::element::i64, ov::Shape{1}); auto constant = ov::opset8::Constant::create(ngraph::element::i64, {1}, {0}); auto add = std::make_shared<ov::opset8::Add>(input, constant); function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add }, ngraph::ParameterVector{ input }); function = ov::clone_model(*function_ref); } comparator.enable(FunctionsComparator::NODES); comparator.disable(FunctionsComparator::NODES); auto res = comparator.compare(function, function_ref); ASSERT_FALSE(res.valid) << res.message; } TEST(GraphComparatorTests, CheckAccuracyPositive) { FunctionsComparator comparator(FunctionsComparator::no_default()); std::shared_ptr<ov::Model> function, function_ref; { auto input = std::make_shared<ov::opset8::Parameter>(ngraph::element::i64, ov::Shape{1}); auto constant = ov::opset8::Constant::create(ngraph::element::i64, {1}, {0}); auto add = std::make_shared<ov::opset8::Add>(input, constant); function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add}, ngraph::ParameterVector{ input }); } { auto input = std::make_shared<ov::opset8::Parameter>(ngraph::element::i64, ov::Shape{1}); auto constant = ov::opset8::Constant::create(ngraph::element::i64, {1}, {0}); auto add = std::make_shared<ov::opset8::Add>(input, constant); function = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add}, ngraph::ParameterVector{ input }); } comparator.enable(FunctionsComparator::ACCURACY); auto res = comparator.compare(function, function_ref); ASSERT_TRUE(res.valid) << res.message; } TEST(GraphComparatorTests, CheckAccuracyNegative) { FunctionsComparator comparator(FunctionsComparator::no_default()); std::shared_ptr<ov::Model> function, function_ref; { auto input = std::make_shared<ov::opset8::Parameter>(ngraph::element::i64, ov::Shape{1}); auto constant = ov::opset8::Constant::create(ngraph::element::i64, {1}, {12}); auto add = std::make_shared<ov::opset8::Add>(input, constant); function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add}, ngraph::ParameterVector{ input }); } { auto input = std::make_shared<ov::opset8::Parameter>(ngraph::element::i64, ov::Shape{1}); auto constant = ov::opset8::Constant::create(ngraph::element::i64, {1}, {200}); auto add = std::make_shared<ov::opset8::Add>(input, constant); function = std::make_shared<ngraph::Function>(ngraph::NodeVector{ add}, ngraph::ParameterVector{ input }); } comparator.enable(FunctionsComparator::ACCURACY); auto res = comparator.compare(function, function_ref); ASSERT_FALSE(res.valid) << res.message; } TEST(GraphComparatorTests, CheckAccuracyNotEnabled) { FunctionsComparator 
comparator(FunctionsComparator::no_default()); std::shared_ptr<ov::Model> function, function_ref; { auto input = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::Shape{ 1, 3, 12, 12 }); auto const_weights = ov::opset8::Constant::create(ov::element::f16, ov::Shape{ 1, 3, 3, 3 }, { 1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9 }); auto convert_ins1 = std::make_shared<ov::opset8::Convert>(const_weights, ov::element::f32); auto conv = std::make_shared<ov::opset8::Convolution>(input, convert_ins1, ov::Strides{ 1, 1 }, ov::CoordinateDiff{ 1, 1 }, ov::CoordinateDiff{ 1, 1 }, ov::Strides{ 1, 1 }); function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{ conv }, ngraph::ParameterVector{ input }); } { auto input = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::Shape{ 1, 3, 12, 12 }); auto const_weights = ov::opset8::Constant::create(ov::element::f16, ov::Shape{ 1, 3, 3, 3 }, { 1, 9, 3, 4, 5, 6, 7, 8, 9, 1, 12, 3, 9, 5, 0, 7, 8, 9, 1, 2, 12, 4, 9, 6, 7, 8, 9 }); auto convert_ins1 = std::make_shared<ov::opset8::Convert>(const_weights, ov::element::f32); auto conv = std::make_shared<ov::opset8::Convolution>(input, convert_ins1, ov::Strides{ 1, 1 }, ov::CoordinateDiff{ 1, 1 }, ov::CoordinateDiff{ 1, 1 }, ov::Strides{ 1, 1 }); function = std::make_shared<ngraph::Function>(ngraph::NodeVector{ conv }, ngraph::ParameterVector{ input }); } comparator.enable(FunctionsComparator::NODES); auto res = comparator.compare(function, function_ref); ASSERT_TRUE(res.valid) << res.message; }
// AsmJit - Machine code generation for C++ // // * Official AsmJit Home Page: https://asmjit.com // * Official Github Repository: https://github.com/asmjit/asmjit // // Copyright (c) 2008-2020 The AsmJit Authors // // This software is provided 'as-is', without any express or implied // warranty. In no event will the authors be held liable for any damages // arising from the use of this software. // // Permission is granted to anyone to use this software for any purpose, // including commercial applications, and to alter it and redistribute it // freely, subject to the following restrictions: // // 1. The origin of this software must not be misrepresented; you must not // claim that you wrote the original software. If you use this software // in a product, an acknowledgment in the product documentation would be // appreciated but is not required. // 2. Altered source versions must be plainly marked as such, and must not be // misrepresented as being the original software. // 3. This notice may not be removed or altered from any source distribution. #ifndef ASMJIT_CORE_COMPILERDEFS_H_INCLUDED #define ASMJIT_CORE_COMPILERDEFS_H_INCLUDED #include "../core/api-config.h" #include "../core/operand.h" #include "../core/zonestring.h" ASMJIT_BEGIN_NAMESPACE // ============================================================================ // [Forward Declarations] // ============================================================================ class RAWorkReg; //! \addtogroup asmjit_compiler //! \{ // ============================================================================ // [asmjit::VirtReg] // ============================================================================ //! Virtual register data, managed by \ref BaseCompiler. class VirtReg { public: ASMJIT_NONCOPYABLE(VirtReg) //! Virtual register id. uint32_t _id = 0; //! Virtual register info (signature). RegInfo _info = {}; //! Virtual register size (can be smaller than `regInfo._size`). uint32_t _virtSize = 0; //! Virtual register alignment (for spilling). uint8_t _alignment = 0; //! Type-id. uint8_t _typeId = 0; //! Virtual register weight for alloc/spill decisions. uint8_t _weight = 1; //! True if this is a fixed register, never reallocated. uint8_t _isFixed : 1; //! True if the virtual register is only used as a stack (never accessed as register). uint8_t _isStack : 1; uint8_t _reserved : 6; //! Virtual register name (user provided or automatically generated). ZoneString<16> _name {}; // ------------------------------------------------------------------------- // The following members are used exclusively by RAPass. They are initialized // when the VirtReg is created to NULL pointers and then changed during RAPass // execution. RAPass sets them back to NULL before it returns. // ------------------------------------------------------------------------- //! Reference to `RAWorkReg`, used during register allocation. RAWorkReg* _workReg = nullptr; //! \name Construction & Destruction //! \{ inline VirtReg(uint32_t id, uint32_t signature, uint32_t virtSize, uint32_t alignment, uint32_t typeId) noexcept : _id(id), _info { signature }, _virtSize(virtSize), _alignment(uint8_t(alignment)), _typeId(uint8_t(typeId)), _isFixed(false), _isStack(false), _reserved(0) {} //! \} //! \name Accessors //! \{ //! Returns the virtual register id. inline uint32_t id() const noexcept { return _id; } //! Returns the virtual register name. inline const char* name() const noexcept { return _name.data(); } //!
Returns the size of the virtual register name. inline uint32_t nameSize() const noexcept { return _name.size(); } //! Returns a register information that wraps the register signature. inline const RegInfo& info() const noexcept { return _info; } //! Returns a virtual register type (maps to the physical register type as well). inline uint32_t type() const noexcept { return _info.type(); } //! Returns a virtual register group (maps to the physical register group as well). inline uint32_t group() const noexcept { return _info.group(); } //! Returns a real size of the register this virtual register maps to. //! //! For example if this is a 128-bit SIMD register used for a scalar single //! precision floating point value then its virtSize would be 4, however, the //! `regSize` would still say 16 (128-bits), because it's the smallest size //! of that register type. inline uint32_t regSize() const noexcept { return _info.size(); } //! Returns a register signature of this virtual register. inline uint32_t signature() const noexcept { return _info.signature(); } //! Returns the virtual register size. //! //! The virtual register size describes how many bytes the virtual register //! needs to store its content. It can be smaller than the physical register //! size, see `regSize()`. inline uint32_t virtSize() const noexcept { return _virtSize; } //! Returns the virtual register alignment. inline uint32_t alignment() const noexcept { return _alignment; } //! Returns the virtual register type id, see `Type::Id`. inline uint32_t typeId() const noexcept { return _typeId; } //! Returns the virtual register weight - the register allocator can use it //! as explicit hint for alloc/spill decisions. inline uint32_t weight() const noexcept { return _weight; } //! Sets the virtual register weight (0 to 255) - the register allocator can //! use it as explicit hint for alloc/spill decisions and initial bin-packing. inline void setWeight(uint32_t weight) noexcept { _weight = uint8_t(weight); } //! Returns whether the virtual register is always allocated to a fixed //! physical register (and never reallocated). //! //! \note This is only used for special purposes and it's mostly internal. inline bool isFixed() const noexcept { return bool(_isFixed); } //! Returns whether the virtual register is indeed a stack that only uses //! the virtual register id for making it accessible. //! //! \note It's an error if a stack is accessed as a register. inline bool isStack() const noexcept { return bool(_isStack); } inline bool hasWorkReg() const noexcept { return _workReg != nullptr; } inline RAWorkReg* workReg() const noexcept { return _workReg; } inline void setWorkReg(RAWorkReg* workReg) noexcept { _workReg = workReg; } inline void resetWorkReg() noexcept { _workReg = nullptr; } //! \} }; //! \} ASMJIT_END_NAMESPACE #endif // ASMJIT_CORE_COMPILERDEFS_H_INCLUDED
/* * Copyright (c) 2016-2017 <NAME> (Aikar) - MIT License * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ package co.aikar.commands; import java.lang.annotation.Annotation; import java.util.List; /** * Holds information about the currently executing command on this thread */ public class CommandOperationContext<I extends CommandIssuer> { private final CommandManager manager; private final I issuer; private final BaseCommand command; private final String commandLabel; private final String[] args; private final boolean isAsync; private RegisteredCommand registeredCommand; List<String> enumCompletionValues; CommandOperationContext(CommandManager manager, I issuer, BaseCommand command, String commandLabel, String[] args, boolean isAsync) { this.manager = manager; this.issuer = issuer; this.command = command; this.commandLabel = commandLabel; this.args = args; this.isAsync = isAsync; } public CommandManager getCommandManager() { return manager; } public I getCommandIssuer() { return issuer; } public BaseCommand getCommand() { return command; } public String getCommandLabel() { return commandLabel; } public String[] getArgs() { return args; } public boolean isAsync() { return isAsync; } public void setRegisteredCommand(RegisteredCommand registeredCommand) { this.registeredCommand = registeredCommand; } public RegisteredCommand getRegisteredCommand() { return registeredCommand; } /** * This method will not support annotation processors!! use getAnnotationValue or hasAnnotation * * @deprecated Use {@link #getAnnotationValue(Class)} */ @Deprecated public <T extends Annotation> T getAnnotation(Class<T> anno) { return registeredCommand.method.getAnnotation(anno); } public <T extends Annotation> String getAnnotationValue(Class<T> cls) { return manager.getAnnotations().getAnnotationValue(registeredCommand.method, cls); } public <T extends Annotation> String getAnnotationValue(Class<T> cls, int options) { return manager.getAnnotations().getAnnotationValue(registeredCommand.method, cls, options); } public boolean hasAnnotation(Class<? extends Annotation> anno) { return getAnnotation(anno) != null; } }
import torch import torch.nn as nn import torch_tensorrt.fx.tracer.acc_tracer.acc_ops as acc_ops from parameterized import parameterized from torch.testing._internal.common_utils import run_tests from torch_tensorrt.fx.tools.common_fx2trt import AccTestCase class TestTopKConverter(AccTestCase): @parameterized.expand( [ ("top1", 1, -1), ("top2", 2, -1), ("none_dim", 1, None), ("smallest", 1, -1, False), ("top1_dim0", 1, 0, False), ] ) def test_topk(self, _, k, dim, largest=True): class TopK(nn.Module): def __init__(self, k, dim): super().__init__() self.k = k self.dim = dim self.largest = largest def forward(self, x): if self.dim is not None: out = torch.topk( x, k=self.k, dim=self.dim, largest=self.largest, sorted=False ) else: out = torch.topk(x, k=self.k, largest=self.largest, sorted=False) return out[0], out[1] inputs = [torch.randn(1, 2, 3, 4)] self.run_test( TopK(k, dim), inputs, expected_ops={acc_ops.topk}, test_implicit_batch_dim=(dim != 0), ) if __name__ == "__main__": run_tests()
# Copyright (C) 2018-2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 from openvino.tools.mo.ops.prelu import PReLU from openvino.tools.mo.front.caffe.collect_attributes import merge_attrs from openvino.tools.mo.front.caffe.extractors.utils import weights_biases from openvino.tools.mo.front.common.extractors.utils import layout_attrs from openvino.tools.mo.front.extractor import FrontExtractorOp class PreluFrontExtractor(FrontExtractorOp): op = 'PReLU' enabled = True @classmethod def extract(cls, node): proto_layer = node.pb pb_model = node.model_pb param = proto_layer.prelu_param update_attrs = { 'channel_shared': int(param.channel_shared) } variance_norm_caffe_map = { 0: 'caffe.FillerParameter.FAN_IN', 1: 'caffe.FillerParameter.FAN_OUT', 2: 'caffe.FillerParameter.AVERAGE' } if hasattr(param, 'filler'): update_attrs.update({ 'filler_type': param.filler.type, 'filler_value': int(param.filler.value), 'min': int(param.filler.min), 'max': int(param.filler.max), 'mean': int(param.filler.mean), 'std': int(param.filler.std), 'sparse': param.filler.sparse, 'variance_norm': variance_norm_caffe_map[param.filler.variance_norm] }) mapping_rule = merge_attrs(param, update_attrs) mapping_rule.update(weights_biases(False, pb_model)) mapping_rule.update(layout_attrs()) # update the attributes of the node PReLU.update_node_stat(node, mapping_rule) return cls.enabled
/* * Anserini: A Lucene toolkit for reproducible information retrieval research * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.anserini.rerank.lib; import io.anserini.analysis.AnalyzerUtils; import io.anserini.index.IndexArgs; import io.anserini.rerank.Reranker; import io.anserini.rerank.RerankerContext; import io.anserini.rerank.ScoredDocuments; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Term; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.similarities.BM25Similarity; import org.apache.lucene.util.BytesRef; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import static io.anserini.search.SearchCollection.BREAK_SCORE_TIES_BY_DOCID; import static io.anserini.search.SearchCollection.BREAK_SCORE_TIES_BY_TWEETID; class BM25PrfSimilarity extends BM25Similarity { BM25PrfSimilarity(float k1, float b) { super(k1, b); } @Override // idf is not needed in BM25PRF protected float idf(long docFreq, long docCount) { return 1; } } public class BM25PrfReranker implements Reranker { private static final Logger LOG = LogManager.getLogger(BM25PrfReranker.class); private final int fbDocs; private final Analyzer analyzer; private final String field; private final boolean outputQuery; private final int fbTerms; private final float k1; private final float b; private final float newTermWeight; public BM25PrfReranker(Analyzer analyzer, String field, int fbTerms, int fbDocs, float k1, float b, float newTermWeight, boolean outputQuery) { this.analyzer = analyzer; this.outputQuery = outputQuery; this.field = field; this.fbTerms = fbTerms; this.fbDocs = fbDocs; this.k1 = k1; this.b = b; this.newTermWeight = newTermWeight; } @Override public ScoredDocuments rerank(ScoredDocuments docs, RerankerContext context) { // set similarity to BM25PRF IndexSearcher searcher = context.getIndexSearcher(); BM25Similarity originalSimilarity = (BM25Similarity) searcher.getSimilarity(); searcher.setSimilarity(new BM25PrfSimilarity(k1, b)); IndexReader reader = searcher.getIndexReader(); List<String> originalQueryTerms = AnalyzerUtils.analyze(analyzer, context.getQueryText()); boolean useRf = (context.getSearchArgs().rf_qrels != null); PrfFeatures fv = expandQuery(originalQueryTerms, docs, reader, useRf); Query newQuery = fv.toQuery(); if (this.outputQuery) { LOG.info("QID: " + context.getQueryId());
LOG.info("Original Query: " + context.getQuery().toString(this.field)); LOG.info("Running new query: " + newQuery.toString(this.field)); LOG.info("Features: " + fv.toString()); } TopDocs rs; try { // Figure out how to break the scoring ties. if (context.getSearchArgs().arbitraryScoreTieBreak) { rs = searcher.search(newQuery, context.getSearchArgs().hits); } else if (context.getSearchArgs().searchtweets) { rs = searcher.search(newQuery, context.getSearchArgs().hits, BREAK_SCORE_TIES_BY_TWEETID, true); } else { rs = searcher.search(newQuery, context.getSearchArgs().hits, BREAK_SCORE_TIES_BY_DOCID, true); } } catch (IOException e) { e.printStackTrace(); return docs; } // set similarity back searcher.setSimilarity(originalSimilarity); return ScoredDocuments.fromTopDocs(rs, searcher); } private PrfFeatures expandQuery(List<String> originalTerms, ScoredDocuments docs, IndexReader reader, boolean useRf) { PrfFeatures newFeatures = new PrfFeatures(); Set<String> vocab = new HashSet<>(); Map<Integer, Set<String>> docToTermsMap = new HashMap<>(); int numFbDocs; if (useRf){ numFbDocs = docs.documents.length; } else { numFbDocs = docs.documents.length < fbDocs ? docs.documents.length : fbDocs; } int numDocs = reader.numDocs(); for (int i = 0; i < numFbDocs; i++) { try { if (useRf && docs.scores[i] <= 0){ continue; } Terms terms = reader.getTermVector(docs.ids[i], field); Set<String> termsStr = getTermsStr(terms); docToTermsMap.put(docs.ids[i], termsStr); vocab.addAll(termsStr); } catch (IOException e) { e.printStackTrace(); } } int numRelDocs = docToTermsMap.size(); Set<String> originalTermsSet = new HashSet<>(originalTerms); // Add New Terms for (String term : vocab) { if (originalTermsSet.contains(term)) continue; if (term.length() < 2 || term.length() > 20) continue; if (!term.matches("[a-z0-9]+")) continue; if (term.matches("[0-9]+")) continue; try { int df = reader.docFreq(new Term(IndexArgs.CONTENTS, term)); int dfRel = 0; for (Map.Entry<Integer, Set<String>> entry : docToTermsMap.entrySet()) { Set<String> terms = entry.getValue(); if (terms.contains(term)) { dfRel++; } } if (dfRel < 2) { continue; } newFeatures.addFeature(term, df, dfRel, numDocs, numRelDocs, newTermWeight); } catch (IOException e) { e.printStackTrace(); } } newFeatures.pruneToSize(fbTerms); for (String term : originalTerms) { try { int df = reader.docFreq(new Term(IndexArgs.CONTENTS, term)); int dfRel = 0; for (Map.Entry<Integer, Set<String>> entry : docToTermsMap.entrySet()) { Set<String> terms = entry.getValue(); if (terms.contains(term)) { dfRel++; } } newFeatures.addFeature(term, df, dfRel, numDocs, numRelDocs); } catch (IOException e) { e.printStackTrace(); } } return newFeatures; } @Override public String tag() { return "BM25PRF(fbDocs=" + fbDocs + ",fbTerms=" + fbTerms + ",k1=" + k1 + ",b=" + b + ",newTermWeight=" + newTermWeight; } private Set<String> getTermsStr(Terms terms) { Set<String> termsStr = new HashSet<>(); try { TermsEnum termsEnum = terms.iterator(); BytesRef text; while ((text = termsEnum.next()) != null) { String term = text.utf8ToString(); termsStr.add(term); } } catch (Exception e) { e.printStackTrace(); // Return empty feature vector return termsStr; } return termsStr; } class PrfFeature { int df; int dfRel; int numDocs; int numDocsRel; float weight; PrfFeature(int df, int dfRel, int numDocs, int numDocsRel, float weight) { this.df = df; this.dfRel = dfRel; this.numDocs = numDocs; this.numDocsRel = numDocsRel; this.weight = weight; } double getRelWeight() { double rw = Math.log((dfRel + 0.5D) * 
(numDocs - df - numDocsRel + dfRel + 0.5D) / ((df - dfRel + 0.5D) * (numDocsRel - dfRel + 0.5D))) * weight; return Math.max(rw, 1e-6); } double getOfferWeight() { // we apply log to dfRel according to // Sakai and Robertson (SIGIR 2002) return getRelWeight() * Math.log(Math.max(dfRel, 1e-6)); } @Override public String toString() { return String.format("%d, %d, %d, %d, %f, %f, %f", df, dfRel, numDocs, numDocsRel, weight, getRelWeight(), getOfferWeight()); } } class PrfFeatures { private HashMap<String, PrfFeature> features; PrfFeatures() { this.features = new HashMap<>(); } void addFeature(String term, int df, int dfRel, int numDocs, int numDocsRel, float weight) { features.put(term, new PrfFeature(df, dfRel, numDocs, numDocsRel, weight)); } void addFeature(String term, int df, int dfRel, int numDocs, int numDocsRel) { addFeature(term, df, dfRel, numDocs, numDocsRel, 1.0f); } public Query toQuery() { BooleanQuery.Builder feedbackQueryBuilder = new BooleanQuery.Builder(); for (Map.Entry<String, PrfFeature> f : features.entrySet()) { String term = f.getKey(); float rw = (float) f.getValue().getRelWeight(); feedbackQueryBuilder.add(new BoostQuery(new TermQuery(new Term(field, term)), rw), BooleanClause.Occur.SHOULD); } return feedbackQueryBuilder.build(); } private List<KeyValuePair> getOrderedFeatures() { List<KeyValuePair> kvpList = new ArrayList<KeyValuePair>(features.size()); for (String feature : features.keySet()) { PrfFeature value = features.get(feature); KeyValuePair keyValuePair = new KeyValuePair(feature, value); kvpList.add(keyValuePair); } Collections.sort(kvpList, new Comparator<KeyValuePair>() { public int compare(KeyValuePair x, KeyValuePair y) { double xVal = x.getValue(); double yVal = y.getValue(); return (Double.compare(yVal, xVal)); } }); return kvpList; } PrfFeatures pruneToSize(int k) { List<KeyValuePair> pairs = getOrderedFeatures(); HashMap<String, PrfFeature> pruned = new HashMap<>(); for (KeyValuePair pair : pairs) { if (pruned.size() >= k) { break; } pruned.put(pair.getKey(), pair.getFeature()); } this.features = pruned; return this; } @Override public String toString() { List<String> strBuilder = new ArrayList<String>(); List<KeyValuePair> pairs = getOrderedFeatures(); for (KeyValuePair pair : pairs) { strBuilder.add(pair.getKey() + "," + pair.getFeature()); } return String.join("||", strBuilder); } private class KeyValuePair { private String key; private PrfFeature value; public KeyValuePair(String key, PrfFeature value) { this.key = key; this.value = value; } public String getKey() { return key; } @Override public String toString() { return value + "\t" + key; } public float getValue() { return (float) value.getOfferWeight(); } public PrfFeature getFeature() { return value; } } } }
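The relevance weight computed in PrfFeature.getRelWeight() above is the Robertson/Sparck Jones weight with +0.5 smoothing, and getOfferWeight() scales it by log(dfRel) for term selection, following the Sakai and Robertson (SIGIR 2002) citation in the comment. The standalone Python sketch below restates that arithmetic for readability; the function names are illustrative, not part of Anserini.

import math

def rsj_relevance_weight(df, df_rel, num_docs, num_rel_docs, weight=1.0):
    # Mirrors PrfFeature.getRelWeight(): RSJ weight with +0.5 smoothing,
    # floored at 1e-6 exactly as in the Java above.
    rw = weight * math.log(
        (df_rel + 0.5) * (num_docs - df - num_rel_docs + df_rel + 0.5)
        / ((df - df_rel + 0.5) * (num_rel_docs - df_rel + 0.5)))
    return max(rw, 1e-6)

def offer_weight(df, df_rel, num_docs, num_rel_docs, weight=1.0):
    # Mirrors PrfFeature.getOfferWeight(): the term-selection value that
    # pruneToSize() uses to keep only the top fbTerms expansion terms.
    return (rsj_relevance_weight(df, df_rel, num_docs, num_rel_docs, weight)
            * math.log(max(df_rel, 1e-6)))

# A term found in 8 of 10 feedback docs but only 500 of 1,000,000 docs overall:
# rsj_relevance_weight(500, 8, 1_000_000, 10)  ->  about 8.8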
// // Copyright (c) 2019 INRIA // #ifndef __pinocchio_math_comparison_operators_hpp__ #define __pinocchio_math_comparison_operators_hpp__ namespace pinocchio { #define PINOCCHIO_DEFINE_COMPARISON_OP(name,OP) \ struct name \ { \ template<typename T1, typename T2> \ static bool call(const T1 & a, const T2 & b) \ { return a OP b;} \ } PINOCCHIO_DEFINE_COMPARISON_OP(equal_to_op,==); PINOCCHIO_DEFINE_COMPARISON_OP(not_equal_to_op,!=); PINOCCHIO_DEFINE_COMPARISON_OP(less_than_op,<); PINOCCHIO_DEFINE_COMPARISON_OP(greater_than_op,>); PINOCCHIO_DEFINE_COMPARISON_OP(less_than_or_equal_to_op,<=); PINOCCHIO_DEFINE_COMPARISON_OP(greater_than_or_equal_to_op,>=); template<class OP, bool condition, bool default_return_value> struct apply_op_if { template<typename T1, typename T2> static bool op(const T1 & /*a*/, const T2 & /*b*/) { return default_return_value; } }; template<class OP, bool default_return_value> struct apply_op_if<OP,true,default_return_value> { template<typename T1, typename T2> static bool op(const T1 & a, const T2 & b) { return OP::call(a,b); } }; } #endif //#ifndef __pinocchio_math_comparison_operators_hpp__
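The apply_op_if template above selects, through partial specialization, between invoking the comparison functor and returning a fixed default; in C++ that selection happens at compile time with no runtime branch. As a reading aid only, here is a runtime Python analogue of the same behaviour:

import operator

def apply_op_if(op, condition, default_return_value, a, b):
    # Runtime analogue of apply_op_if<OP, condition, default_return_value>:
    # evaluate the comparison only when the condition holds, otherwise
    # fall back to the fixed default.
    return op(a, b) if condition else default_return_value

# apply_op_if(operator.le, True, False, 1, 2)  -> True  (1 <= 2 is evaluated)
# apply_op_if(operator.le, False, False, 1, 2) -> False (default is returned)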
""" The Dual Thrust trading algorithm is a famous strategy developed by <NAME>. It is a breakout system, commonly used in futures, forex and equity markets. The limits are based on today’s opening price plus or minus a certain percentage of recent trading range. When the price breaks through the upper level, it will long, and when it breaks the lower level, it will short. 1. recent trading range is relatively stable, using four price points; 2. Percentage K1 and K2 can be asymmetric https://www.quantconnect.com/tutorials/strategy-library/dual-thrust-trading-algorithm Similar to quantconnect, got negative Sharpe -0.377. It is an intraday breakout strategy, requires tickdata; holding position for a year is against the essence of this strategy. Improvements: 1. profit target and stop loss. 2. confirmation e.g. MA5min>MA10min """ import os import numpy as np import pandas as pd import pytz from datetime import datetime, timezone import multiprocessing import talib import quanttrader as qt import matplotlib.pyplot as plt import empyrical as ep import pyfolio as pf # set browser full width from IPython.core.display import display, HTML display(HTML("<style>.container { width:100% !important; }</style>")) class DualThrust(qt.StrategyBase): def __init__(self, n=4, k1=0.5, k2=0.5 ): super(DualThrust, self).__init__() self.n = n self.k1 = k1 self.k2 = k2 self.current_time = None def on_tick(self, tick_event): self.current_time = tick_event.timestamp # print('Processing {}'.format(self.current_time)) symbol = self.symbols[0] df_hist = self._data_board.get_hist_price(symbol, tick_event.timestamp) # need n day trading range if df_hist.shape[0] < self.n: return high = df_hist.High.iloc[-self.n:] low = df_hist.Low.iloc[-self.n:] close = df_hist.Close.iloc[-self.n:] current_open = df_hist.Open.iloc[-1] current_price = df_hist.Close.iloc[-1] current_size = self._position_manager.get_position_size(symbol) npv = self._position_manager.current_total_capital HH, HC, LC, LL = max(high), max(close), min(close), min(low) signal_range = max(HH - LC, HC - LL) selltrig = current_open - self.k2 * signal_range buytrig = current_open + self.k1 * signal_range if current_price > buytrig: # buy on upper break if current_size > 0: return target_size = int(npv / current_price) self.adjust_position(symbol, size_from=current_size, size_to=target_size, timestamp=self.current_time) print(f'{self.current_time}, BUY ORDER SENT, {symbol}, Price: {current_price:.2f}, ' f'Buy trigger: {buytrig:.2f}, Size: {current_size}, Target Size: {target_size}') elif current_price < selltrig: # sell on down break if current_size < 0: return target_size = -int(npv / current_price) self.adjust_position(symbol, size_from=current_size, size_to=target_size, timestamp=self.current_time) print(f'{self.current_time}, SELL ORDER SENT, {symbol}, Price: {current_price:.2f}, ' f'Sell trigger: {selltrig:.2f}, Size: {current_size}, Target Size: {target_size}') def parameter_search(engine, tag, target_name, return_dict): """ This function should be the same for all strategies.
The only reason not included in quanttrader is because of its dependency on pyfolio (to get perf_stats) """ ds_equity, _, _ = engine.run() try: strat_ret = ds_equity.pct_change().dropna() perf_stats_strat = pf.timeseries.perf_stats(strat_ret) target_value = perf_stats_strat.loc[target_name] # first table in tuple except KeyError: target_value = 0 return_dict[tag] = target_value if __name__ == '__main__': do_optimize = False run_in_jupyter = False symbol = 'SPX' benchmark = 'SPX' datapath = os.path.join('../data/', f'{symbol}.csv') data = qt.util.read_ohlcv_csv(datapath) init_capital = 100_000.0 test_start_date = datetime(2010,1,1, 8, 30, 0, 0, pytz.timezone('America/New_York')) test_end_date = datetime(2019,12,31, 6, 0, 0, 0, pytz.timezone('America/New_York')) if do_optimize: # parallel parameter search params_list = [] for n_ in [3, 4, 5, 10]: for k_ in [0.4, 0.5, 0.6]: params_list.append({'n': n_, 'k1': k_, 'k2': k_}) target_name = 'Sharpe ratio' manager = multiprocessing.Manager() return_dict = manager.dict() jobs = [] for params in params_list: strategy = DualThrust() strategy.set_capital(init_capital) strategy.set_symbols([symbol]) backtest_engine = qt.BacktestEngine(test_start_date, test_end_date) backtest_engine.set_capital(init_capital) # capital or portfolio >= capital for one strategy backtest_engine.add_data(symbol, data) strategy.set_params({'n': params['n'], 'k1': params['k1'], 'k2': params['k2']}) backtest_engine.set_strategy(strategy) tag = (params['n'], params['k1'], params['k2']) p = multiprocessing.Process(target=parameter_search, args=(backtest_engine, tag, target_name, return_dict)) jobs.append(p) p.start() for proc in jobs: proc.join() for k,v in return_dict.items(): print(k, v) else: strategy = DualThrust() strategy.set_capital(init_capital) strategy.set_symbols([symbol]) strategy.set_params({'n':4, 'k1': 0.5, 'k2': 0.5}) # Create a Data Feed backtest_engine = qt.BacktestEngine(test_start_date, test_end_date) backtest_engine.set_capital(init_capital) # capital or portfolio >= capital for one strategy backtest_engine.add_data(symbol, data) backtest_engine.set_strategy(strategy) ds_equity, df_positions, df_trades = backtest_engine.run() # save to excel qt.util.save_one_run_results('./output', ds_equity, df_positions, df_trades) # ------------------------- Evaluation and Plotting -------------------------------------- # strat_ret = ds_equity.pct_change().dropna() strat_ret.name = 'strat' bm = qt.util.read_ohlcv_csv(os.path.join('../data/', f'{benchmark}.csv')) bm_ret = bm['Close'].pct_change().dropna() bm_ret.index = pd.to_datetime(bm_ret.index) bm_ret = bm_ret[strat_ret.index] bm_ret.name = 'benchmark' perf_stats_strat = pf.timeseries.perf_stats(strat_ret) perf_stats_all = perf_stats_strat perf_stats_bm = pf.timeseries.perf_stats(bm_ret) perf_stats_all = pd.concat([perf_stats_strat, perf_stats_bm], axis=1) perf_stats_all.columns = ['Strategy', 'Benchmark'] drawdown_table = pf.timeseries.gen_drawdown_table(strat_ret, 5) monthly_ret_table = ep.aggregate_returns(strat_ret, 'monthly') monthly_ret_table = monthly_ret_table.unstack().round(3) ann_ret_df = pd.DataFrame(ep.aggregate_returns(strat_ret, 'yearly')) ann_ret_df = ann_ret_df.unstack().round(3) print('-------------- PERFORMANCE ----------------') print(perf_stats_all) print('-------------- DRAWDOWN ----------------') print(drawdown_table) print('-------------- MONTHLY RETURN ----------------') print(monthly_ret_table) print('-------------- ANNUAL RETURN ----------------') print(ann_ret_df) if run_in_jupyter: 
pf.create_full_tear_sheet( strat_ret, benchmark_rets=bm_ret, positions=df_positions, transactions=df_trades, round_trips=False) plt.show() else: f1 = plt.figure(1) pf.plot_rolling_returns(strat_ret, factor_returns=bm_ret) f1.show() f2 = plt.figure(2) pf.plot_rolling_volatility(strat_ret, factor_returns=bm_ret) f2.show() f3 = plt.figure(3) pf.plot_rolling_sharpe(strat_ret) f3.show() f4 = plt.figure(4) pf.plot_drawdown_periods(strat_ret) f4.show() f5 = plt.figure(5) pf.plot_monthly_returns_heatmap(strat_ret) f5.show() f6 = plt.figure(6) pf.plot_annual_returns(strat_ret) f6.show() f7 = plt.figure(7) pf.plot_monthly_returns_dist(strat_ret) plt.show()
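The trigger computation inlined in on_tick() above follows the docstring directly: the range is the wider of (highest high - lowest close) and (highest close - lowest low) over the last n bars, and k1/k2 multiples of that range are added to and subtracted from the current open. A minimal standalone sketch of just that arithmetic; dual_thrust_triggers is an illustrative helper, not part of quanttrader.

import pandas as pd

def dual_thrust_triggers(df_hist: pd.DataFrame, n: int = 4,
                         k1: float = 0.5, k2: float = 0.5):
    # Recent trading range from four price points, as in the docstring.
    high = df_hist.High.iloc[-n:]
    low = df_hist.Low.iloc[-n:]
    close = df_hist.Close.iloc[-n:]
    signal_range = max(high.max() - close.min(), close.max() - low.min())
    current_open = df_hist.Open.iloc[-1]
    buytrig = current_open + k1 * signal_range   # go long on a break above
    selltrig = current_open - k2 * signal_range  # go short on a break below
    return buytrig, selltrig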
class Luhn:
    def __init__(self, card_num):
        # Spaces are allowed separators; strip them once up front.
        self.card_num = card_num.replace(" ", "")

    def valid(self):
        # Luhn check: double every second digit from the right,
        # subtracting 9 when a doubled digit exceeds 9.
        if len(self.card_num) <= 1 or not self.card_num.isdigit():
            return False
        digits = (int(c) for c in reversed(self.card_num))
        total = sum(d if i % 2 == 0 else d * 2 - 9 * (d > 4)
                    for i, d in enumerate(digits))
        return total % 10 == 0
package mage.cards.s; import java.util.UUID; import mage.MageInt; import mage.abilities.TriggeredAbilityImpl; import mage.abilities.costs.mana.ManaCostsImpl; import mage.abilities.effects.Effect; import mage.abilities.effects.common.ExileTargetEffect; import mage.abilities.keyword.NinjutsuAbility; import mage.cards.CardImpl; import mage.cards.CardSetInfo; import mage.constants.CardType; import mage.constants.SubType; import mage.constants.Zone; import mage.filter.FilterCard; import mage.filter.common.FilterControlledCreaturePermanent; import mage.filter.predicate.card.OwnerIdPredicate; import mage.filter.predicate.permanent.UnblockedPredicate; import mage.game.Game; import mage.game.events.DamagedPlayerEvent; import mage.game.events.GameEvent; import mage.target.common.TargetCardInOpponentsGraveyard; /** * * @author LevelX2 */ public final class Skullsnatcher extends CardImpl { private static final FilterControlledCreaturePermanent filter = new FilterControlledCreaturePermanent("unblocked attacker you control"); static { filter.add(UnblockedPredicate.instance); } public Skullsnatcher(UUID ownerId, CardSetInfo setInfo) { super(ownerId,setInfo,new CardType[]{CardType.CREATURE},"{1}{B}"); this.subtype.add(SubType.RAT); this.subtype.add(SubType.NINJA); this.power = new MageInt(2); this.toughness = new MageInt(1); // Ninjutsu {B} ({B}, Return an unblocked attacker you control to hand: Put this card onto the battlefield from your hand tapped and attacking.) this.addAbility(new NinjutsuAbility(new ManaCostsImpl("{B}"))); // Whenever Skullsnatcher deals combat damage to a player, exile up to two target cards from that player's graveyard. Effect effect = new ExileTargetEffect(null, "", Zone.GRAVEYARD); effect.setText("exile up to two target cards from that player's graveyard"); this.addAbility(new SkullsnatcherTriggeredAbility(effect)); } private Skullsnatcher(final Skullsnatcher card) { super(card); } @Override public Skullsnatcher copy() { return new Skullsnatcher(this); } } class SkullsnatcherTriggeredAbility extends TriggeredAbilityImpl { SkullsnatcherTriggeredAbility(Effect effect) { super(Zone.BATTLEFIELD, effect, false); } SkullsnatcherTriggeredAbility(final SkullsnatcherTriggeredAbility ability) { super(ability); } @Override public SkullsnatcherTriggeredAbility copy() { return new SkullsnatcherTriggeredAbility(this); } @Override public boolean checkEventType(GameEvent event, Game game) { return event.getType() == GameEvent.EventType.DAMAGED_PLAYER; } @Override public boolean checkTrigger(GameEvent event, Game game) { if (((DamagedPlayerEvent) event).isCombatDamage() && event.getSourceId().equals(sourceId)) { FilterCard filter = new FilterCard("up to two target cards from that player's graveyard"); filter.add(new OwnerIdPredicate(event.getPlayerId())); filter.setMessage("up to two cards in " + game.getPlayer(event.getTargetId()).getLogName() + "'s graveyard"); this.getTargets().clear(); this.addTarget(new TargetCardInOpponentsGraveyard(0,2,filter)); return true; } return false; } @Override public String getTriggerPhrase() { return "Whenever {this} deals combat damage to a player, " ; } }
package org.apache.struts2.examples.quarkus; import com.opensymphony.xwork2.Action; import com.opensymphony.xwork2.Result; import org.apache.struts2.ServletActionContext; import javax.servlet.http.HttpServletResponse; public class IndexAction { private String message; private String name; public Result execute() { return (Result) invocation -> { HttpServletResponse response = ServletActionContext.getResponse(); response.getWriter().println("Hello!"); }; } public String hello() { message = "Hello " + name; return Action.SUCCESS; } public void setName(String name) { this.name = name; } public String getMessage() { return message; } }
// Copyright 2020 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef COMPONENTS_SYNC_TRUSTED_VAULT_TRUSTED_VAULT_REQUEST_H_ #define COMPONENTS_SYNC_TRUSTED_VAULT_TRUSTED_VAULT_REQUEST_H_ #include <memory> #include <string> #include "base/callback.h" #include "base/memory/weak_ptr.h" #include "components/sync/trusted_vault/trusted_vault_connection.h" #include "third_party/abseil-cpp/absl/types/optional.h" #include "url/gurl.h" struct CoreAccountId; namespace network { class SharedURLLoaderFactory; class SimpleURLLoader; } // namespace network namespace signin { struct AccessTokenInfo; } // namespace signin namespace syncer { class TrustedVaultAccessTokenFetcher; // Allows calling VaultService API using proto-over-http. class TrustedVaultRequest : public TrustedVaultConnection::Request { public: enum class HttpStatus { // Reported when server returns http status code 200 or 204. kSuccess, // Reported when server returns http status code 400 (bad request). kBadRequest, // Reported when server returns http status code 404 (not found). kNotFound, // Reported when server returns http status code 409 (conflict). kConflict, // Reported when access token fetch attempt was failed and request wasn't // sent. kAccessTokenFetchingFailure, // Reported when other network and http errors occur. kOtherError }; enum class HttpMethod { kGet, kPost }; using CompletionCallback = base::OnceCallback<void(HttpStatus status, const std::string& response_body)>; // |callback| will be run upon completion and it's allowed to delete this // object upon |callback| call. For GET requests, |serialized_request_proto| // must be null. For |POST| requests, it can be either way (optional payload). // |url_loader_factory| must not be null. TrustedVaultRequest( HttpMethod http_method, const GURL& request_url, const absl::optional<std::string>& serialized_request_proto, scoped_refptr<network::SharedURLLoaderFactory> url_loader_factory); TrustedVaultRequest(const TrustedVaultRequest& other) = delete; TrustedVaultRequest& operator=(const TrustedVaultRequest& other) = delete; ~TrustedVaultRequest() override; // Attempts to fetch access token and sends the request if fetch was // successful or populates error into ResultCallback otherwise. Should be // called at most once. void FetchAccessTokenAndSendRequest( const CoreAccountId& account_id, TrustedVaultAccessTokenFetcher* access_token_fetcher, CompletionCallback callback); private: void OnAccessTokenFetched( absl::optional<signin::AccessTokenInfo> access_token_info); void OnURLLoadComplete(std::unique_ptr<std::string> response_body); std::unique_ptr<network::SimpleURLLoader> CreateURLLoader( const std::string& access_token) const; // Running |completion_callback_| may cause destroying of this object, so all // callers of this method must not run any code afterwards. void RunCompletionCallbackAndMaybeDestroySelf( HttpStatus status, const std::string& response_body); const HttpMethod http_method_; const GURL request_url_; const absl::optional<std::string> serialized_request_proto_; const scoped_refptr<network::SharedURLLoaderFactory> url_loader_factory_; CompletionCallback completion_callback_; // Initialized lazily upon successful access token fetch. std::unique_ptr<network::SimpleURLLoader> url_loader_; base::WeakPtrFactory<TrustedVaultRequest> weak_ptr_factory_{this}; }; } // namespace syncer #endif // COMPONENTS_SYNC_TRUSTED_VAULT_TRUSTED_VAULT_REQUEST_H_
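The HttpStatus enum above documents how raw HTTP responses are bucketed for callers. Restated as data in a short Python sketch (a paraphrase of the C++ comments as a reading aid, not Chromium code):

def classify_http_status(code):
    # None models a failed access-token fetch: the request was never sent.
    if code is None:
        return "kAccessTokenFetchingFailure"
    buckets = {200: "kSuccess", 204: "kSuccess",
               400: "kBadRequest", 404: "kNotFound", 409: "kConflict"}
    return buckets.get(code, "kOtherError")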
#ifndef DQM_SiStripCommissioningAnalysis_FedCablingAlgorithm_H #define DQM_SiStripCommissioningAnalysis_FedCablingAlgorithm_H #include "FWCore/ParameterSet/interface/ParameterSet.h" #include "DQM/SiStripCommissioningAnalysis/interface/CommissioningAlgorithm.h" #include <vector> class FedCablingAnalysis; class TH1; /** @class FedCablingAlgorithm @author R.Bainbridge @brief Histogram-based analysis for connection loop. */ class FedCablingAlgorithm : public CommissioningAlgorithm { public: // ---------- Con(de)structors and typedefs ---------- FedCablingAlgorithm(const edm::ParameterSet& pset, FedCablingAnalysis* const); ~FedCablingAlgorithm() override { ; } /** Pointer to FED id histogram. */ inline const Histo& hFedId() const; /** Pointer to FED channel histogram. */ inline const Histo& hFedCh() const; private: FedCablingAlgorithm() { ; } /** Extracts and organises histograms. */ void extract(const std::vector<TH1*>&) override; /** Performs histogram anaysis. */ void analyse() override; private: /** Histo containing FED id */ Histo hFedId_; /** Histo containing FED channel */ Histo hFedCh_; }; const FedCablingAlgorithm::Histo& FedCablingAlgorithm::hFedId() const { return hFedId_; } const FedCablingAlgorithm::Histo& FedCablingAlgorithm::hFedCh() const { return hFedCh_; } #endif // DQM_SiStripCommissioningAnalysis_FedCablingAlgorithm_H
// Copyright (C) 2013 The Regents of the University of California (Regents). // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following // disclaimer in the documentation and/or other materials provided // with the distribution. // // * Neither the name of The Regents or University of California nor the // names of its contributors may be used to endorse or promote products // derived from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. // // Please contact the author of this library if you have any questions. // Author: <NAME> (<EMAIL>) #ifndef THEIA_IMAGE_KEYPOINT_DETECTOR_SIFT_DETECTOR_H_ #define THEIA_IMAGE_KEYPOINT_DETECTOR_SIFT_DETECTOR_H_ extern "C" { #include <vl/sift.h> } #include <vector> #include "theia/image/keypoint_detector/keypoint_detector.h" #include "theia/image/keypoint_detector/sift_parameters.h" #include "theia/util/util.h" namespace theia { class FloatImage; class Keypoint; // SIFT detector as originally proposed by <NAME>. This relies on the open // source software VLFeat (www.vlfeat.org) to detect keypoints. class SiftDetector : public KeypointDetector { public: // We only implement the standard 128-dimension descriptor. Specify the // number of image octaves, number of scale levels per octave, and where the // first octave should start. explicit SiftDetector(const SiftParameters& sift_params) : sift_params_(sift_params), sift_filter_(nullptr) {} SiftDetector(int num_octaves, int num_levels, int first_octave) : sift_params_(num_octaves, num_levels, first_octave), sift_filter_(nullptr) {} SiftDetector() : sift_filter_(nullptr) {} ~SiftDetector(); // Given an image, detect keypoints using the sift descriptor. bool DetectKeypoints(const FloatImage& image, std::vector<Keypoint>* keypoints); private: const SiftParameters sift_params_; VlSiftFilt* sift_filter_; DISALLOW_COPY_AND_ASSIGN(SiftDetector); }; } // namespace theia #endif // THEIA_IMAGE_KEYPOINT_DETECTOR_SIFT_DETECTOR_H_
/* * Copyright (c) 2016 Cisco and/or its affiliates. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef included_features_h #define included_features_h #include <vnet/vnet.h> #include <vnet/api_errno.h> #include <vnet/devices/devices.h> /** feature registration object */ typedef struct _vnet_feature_arc_registration { /** next registration in list of all registrations*/ struct _vnet_feature_arc_registration *next; /** Feature Arc name */ char *arc_name; /** Start nodes */ char **start_nodes; int n_start_nodes; /** End of the arc (optional, for consistency-checking) */ char *last_in_arc; /* Feature arc index, assigned by init function */ u8 feature_arc_index; u8 *arc_index_ptr; } vnet_feature_arc_registration_t; /* Enable feature callback. */ typedef clib_error_t *(vnet_feature_enable_disable_function_t) (u32 sw_if_index, int enable_disable); /** feature registration object */ typedef struct _vnet_feature_registration { /** next registration in list of all registrations*/ struct _vnet_feature_registration *next, *next_in_arc; /** Feature arc name */ char *arc_name; /** Graph node name */ char *node_name; /** Pointer to this feature index, filled in by vnet_feature_arc_init */ u32 *feature_index_ptr; u32 feature_index; /** Constraints of the form "this feature runs before X" */ char **runs_before; /** Constraints of the form "this feature runs after Y" */ char **runs_after; /** Function to enable/disable feature **/ vnet_feature_enable_disable_function_t *enable_disable_cb; } vnet_feature_registration_t; /** constraint registration object */ typedef struct _vnet_feature_constraint_registration { /** next constraint set in list of all registrations*/ struct _vnet_feature_constraint_registration *next, *next_in_arc; /** Feature arc name */ char *arc_name; /** Feature arc index, assigned by init function */ u8 feature_arc_index; /** Node names, to run in the specified order */ char **node_names; } vnet_feature_constraint_registration_t; typedef struct vnet_feature_config_main_t_ { vnet_config_main_t config_main; u32 *config_index_by_sw_if_index; } vnet_feature_config_main_t; typedef struct { /** feature arc configuration list */ vnet_feature_arc_registration_t *next_arc; uword **arc_index_by_name; /** feature path configuration lists */ vnet_feature_registration_t *next_feature; vnet_feature_registration_t **next_feature_by_arc; vnet_feature_constraint_registration_t *next_constraint; vnet_feature_constraint_registration_t **next_constraint_by_arc; uword **next_feature_by_name; /** feature config main objects */ vnet_feature_config_main_t *feature_config_mains; /** Save partial order results for show command */ char ***feature_nodes; /** bitmap of interfaces which have driver rx features configured */ uword **sw_if_index_has_features; /** feature reference counts by interface */ i16 **feature_count_by_sw_if_index; /** Feature arc index for device-input */ u8 device_input_feature_arc_index; /** convenience */ vlib_main_t *vlib_main; vnet_main_t *vnet_main; } vnet_feature_main_t; extern
vnet_feature_main_t feature_main; #ifndef CLIB_MARCH_VARIANT #define VNET_FEATURE_ARC_INIT(x,...) \ __VA_ARGS__ vnet_feature_arc_registration_t vnet_feat_arc_##x;\ static void __vnet_add_feature_arc_registration_##x (void) \ __attribute__((__constructor__)) ; \ static void __vnet_add_feature_arc_registration_##x (void) \ { \ vnet_feature_main_t * fm = &feature_main; \ vnet_feat_arc_##x.next = fm->next_arc; \ fm->next_arc = & vnet_feat_arc_##x; \ } \ static void __vnet_rm_feature_arc_registration_##x (void) \ __attribute__((__destructor__)) ; \ static void __vnet_rm_feature_arc_registration_##x (void) \ { \ vnet_feature_main_t * fm = &feature_main; \ vnet_feature_arc_registration_t *r = &vnet_feat_arc_##x; \ VLIB_REMOVE_FROM_LINKED_LIST (fm->next_arc, r, next); \ } \ __VA_ARGS__ vnet_feature_arc_registration_t vnet_feat_arc_##x #define VNET_FEATURE_INIT(x,...) \ __VA_ARGS__ vnet_feature_registration_t vnet_feat_##x; \ static void __vnet_add_feature_registration_##x (void) \ __attribute__((__constructor__)) ; \ static void __vnet_add_feature_registration_##x (void) \ { \ vnet_feature_main_t * fm = &feature_main; \ vnet_feat_##x.next = fm->next_feature; \ fm->next_feature = & vnet_feat_##x; \ } \ static void __vnet_rm_feature_registration_##x (void) \ __attribute__((__destructor__)) ; \ static void __vnet_rm_feature_registration_##x (void) \ { \ vnet_feature_main_t * fm = &feature_main; \ vnet_feature_registration_t *r = &vnet_feat_##x; \ VLIB_REMOVE_FROM_LINKED_LIST (fm->next_feature, r, next); \ } \ __VA_ARGS__ vnet_feature_registration_t vnet_feat_##x #define VNET_FEATURE_ARC_ORDER(x,...) \ __VA_ARGS__ vnet_feature_constraint_registration_t \ vnet_feature_constraint_##x; \ static void __vnet_add_constraint_registration_##x (void) \ __attribute__((__constructor__)) ; \ static void __vnet_add_constraint_registration_##x (void) \ { \ vnet_feature_main_t * fm = &feature_main; \ vnet_feature_constraint_##x.next = fm->next_constraint; \ fm->next_constraint = & vnet_feature_constraint_##x; \ } \ static void __vnet_rm_constraint_registration_##x (void) \ __attribute__((__destructor__)) ; \ static void __vnet_rm_constraint_registration_##x (void) \ { \ vnet_feature_main_t * fm = &feature_main; \ vnet_feature_constraint_registration_t *r = &vnet_feature_constraint_##x; \ VLIB_REMOVE_FROM_LINKED_LIST (fm->next_constraint, r, next); \ } \ __VA_ARGS__ vnet_feature_constraint_registration_t vnet_feature_constraint_##x #else #define VNET_FEATURE_ARC_INIT(x,...) \ extern vnet_feature_arc_registration_t __clib_unused vnet_feat_arc_##x; \ static vnet_feature_arc_registration_t __clib_unused __clib_unused_vnet_feat_arc_##x #define VNET_FEATURE_INIT(x,...) \ extern vnet_feature_registration_t __clib_unused vnet_feat_##x; \ static vnet_feature_registration_t __clib_unused __clib_unused_vnet_feat_##x #define VNET_FEATURE_ARC_ORDER(x,...) 
\ extern vnet_feature_constraint_registration_t \ __clib_unused vnet_feature_constraint_##x; \ static vnet_feature_constraint_registration_t __clib_unused \ __clib_unused_vnet_feature_constraint_##x #endif void vnet_config_update_feature_count (vnet_feature_main_t * fm, u8 arc, u32 sw_if_index, int is_add); u32 vnet_get_feature_index (u8 arc, const char *s); u8 vnet_get_feature_arc_index (const char *s); vnet_feature_registration_t *vnet_get_feature_reg (const char *arc_name, const char *node_name); int vnet_feature_enable_disable_with_index (u8 arc_index, u32 feature_index, u32 sw_if_index, int enable_disable, void *feature_config, u32 n_feature_config_bytes); int vnet_feature_enable_disable (const char *arc_name, const char *node_name, u32 sw_if_index, int enable_disable, void *feature_config, u32 n_feature_config_bytes); u32 vnet_feature_modify_end_node (u8 arc_index, u32 sw_if_index, u32 node_index); static_always_inline u32 vnet_get_feature_count (u8 arc, u32 sw_if_index) { vnet_feature_main_t *fm = &feature_main; return (fm->feature_count_by_sw_if_index[arc][sw_if_index]); } static inline vnet_feature_config_main_t * vnet_get_feature_arc_config_main (u8 arc_index) { vnet_feature_main_t *fm = &feature_main; if (arc_index == (u8) ~ 0) return 0; return &fm->feature_config_mains[arc_index]; } static_always_inline vnet_feature_config_main_t * vnet_feature_get_config_main (u16 arc) { vnet_feature_main_t *fm = &feature_main; return &fm->feature_config_mains[arc]; } static_always_inline int vnet_have_features (u8 arc, u32 sw_if_index) { vnet_feature_main_t *fm = &feature_main; return clib_bitmap_get (fm->sw_if_index_has_features[arc], sw_if_index); } static_always_inline u32 vnet_get_feature_config_index (u8 arc, u32 sw_if_index) { vnet_feature_main_t *fm = &feature_main; vnet_feature_config_main_t *cm = &fm->feature_config_mains[arc]; return vec_elt (cm->config_index_by_sw_if_index, sw_if_index); } static_always_inline void * vnet_feature_arc_start_with_data (u8 arc, u32 sw_if_index, u32 * next, vlib_buffer_t * b, u32 n_data_bytes) { vnet_feature_main_t *fm = &feature_main; vnet_feature_config_main_t *cm; cm = &fm->feature_config_mains[arc]; if (PREDICT_FALSE (vnet_have_features (arc, sw_if_index))) { vnet_buffer (b)->feature_arc_index = arc; b->current_config_index = vec_elt (cm->config_index_by_sw_if_index, sw_if_index); return vnet_get_config_data (&cm->config_main, &b->current_config_index, next, n_data_bytes); } return 0; } static_always_inline void * vnet_feature_arc_start_w_cfg_index (u8 arc, u32 sw_if_index, u32 * next, vlib_buffer_t * b, u32 cfg_index) { vnet_feature_main_t *fm = &feature_main; vnet_feature_config_main_t *cm; cm = &fm->feature_config_mains[arc]; vnet_buffer (b)->feature_arc_index = arc; b->current_config_index = cfg_index; return vnet_get_config_data (&cm->config_main, &b->current_config_index, next, 0); } static_always_inline void vnet_feature_arc_start (u8 arc, u32 sw_if_index, u32 * next0, vlib_buffer_t * b0) { vnet_feature_arc_start_with_data (arc, sw_if_index, next0, b0, 0); } static_always_inline void * vnet_feature_next_with_data (u32 * next0, vlib_buffer_t * b0, u32 n_data_bytes) { vnet_feature_main_t *fm = &feature_main; u8 arc = vnet_buffer (b0)->feature_arc_index; vnet_feature_config_main_t *cm = &fm->feature_config_mains[arc]; return vnet_get_config_data (&cm->config_main, &b0->current_config_index, next0, n_data_bytes); } static_always_inline void vnet_feature_next (u32 * next0, vlib_buffer_t * b0) { vnet_feature_next_with_data (next0, b0, 0); } 
static_always_inline void vnet_feature_next_u16 (u16 * next0, vlib_buffer_t * b0) { u32 next32; vnet_feature_next_with_data (&next32, b0, 0); *next0 = next32; } static_always_inline int vnet_device_input_have_features (u32 sw_if_index) { vnet_feature_main_t *fm = &feature_main; return vnet_have_features (fm->device_input_feature_arc_index, sw_if_index); } static_always_inline void vnet_feature_start_device_input_x1 (u32 sw_if_index, u32 * next0, vlib_buffer_t * b0) { vnet_feature_main_t *fm = &feature_main; vnet_feature_config_main_t *cm; u8 feature_arc_index = fm->device_input_feature_arc_index; cm = &fm->feature_config_mains[feature_arc_index]; if (PREDICT_FALSE (clib_bitmap_get (fm->sw_if_index_has_features[feature_arc_index], sw_if_index))) { /* * Save next0 so that the last feature in the chain * can skip ethernet-input if indicated... */ u16 adv; adv = device_input_next_node_advance[*next0]; vlib_buffer_advance (b0, -adv); vnet_buffer (b0)->feature_arc_index = feature_arc_index; b0->current_config_index = vec_elt (cm->config_index_by_sw_if_index, sw_if_index); vnet_get_config_data (&cm->config_main, &b0->current_config_index, next0, /* # bytes of config data */ 0); } } static_always_inline void vnet_feature_start_device_input_x2 (u32 sw_if_index, u32 * next0, u32 * next1, vlib_buffer_t * b0, vlib_buffer_t * b1) { vnet_feature_main_t *fm = &feature_main; vnet_feature_config_main_t *cm; u8 feature_arc_index = fm->device_input_feature_arc_index; cm = &fm->feature_config_mains[feature_arc_index]; if (PREDICT_FALSE (clib_bitmap_get (fm->sw_if_index_has_features[feature_arc_index], sw_if_index))) { /* * Save next0 so that the last feature in the chain * can skip ethernet-input if indicated... */ u16 adv; adv = device_input_next_node_advance[*next0]; vlib_buffer_advance (b0, -adv); adv = device_input_next_node_advance[*next1]; vlib_buffer_advance (b1, -adv); vnet_buffer (b0)->feature_arc_index = feature_arc_index; vnet_buffer (b1)->feature_arc_index = feature_arc_index; b0->current_config_index = vec_elt (cm->config_index_by_sw_if_index, sw_if_index); b1->current_config_index = b0->current_config_index; vnet_get_config_data (&cm->config_main, &b0->current_config_index, next0, /* # bytes of config data */ 0); vnet_get_config_data (&cm->config_main, &b1->current_config_index, next1, /* # bytes of config data */ 0); } } static_always_inline void vnet_feature_start_device_input_x4 (u32 sw_if_index, u32 * next0, u32 * next1, u32 * next2, u32 * next3, vlib_buffer_t * b0, vlib_buffer_t * b1, vlib_buffer_t * b2, vlib_buffer_t * b3) { vnet_feature_main_t *fm = &feature_main; vnet_feature_config_main_t *cm; u8 feature_arc_index = fm->device_input_feature_arc_index; cm = &fm->feature_config_mains[feature_arc_index]; if (PREDICT_FALSE (clib_bitmap_get (fm->sw_if_index_has_features[feature_arc_index], sw_if_index))) { /* * Save next0 so that the last feature in the chain * can skip ethernet-input if indicated... 
*/ u16 adv; adv = device_input_next_node_advance[*next0]; vlib_buffer_advance (b0, -adv); adv = device_input_next_node_advance[*next1]; vlib_buffer_advance (b1, -adv); adv = device_input_next_node_advance[*next2]; vlib_buffer_advance (b2, -adv); adv = device_input_next_node_advance[*next3]; vlib_buffer_advance (b3, -adv); vnet_buffer (b0)->feature_arc_index = feature_arc_index; vnet_buffer (b1)->feature_arc_index = feature_arc_index; vnet_buffer (b2)->feature_arc_index = feature_arc_index; vnet_buffer (b3)->feature_arc_index = feature_arc_index; b0->current_config_index = vec_elt (cm->config_index_by_sw_if_index, sw_if_index); b1->current_config_index = b0->current_config_index; b2->current_config_index = b0->current_config_index; b3->current_config_index = b0->current_config_index; vnet_get_config_data (&cm->config_main, &b0->current_config_index, next0, /* # bytes of config data */ 0); vnet_get_config_data (&cm->config_main, &b1->current_config_index, next1, /* # bytes of config data */ 0); vnet_get_config_data (&cm->config_main, &b2->current_config_index, next2, /* # bytes of config data */ 0); vnet_get_config_data (&cm->config_main, &b3->current_config_index, next3, /* # bytes of config data */ 0); } } #define VNET_FEATURES(...) (char*[]) { __VA_ARGS__, 0} clib_error_t *vnet_feature_arc_init (vlib_main_t * vm, vnet_config_main_t * vcm, char **feature_start_nodes, int num_feature_start_nodes, char *last_in_arc, vnet_feature_registration_t * first_reg, vnet_feature_constraint_registration_t * first_const_set, char ***in_feature_nodes); void vnet_interface_features_show (vlib_main_t * vm, u32 sw_if_index, int verbose); typedef void (*vnet_feature_update_cb_t) (u32 sw_if_index, u8 arc_index, u8 is_enable, void *cb); extern void vnet_feature_register (vnet_feature_update_cb_t cb, void *data); int vnet_feature_is_enabled (const char *arc_name, const char *feature_node_name, u32 sw_if_index); #endif /* included_feature_h */ /* * fd.io coding-style-patch-verification: ON * * Local Variables: * eval: (c-set-style "gnu") * End: */
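All of the fast-path helpers above share one pattern: if the interface has any features enabled on the arc, the per-interface config index is stored in the buffer metadata, and each feature then pops its successor off the config chain via vnet_get_config_data(). The following Python sketch is purely conceptual, with invented names, to make that walk visible; the real mechanism is the vnet_config_main_t chain and next-node indices, not a list of callables.

def walk_feature_arc(enabled_features, sw_if_index, buffer):
    # enabled_features maps an interface to its ordered feature list; the
    # buffer carries an index into that list, much as b->current_config_index
    # indexes the vnet config chain in the C above.
    chain = enabled_features.get(sw_if_index, [])
    buffer["current_config_index"] = 0          # vnet_feature_arc_start()
    visited = []
    while buffer["current_config_index"] < len(chain):
        node = chain[buffer["current_config_index"]]
        buffer["current_config_index"] += 1     # vnet_feature_next()
        visited.append(node)                    # the node would process the packet here
    return visited

# walk_feature_arc({5: ["ip4-acl-in", "nat44-in"]}, 5, {}) yields both nodes in order.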
7,315
380
#include "onion_router_descriptor_parser.h" #include <mini/crypto/base64.h> namespace mini::tor { constexpr onion_router_descriptor_parser::control_word_list onion_router_descriptor_parser::control_words; void onion_router_descriptor_parser::parse( onion_router* router, const string_ref descriptor ) { string_collection lines = static_cast<string>(descriptor).split("\n"); document_location current_location = document_location::control_word; string current_key; for (auto&& line : lines) { string_collection splitted_line = line.split(" "); string_hash control_word_hash = splitted_line[0]; // // onion-key // if (line == control_words[control_word_onion_key]) { current_location = document_location::onion_key; continue; } // // signing-key // else if (line == control_words[control_word_signing_key]) { current_location = document_location::signing_key; continue; } // // -----BEGIN RSA PUBLIC KEY----- // else if (line == control_words[control_word_key_begin]) { if (current_location == document_location::onion_key) { current_location = document_location::onion_key_content; } else if (current_location == document_location::signing_key) { current_location = document_location::signing_key_content; } continue; } // // -----END RSA PUBLIC KEY----- // else if (line == control_words[control_word_key_end]) { if (current_location == document_location::onion_key_content) { router->set_onion_key(crypto::base64::decode(current_key)); } else if (current_location == document_location::signing_key_content) { router->set_signing_key(crypto::base64::decode(current_key)); } current_location = document_location::control_word; current_key.clear(); } else if (current_location == document_location::onion_key_content || current_location == document_location::signing_key_content) { current_key += line; } else if (control_word_hash == control_words[control_word_ntor_onion_key]) { router->set_ntor_onion_key(crypto::base64::decode(splitted_line[1])); } } } }
914
11,433
<filename>allennlp/modules/backbones/vilbert_backbone.py import logging from typing import Dict, List import torch from overrides import overrides from allennlp.data.fields.text_field import TextFieldTensors from allennlp.data.vocabulary import Vocabulary from allennlp.modules.backbones.backbone import Backbone from allennlp.modules.transformer import ( BiModalEncoder, ImageFeatureEmbeddings, TransformerEmbeddings, TransformerPooler, ) logger = logging.getLogger(__name__) @Backbone.register("vilbert") @Backbone.register("vilbert_from_huggingface", constructor="from_huggingface_model_name") class VilbertBackbone(Backbone): """ Uses a Vilbert model as a `Backbone`. Registered as a `Backbone` with name "vilbert". """ def __init__( self, vocab: Vocabulary, text_embeddings: TransformerEmbeddings, image_embeddings: ImageFeatureEmbeddings, encoder: BiModalEncoder, pooled_output_dim: int, fusion_method: str = "sum", dropout: float = 0.1, vocab_namespace: str = "tokens", ) -> None: super().__init__() self.fusion_method = fusion_method self.text_embeddings = text_embeddings self.image_embeddings = image_embeddings self.encoder = encoder self.t_pooler = TransformerPooler(encoder.hidden_size1, pooled_output_dim) self.v_pooler = TransformerPooler(encoder.hidden_size2, pooled_output_dim) self.dropout = torch.nn.Dropout(dropout) self._vocab = vocab self._namespace = vocab_namespace @classmethod def from_huggingface_model_name( cls, vocab: Vocabulary, model_name: str, image_feature_dim: int, image_num_hidden_layers: int, image_hidden_size: int, image_num_attention_heads: int, combined_hidden_size: int, combined_num_attention_heads: int, pooled_output_dim: int, image_intermediate_size: int, image_attention_dropout: float, image_hidden_dropout: float, image_biattention_id: List[int], text_biattention_id: List[int], text_fixed_layer: int, image_fixed_layer: int, fusion_method: str = "sum", ): text_embeddings = TransformerEmbeddings.from_pretrained_module(model_name) image_embeddings = ImageFeatureEmbeddings( feature_size=image_feature_dim, embedding_size=image_hidden_size, dropout=image_hidden_dropout, ) encoder = BiModalEncoder.from_pretrained_module( model_name, num_hidden_layers2=image_num_hidden_layers, hidden_size2=image_hidden_size, num_attention_heads2=image_num_attention_heads, combined_hidden_size=combined_hidden_size, combined_num_attention_heads=combined_num_attention_heads, intermediate_size2=image_intermediate_size, attention_dropout2=image_attention_dropout, hidden_dropout2=image_hidden_dropout, biattention_id1=text_biattention_id, biattention_id2=image_biattention_id, fixed_layer1=text_fixed_layer, fixed_layer2=image_fixed_layer, ) return cls( vocab=vocab, text_embeddings=text_embeddings, image_embeddings=image_embeddings, encoder=encoder, pooled_output_dim=pooled_output_dim, fusion_method=fusion_method, ) @overrides def forward( self, # type: ignore box_features: torch.Tensor, box_coordinates: torch.Tensor, box_mask: torch.Tensor, text: TextFieldTensors, ) -> Dict[str, torch.Tensor]: if "token_ids" in text["tokens"]: token_ids = text["tokens"]["token_ids"] else: token_ids = text["tokens"]["tokens"] if token_ids.shape[:-1] != box_features.shape[:-2]: raise ValueError( "Tokens and boxes must have the same batch size and extra " "dimensions (if applicable). 
Token size {0} did not match " "box feature size {1}.".format(token_ids.shape[:-1], box_features.shape[:-2]) ) # Shape: (batch_size, num_tokens) token_type_ids = text["tokens"].get("type_ids") # Shape: (batch_size, num_tokens) attention_mask = text["tokens"].get("mask") box_feature_dimensions = box_features.shape feature_size = box_feature_dimensions[-1] rolled_dimensions = box_feature_dimensions[:-2] rolled_dimensions_product = 1 for dim in rolled_dimensions: rolled_dimensions_product *= dim token_ids = token_ids.view(rolled_dimensions_product, token_ids.shape[-1]) if token_type_ids is not None: token_type_ids = token_type_ids.view( rolled_dimensions_product, token_type_ids.shape[-1] ) if attention_mask is not None: attention_mask = attention_mask.view( rolled_dimensions_product, attention_mask.shape[-1] ) box_features = box_features.view( rolled_dimensions_product, box_feature_dimensions[-2], feature_size ) box_coordinates = box_coordinates.view( rolled_dimensions_product, box_coordinates.shape[-2], box_coordinates.shape[-1], ) box_mask = box_mask.view(rolled_dimensions_product, box_mask.shape[-1]) # Shape: (rolled_dimensions_product, num_tokens, embedding_dim) embedding_output = self.text_embeddings(token_ids, token_type_ids) # this attention mask is more simple than the triangular masking of # causal attention used in OpenAI GPT, we just need to prepare the # broadcast dimension here. if attention_mask is not None: extended_attention_mask = attention_mask else: extended_attention_mask = None extended_image_attention_mask = box_mask # Shape: (rolled_dimensions_product, num_boxes, image_embedding_dim) v_embedding_output = self.image_embeddings(box_features, box_coordinates) encoded_layers_t, encoded_layers_v = self.encoder( embedding_output, v_embedding_output, extended_attention_mask, extended_image_attention_mask, ) # Shape: (rolled_dimensions_product, num_tokens, embedding_dim) sequence_output_t = encoded_layers_t[:, :, :, -1] # Shape: (rolled_dimensions_product, num_boxes, image_embedding_dim) sequence_output_v = encoded_layers_v[:, :, :, -1] # Shape: (rolled_dimensions_product, pooled_output_dim) pooled_output_t = self.t_pooler(sequence_output_t) # Shape: (rolled_dimensions_product, pooled_output_dim) pooled_output_v = self.v_pooler(sequence_output_v) sequence_output_t = sequence_output_t.view( rolled_dimensions + (sequence_output_t.shape[-2], sequence_output_t.shape[-1]) ) sequence_output_v = sequence_output_v.view( rolled_dimensions + (sequence_output_v.shape[-2], sequence_output_v.shape[-1]) ) pooled_output_t = pooled_output_t.view(rolled_dimensions + (pooled_output_t.shape[-1],)) pooled_output_v = pooled_output_v.view(rolled_dimensions + (pooled_output_v.shape[-1],)) if self.fusion_method == "sum": pooled_output = self.dropout(pooled_output_t + pooled_output_v) elif self.fusion_method == "mul": pooled_output = self.dropout(pooled_output_t * pooled_output_v) else: raise ValueError(f"Fusion method '{self.fusion_method}' not supported") return { "encoded_boxes": sequence_output_v, "encoded_boxes_mask": box_mask, "encoded_boxes_pooled": pooled_output_v, "encoded_text": sequence_output_t, "encoded_text_mask": attention_mask, "encoded_text_pooled": pooled_output_t, "pooled_boxes_and_text": pooled_output, }
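A hedged construction sketch (not from the file above): the parameter values are illustrative, and "bert-base-uncased" is just an example model name passed to from_huggingface_model_name.

# Hedged sketch; all keyword arguments match the constructor shown above,
# but the concrete values are assumptions.
from allennlp.data.vocabulary import Vocabulary
from allennlp.modules.backbones.vilbert_backbone import VilbertBackbone

backbone = VilbertBackbone.from_huggingface_model_name(
    vocab=Vocabulary(),
    model_name="bert-base-uncased",
    image_feature_dim=1024,
    image_num_hidden_layers=6,
    image_hidden_size=1024,
    image_num_attention_heads=8,
    combined_hidden_size=1024,
    combined_num_attention_heads=8,
    pooled_output_dim=1024,
    image_intermediate_size=1024,
    image_attention_dropout=0.1,
    image_hidden_dropout=0.1,
    image_biattention_id=[0, 1],
    text_biattention_id=[10, 11],
    text_fixed_layer=0,
    image_fixed_layer=0,
)
# forward() then takes box_features, box_coordinates, box_mask and a
# TextFieldTensors dict, and returns the pooled/sequence outputs listed above.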
3,746
5,169
<reponame>Gantios/Specs
{
  "name": "BarcodeEasyScan",
  "version": "0.1.0",
  "summary": "Scan barcodes and QR codes with a few lines of Swift. No xibs or storyboards",
  "description": "Scan your barcode by implementing a delegate which provides the string result. No need to write boilerplate code. Just install the pod, and implement a delegate.",
  "homepage": "https://github.com/harshalrj25/BarcodeEasyScan",
  "license": {
    "type": "MIT",
    "file": "LICENSE"
  },
  "authors": {
    "harshalrj25": "<EMAIL>"
  },
  "source": {
    "git": "https://github.com/harshalrj25/BarcodeEasyScan.git",
    "tag": "0.1.0"
  },
  "swift_version": "4.2",
  "platforms": {
    "ios": "10.0"
  },
  "source_files": "BarcodeEasyScan/Classes/**/*"
}
291
410
#include "SetVideoSourceCmd.h" namespace ofx { namespace piMapper { SetVideoSourceCmd::SetVideoSourceCmd(std::string sourceId, bool loop, BaseSurface * surface, SourcesEditorWidget * sourcesEditor){ _sourceId = sourceId; _loop = loop; _surface = surface; _sourcesEditor = sourcesEditor; } void SetVideoSourceCmd::exec(){ ofLogNotice("SetVideoSourceCmd", "exec"); _oldSourceTypeHelper = _surface->getSource()->getType(); if(_surface->getSource()->isLoadable()){ _oldSourceId = _surface->getSource()->getPath(); }else{ _oldSourceId = _surface->getSource()->getName(); } _sourcesEditor->setVideoSource(_sourceId); BaseSource * src = _surface->getSource(); VideoSource * vid = dynamic_cast<VideoSource *>(src); vid->setLoop(_loop); } void SetVideoSourceCmd::undo(){ ofLogNotice("SetVideoSourceCmd", "undo"); if(_oldSourceTypeHelper == SourceType::SOURCE_TYPE_IMAGE){ _sourcesEditor->setImageSource(_oldSourceId); }else if(_oldSourceTypeHelper == SourceType::SOURCE_TYPE_VIDEO){ _sourcesEditor->setVideoSource(_oldSourceId); }else if(_oldSourceTypeHelper == SourceType::SOURCE_TYPE_FBO){ _sourcesEditor->setFboSource(_oldSourceId); }else if(_oldSourceTypeHelper == SourceType::SOURCE_TYPE_NONE){ _sourcesEditor->clearSource(); } _surface = 0; _sourcesEditor = 0; } } // namespace piMapper } // namespace ofx
478
711
/* * Copyright 2015 JBoss Inc * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.apiman.gateway.engine.beans.util; import java.io.Serializable; import java.nio.ByteOrder; import java.util.AbstractMap; import java.util.ArrayList; import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Set; import java.util.TreeMap; import java.util.stream.Collectors; import java.util.stream.IntStream; import net.openhft.hashing.Access; import net.openhft.hashing.LongHashFunction; import static java.nio.ByteOrder.BIG_ENDIAN; import static java.nio.ByteOrder.LITTLE_ENDIAN; /** * A simple multimap able to accept multiple values for a given key. * <p> * The implementation is specifically tuned for headers (such as HTTP), where * the number of entries tends to be moderate, but are frequently accessed. * </p> * <p> * This map expects ASCII for key strings. * </p> * <p> * Case is ignored (avoiding {@link String#toLowerCase()} <code>String</code> allocation) * before being hashed (<code>xxHash</code>). * </p> * <p> * Constraints: * <ul> * <li><strong>Not thread-safe.</strong></li> * <li><tt>Null</tt> is <strong>not</strong> a valid key.</li> * </ul> * </p> * * @author <NAME> {@literal <<EMAIL>>} */ public class CaseInsensitiveStringMultiMap implements IStringMultiMap, Serializable { private static final long serialVersionUID = -2052530527825235543L; private static final Access<String> LOWER_CASE_ACCESS_INSTANCE = new LowerCaseAccess(); //private static final float MAX_LOAD_FACTOR = 0.75f; private Element[] hashArray; private int keyCount = 0; public CaseInsensitiveStringMultiMap() { hashArray = new Element[32]; } public CaseInsensitiveStringMultiMap(int sizeHint) { hashArray = new Element[(int) (sizeHint*1.25)]; } @Override public Iterator<Entry<String, String>> iterator() { return new ElemIterator(hashArray); } @Override public IStringMultiMap put(String key, String value) { long keyHash = getHash(key); int idx = getIndex(keyHash); if (hashArray[idx] == null) { keyCount++; hashArray[idx] = new Element(key, value, keyHash); } else { remove(key); add(key, value); } return this; } private int getIndex(long hash) { return Math.abs((int) (hash % hashArray.length)); } private long getHash(String text) { return LongHashFunction.xx().hash(text, LOWER_CASE_ACCESS_INSTANCE, 0, text.length()); } @Override public IStringMultiMap putAll(Map<String, String> map) { map.entrySet().stream() .forEachOrdered(pair -> put(pair.getKey(), pair.getValue())); return this; } @Override public IStringMultiMap add(String key, String value) { long hash = getHash(key); int idx = getIndex(hash); Element existingHead = hashArray[idx]; if (existingHead == null) { hashArray[idx] = new Element(key, value, hash); keyCount++; } else { // Last element appears first in list. if (existingHead.getByHash(hash, key) == null) { keyCount++; // If it's a unique key collision and we've not actually seen this key before. 
} Element newHead = new Element(key, value, hash); newHead.previous = existingHead; hashArray[idx] = newHead; } return this; } public IStringMultiMap add(String key, List<String> values) { values.forEach((value) -> add(key, value)); return this; } private Element getElement(String key) { long hash = getHash(key); Element head = hashArray[getIndex(hash)]; return head == null ? null : head.getByHash(hash, key); } @Override public IStringMultiMap addAll(Map<String, String> map) { map.entrySet().stream() .forEachOrdered(pair -> put(pair.getKey(), pair.getValue())); return this; } @Override public IStringMultiMap addAll(IStringMultiMap map) { map.getEntries().stream() .forEachOrdered(pair -> add(pair.getKey(), pair.getValue())); return this; } @Override public IStringMultiMap remove(String key) { long hash = getHash(key); int idx = getIndex(hash); Element headElem = hashArray[idx]; if (headElem != null) hashArray[idx] = headElem.removeByHash(hash, key); return this; } @Override public String get(String key) { Element elem = getElement(key); // Just return the first value, ignore all others (i.e. most recently added one) return elem == null ? null : elem.getValue(); } @Override public List<Entry<String, String>> getAllEntries(String key) { if (keyCount > 0) { Element elem = getElement(key); return elem == null ? Collections.emptyList() : elem.getAllEntries(key, getHash(key)); } return Collections.emptyList(); } @Override public List<String> getAll(String key) { if (keyCount > 0) { Element elem = getElement(key); return elem == null ? Collections.emptyList() : elem.getAllValues(key, getHash(key)); } return Collections.emptyList(); } @Override public int size() { return keyCount; } @Override public List<Entry<String, String>> getEntries() { List<Entry<String, String>> entryList = new ArrayList<>(keyCount); // Look at all top-level elements for (Element oElem : hashArray) { if (oElem != null) { // Add any non-null elements // If there are multiple values, will also add those for (Element iElem = oElem; iElem != null; iElem = iElem.getNext()) { entryList.add(iElem); } } } return entryList; } @Override public Map<String, String> toMap() { Map<String, String> map = new TreeMap<>(String.CASE_INSENSITIVE_ORDER); // Look at all top-level elements for (Element oElem : hashArray) { if (oElem != null) { // Must check all bucket entries as can be hash collision for (Entry<String, String> iElem : oElem.getAllEntries()) { // Add any non-null ones that aren't already in (NB: LIFO) if (!map.containsKey(iElem.getKey())) map.put(iElem.getKey(), iElem.getValue()); } } } return map; } @Override public boolean containsKey(String key) { long hash = getHash(key); int idx = getIndex(hash); // Check if there's an entry the idx, *and* check that the key is not just a collision return hashArray[idx] != null && hashArray[idx].getByHash(hash, key) != null; } @Override public Set<String> keySet() { // TODO return toMap().keySet(); } @Override public IStringMultiMap clear() { hashArray = new Element[hashArray.length]; keyCount = 0; return this; } @Override @SuppressWarnings("nls") public String toString() { String elems = keySet().stream() .map(this::getAllEntries) .map(pairs -> pairs.get(0).getKey() + " => [" + joinValues(pairs) + "]") .collect(Collectors.joining(", ")); return "{" + elems + "}"; } @SuppressWarnings("nls") private String joinValues(List<Entry<String, String>> pairs) { return IntStream.rangeClosed(1, pairs.size()) .mapToObj(i -> pairs.get(pairs.size() - i)) .map(Entry::getValue) .collect(Collectors.joining(", 
")); } private static boolean insensitiveEquals(String a, String b) { if (a.length() != b.length()) return false; for (int i = 0; i < a.length(); i++) { char charA = a.charAt(i); char charB = b.charAt(i); // If characters match, just continue if (charA == charB) continue; // If charA is upper and we didn't already match above // then charB may be lower (and possibly still not match). if (charA >= 'A' && charA <= 'Z' && (charA + 32 != charB)) return false; // If charB is upper and we didn't already match above // then charA may be lower (and possibly still not match). if (charB >= 'A' && charB <= 'Z' && (charB + 32 != charA)) return false; // Otherwise matches } return true; } private final class Element extends AbstractMap.SimpleImmutableEntry<String, String> implements Iterable<Entry<String, String>> { private static final long serialVersionUID = 4505963331324890429L; private final long keyHash; private Element previous = null; /** * The <tt>keyHash</tt> is stored because we may have duplicate entries for a * given hash bucket for two reasons: * * <ol> * <li> Multiple value insertions for the same key (standard multimap behaviour) * <li> Hash collision. The key is different, but maps to the same bucket. * </ol> * * We can use the stored hash to rapidly differentiate between these scenarios * and in various operations such as delete. * * @param key the key * @param value the value * @param keyHash the hash <strong>(NB: full hash, not just bucket index!)</strong> */ public Element(String key, String value, long keyHash) { super(key, value); this.keyHash = keyHash; } public Element removeByHash(long hash, String key) { Element current = this; Element newHead = null; Element link = null; boolean removedAny = false; while (current != null) { // If matches hash and key, should discard. if (current.eq(hash, key)) { Element prev = current.previous; current.previous = null; current = prev; removedAny = true; if (link != null) link.previous = prev; } else if (newHead == null) { newHead = link = current; current = newHead.previous; } else { link.previous = link = current; current = current.previous; } } if (removedAny) keyCount--; return newHead; } private boolean eq(long hashCode, String key) { return getKeyHash() == hashCode && insensitiveEquals(key, getKey()); } // NB: Even if hashes match, tiny chance of collision - so also check key. public Element getByHash(long hashCode, String key) { return getKeyHash() == hashCode && insensitiveEquals(key, getKey()) ? 
this : getNext(hashCode, key); } @Override public Iterator<Entry<String, String>> iterator() { return getAllEntries().iterator(); } public List<Entry<String, String>> getAllEntries(String key, long hashCode) { List<Entry<String, String>> allElems = new ArrayList<>(); for (Element elem = this; elem != null; elem = elem.getNext()) { if (elem.getKeyHash() == hashCode && insensitiveEquals(key, elem.getKey())) { allElems.add(elem); } } return allElems; } public List<Entry<String, String>> getAllEntries() { List<Entry<String, String>> allElems = new ArrayList<>(); for (Element elem = this; elem != null; elem = elem.getNext()) { allElems.add(elem); } return allElems; } public long getKeyHash() { return keyHash; } public List<String> getAllValues(String key, long hashCode) { List<String> allElems = new ArrayList<>(); for (Element elem = this; elem != null; elem = elem.getNext()) { if (elem.getKeyHash() == hashCode && insensitiveEquals(key, elem.getKey())) { allElems.add(elem.getValue()); } } return allElems; } public boolean hasNext() { return previous != null; } public Element getNext() { return previous; } public Element getNext(long hash, String key) { Element elem = this; while (elem.previous != null) { elem = elem.previous; if (elem.getKeyHash() == hash && insensitiveEquals(elem.getKey(), key)) return elem; } return null; } } private static final class ElemIterator implements Iterator<Entry<String, String>> { final Element[] hashTable; Element next; Element selected; int idx = 0; public ElemIterator(Element[] hashTable) { this.hashTable = hashTable; } @Override public boolean hasNext() { if (next == null) setNext(); return next != null; } @Override public Entry<String, String> next() { selected = next; setNext(); return selected; } private void setNext() { // If already have a selected element, then select next value with same key if (selected != null && selected.hasNext()) { next = selected.getNext(); } else { // Otherwise, look through table until next non-null element found while (idx < hashTable.length) { if (hashTable[idx] != null) { // Found non-null element next = hashTable[idx]; // Set it as next idx++; // Increment index so we'll look at the following element next return; } idx++; } next = null; } } } private static final class LowerCaseAccess extends Access<String> { @Override public int getByte(String input, long offset) { char c = input.charAt((int)offset); if (c >= 'A' && c <= 'Z') { return c + 32; // toLower } return c; } @Override public ByteOrder byteOrder(String input) { return ByteOrder.nativeOrder(); } @Override protected Access<String> reverseAccess() { return new ReverseAccess<>(LOWER_CASE_ACCESS_INSTANCE); } } /** * The default reverse byte order delegating {@code Access} class. 
     */
    private static class ReverseAccess<T> extends Access<T> {
        final Access<T> access;

        private ReverseAccess(final Access<T> access) {
            this.access = access;
        }

        @Override
        public long getLong(final T input, final long offset) {
            return Long.reverseBytes(access.getLong(input, offset));
        }

        @Override
        public long getUnsignedInt(final T input, final long offset) {
            return Long.reverseBytes(access.getUnsignedInt(input, offset)) >>> 32;
        }

        @Override
        public int getInt(final T input, final long offset) {
            return Integer.reverseBytes(access.getInt(input, offset));
        }

        @Override
        public int getUnsignedShort(final T input, final long offset) {
            return Integer.reverseBytes(access.getUnsignedShort(input, offset)) >>> 16;
        }

        @Override
        public int getShort(final T input, final long offset) {
            return Integer.reverseBytes(access.getShort(input, offset)) >> 16;
        }

        @Override
        public int getUnsignedByte(final T input, final long offset) {
            return access.getUnsignedByte(input, offset);
        }

        @Override
        public int getByte(final T input, final long offset) {
            return access.getByte(input, offset);
        }

        @Override
        public ByteOrder byteOrder(final T input) {
            return LITTLE_ENDIAN == access.byteOrder(input) ? BIG_ENDIAN : LITTLE_ENDIAN;
        }

        @Override
        protected Access<T> reverseAccess() {
            return access;
        }
    }
}
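A brief usage sketch (not part of the class above), showing the case-insensitive lookup and multi-value behaviour described in the class comment; only methods defined by the class are used.

// Hedged usage sketch.
static void demo() {
    CaseInsensitiveStringMultiMap headers = new CaseInsensitiveStringMultiMap();
    headers.put("Content-Type", "application/json");
    headers.add("X-Trace", "a");
    headers.add("X-Trace", "b");

    headers.get("content-type"); // "application/json" -- lookup ignores case
    headers.getAll("x-trace");   // ["b", "a"] -- most recently added value first
    headers.size();              // 2 -- size() counts distinct keys
}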
7,773
445
#if !defined(TZ)
# define TZ ""
# define SPACER ""
#else
# define SPACER " "
#endif

#if defined(_MSC_VER) && !defined(__VERSION__)
# if _MSC_VER >= 2000
#  error "define the version string here"
# elif _MSC_VER >= 1920
# define __VERSION__ "Microsoft Visual Studio 2019 version 16.x"
# elif _MSC_VER >= 1910
# define __VERSION__ "Microsoft Visual Studio 2017 version 15.x"
# elif _MSC_VER >= 1900
# define __VERSION__ "Microsoft Visual Studio 2015 version 14"
# elif _MSC_VER >= 1800
// That Microsoft appears to be superstitious inspires such confidence...
# define __VERSION__ "Microsoft Visual Studio 2013 version 12.x"
# elif _MSC_VER >= 1700
# define __VERSION__ "Microsoft Visual Studio 2012 version 11.x"
# elif _MSC_VER >= 1600
# define __VERSION__ "Microsoft Visual Studio 2010 version 10.x"
# elif _MSC_VER >= 1500
# define __VERSION__ "Microsoft Visual Studio 2008 version 9.x"
# elif _MSC_VER >= 1400
# define __VERSION__ "Microsoft Visual Studio 2005 version 8.x"
# elif _MSC_VER >= 1300
# define __VERSION__ "Microsoft Visual Studio 2003 version 7.x"
# elif _MSC_VER >= 1200
# define __VERSION__ "Microsoft Visual Studio 6"
# elif _MSC_VER >= 1100
# define __VERSION__ "Microsoft Visual Studio 5"
# else
#  error "define the version string here"
# endif //_MSC_VER
#endif //MSVC versions && !defined(__VERSION__)

char vmBuildString[] = \
	"Win32 built on " \
	__DATE__ " " __TIME__ SPACER TZ \
	" Compiler: " __VERSION__ ;
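A small sketch of the assumed consumer side (not from this file): another translation unit would declare the array as extern and print it, e.g. on startup.

/* Hedged sketch; the surrounding function is an assumption. */
#include <stdio.h>

extern char vmBuildString[];

void printBuildString(void)
{
	printf("%s\n", vmBuildString);
}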
466
481
from . import topic from . import maxent
10
339
<filename>integration/mediation-tests/tests-platform/tests-rabbitmq/src/test/java/org/wso2/carbon/esb/rabbitmq/utils/RabbitMQServerInstance.java
/*
 * Copyright (c) 2017, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
 *
 * WSO2 Inc. licenses this file to you under the Apache License,
 * Version 2.0 (the "License"); you may not use this file except
 * in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.wso2.carbon.esb.rabbitmq.utils;

import org.apache.commons.lang3.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.testng.Assert;
import org.wso2.esb.integration.common.utils.clients.rabbitmqclient.RabbitMQConsumerClient;
import org.wso2.esb.integration.common.utils.clients.rabbitmqclient.RabbitMQProducerClient;
import org.wso2.esb.integration.common.utils.common.TestConfigurationProvider;

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import javax.xml.xpath.XPathExpressionException;

public class RabbitMQServerInstance {

    private static final Log log = LogFactory.getLog(RabbitMQServerInstance.class);

    private static final String RABBITMQ_HOST_XPATH = "//rabbitmq/host";
    private static final String RABBITMQ_PORT_XPATH = "//rabbitmq/port";
    private static final String DOCKER_HOST = "DOCKER_HOST";

    private static final String USER_NAME = "guest";
    private static final String PASSWORD = "<PASSWORD>";

    /**
     * Returns the host on which docker is running.
     * <p>
     * If the docker instance was started by passing an environment variable to set the host, the given host will be
     * returned. If not, localhost will be returned.
     *
     * @return the IP of the host on which docker is running.
     */
    public static String getHost() {
        String host = "localhost";
        try {
            if (DOCKER_HOST.equalsIgnoreCase(
                    TestConfigurationProvider.getAutomationContext().getConfigurationValue(RABBITMQ_HOST_XPATH))) {
                String dockerHost = System.getenv("DOCKER_HOST");
                if ((null != dockerHost) && (!StringUtils.isEmpty(dockerHost))) {
                    URI uri;
                    try {
                        uri = new URI(dockerHost);
                        host = uri.getHost();
                    } catch (URISyntaxException e) {
                        log.error("Error getting DOCKER_HOST", e);
                    }
                }
            }
        } catch (XPathExpressionException e) {
            log.warn("Error reading the rabbitmq host in automation.xml. Proceed with default value " + host);
        }
        return host;
    }

    public static int getPort() {
        //set default port
        int port = 5672;
        try {
            port = Integer.parseInt(
                    TestConfigurationProvider.getAutomationContext().getConfigurationValue(RABBITMQ_PORT_XPATH));
        } catch (XPathExpressionException e) {
            log.warn("Error reading the rabbitmq port in automation.xml. Proceed with default value " + port);
        }
        return port;
    }

    /**
     * Initialize the RabbitMQ broker by declaring the exchange and a routing key that the producer will be
     * publishing to.
     *
     * @return the RabbitMQ producer that was used for the declaration.
     */
    public static RabbitMQProducerClient createProducerWithDeclaration(String exchange, String routingKey)
            throws IOException {
        RabbitMQProducerClient sender = new RabbitMQProducerClient(getHost(), getPort(), USER_NAME, PASSWORD);
        sender.declareAndConnect(exchange, routingKey);
        return sender;
    }

    /**
     * Initialize the RabbitMQ broker by declaring the exchange and a routing key that the consumer will be bound to.
     *
     * @return the RabbitMQ consumer that was used for the declaration.
     */
    public static RabbitMQConsumerClient createConsumerWithDeclaration(String exchange, String routingKey)
            throws IOException {
        RabbitMQConsumerClient consumer = new RabbitMQConsumerClient(getHost());
        consumer.declareAndConnect(exchange, routingKey);
        return consumer;
    }
}
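A hedged sketch of how a test might use these helpers; the publish/consume steps are deliberately left as comments because only declareAndConnect() is visible in the class above.

// Hedged usage sketch for a test method; exception handling elided.
RabbitMQProducerClient sender =
        RabbitMQServerInstance.createProducerWithDeclaration("exchange1", "key1");
RabbitMQConsumerClient consumer =
        RabbitMQServerInstance.createConsumerWithDeclaration("exchange1", "key1");
// ...publish test messages with the sender's own API, then assert on what the
// consumer received; both clients are already connected to the broker at
// getHost()/getPort().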
1,741
2,151
// Copyright 2015 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef SANDBOX_LINUX_SYSTEM_HEADERS_LINUX_TIME_H_ #define SANDBOX_LINUX_SYSTEM_HEADERS_LINUX_TIME_H_ #include <time.h> #if !defined(CPUCLOCK_CLOCK_MASK) #define CPUCLOCK_CLOCK_MASK 3 #endif #if !defined(CLOCKFD) #define CLOCKFD 3 #endif #if !defined(CLOCK_REALTIME_COARSE) #define CLOCK_REALTIME_COARSE 5 #endif #if !defined(CLOCK_MONOTONIC_COARSE) #define CLOCK_MONOTONIC_COARSE 6 #endif #endif // SANDBOX_LINUX_SYSTEM_HEADERS_LINUX_TIME_H_
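An illustrative sketch of why these fallback defines matter (assumed usage, not from the Chromium source): policy code can compare clock IDs even when older libc headers omit the coarse clocks.

/* Hedged sketch: a predicate a seccomp policy might use. */
static int IsCoarseClock(int clockid) {
  return clockid == CLOCK_REALTIME_COARSE || clockid == CLOCK_MONOTONIC_COARSE;
}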
251
407
#include "gtest/gtest.h" #include "gate_library_test_utils.h" #include "netlist_test_utils.h" #include "verilog_parser/verilog_parser.h" #include "verilog_writer/verilog_writer.h" #include "hal_core/plugin_system/plugin_manager.h" #include "hal_core/netlist/gate_library/gate_library_manager.h" namespace hal { class VerilogWriterTest : public ::testing::Test { protected: GateLibrary* m_gl; virtual void SetUp() { NO_COUT_BLOCK; test_utils::init_log_channels(); plugin_manager::load_all_plugins(); test_utils::create_sandbox_directory(); // gate library needs to be registered through gate_library_manager for serialization std::unique_ptr<GateLibrary> gl_tmp = test_utils::create_gate_library(test_utils::create_sandbox_path("testing_gate_library.hgl")); gate_library_manager::save(gl_tmp->get_path(), gl_tmp.get(), true); m_gl = gate_library_manager::load(gl_tmp->get_path()); } virtual void TearDown() { NO_COUT_BLOCK; plugin_manager::unload_all_plugins(); // test_utils::remove_sandbox_directory(); } }; /** * Test writing a given netlist to file and subsequently parse it using the VerilogParser. * * Functions: write */ TEST_F(VerilogWriterTest, check_main) { TEST_START { std::filesystem::path path_netlist = test_utils::create_sandbox_path("test.v"); std::unique_ptr<Netlist> nl = std::make_unique<Netlist>(m_gl); { nl->set_design_name("top_module"); Module* top_module = nl->get_top_module(); top_module->set_type("top_module"); Gate* gate_0 = nl->create_gate(m_gl->get_gate_type_by_name("AND2"), "gate_0"); Gate* gate_1 = nl->create_gate(m_gl->get_gate_type_by_name("GND"), "gate_1"); Gate* gate_2 = nl->create_gate(m_gl->get_gate_type_by_name("VCC"), "gate_2"); Gate* gate_3 = nl->create_gate(m_gl->get_gate_type_by_name("INV"), "gate_3"); Gate* gate_4 = nl->create_gate(m_gl->get_gate_type_by_name("INV"), "gate_4"); Gate* gate_5 = nl->create_gate(m_gl->get_gate_type_by_name("AND2"), "gate_5"); Gate* gate_6 = nl->create_gate(m_gl->get_gate_type_by_name("AND2"), "gate_6"); Gate* gate_7 = nl->create_gate(m_gl->get_gate_type_by_name("AND2"), "gate_7"); gate_1->mark_gnd_gate(); gate_2->mark_vcc_gate(); Net* net_1_3 = test_utils::connect(nl.get(), gate_1, "O", gate_3, "I"); Net* net_3_0 = test_utils::connect(nl.get(), gate_3, "O", gate_0, "I0"); Net* net_2_0 = test_utils::connect(nl.get(), gate_2, "O", gate_0, "I1"); Net* net_0_4_5 = test_utils::connect(nl.get(), gate_0, "O", gate_4, "I"); test_utils::connect(nl.get(), gate_0, "O", gate_5, "I0"); Net* net_6_7 = test_utils::connect(nl.get(), gate_6, "O", gate_7, "I0"); Net* net_4_out = test_utils::connect_global_out(nl.get(), gate_4, "O", "net_4_out"); top_module->set_output_port_name(net_4_out, net_4_out->get_name()); Net* net_5_out = test_utils::connect_global_out(nl.get(), gate_5, "O", "net_5_out"); top_module->set_output_port_name(net_5_out, net_5_out->get_name()); Net* net_7_out = test_utils::connect_global_out(nl.get(), gate_7, "O", "net_7_out"); top_module->set_output_port_name(net_7_out, net_7_out->get_name()); } VerilogWriter verilog_writer; ASSERT_TRUE(verilog_writer.write(nl.get(), path_netlist)); VerilogParser verilog_parser; std::unique_ptr<Netlist> parsed_nl = verilog_parser.parse_and_instantiate(path_netlist, m_gl); ASSERT_NE(parsed_nl, nullptr); // prepare comparison parsed_nl->set_device_name(nl->get_device_name()); // compare netlists EXPECT_TRUE(test_utils::netlists_are_equal(nl.get(), parsed_nl.get(), true)); } TEST_END } /** * Test writing global input and output nets. 
* * Functions: write */ TEST_F(VerilogWriterTest, check_global_nets) { TEST_START { std::filesystem::path path_netlist = test_utils::create_sandbox_path("test.v"); std::unique_ptr<Netlist> nl = std::make_unique<Netlist>(m_gl); { Module* top_module = nl->get_top_module(); Gate* gate_0 = nl->create_gate(m_gl->get_gate_type_by_name("BUF"), "gate_0"); Net* global_in_0 = test_utils::connect_global_in(nl.get(), gate_0, "I", "global_in_0"); top_module->set_input_port_name(global_in_0, global_in_0->get_name()); Gate* gate_1 = nl->create_gate(m_gl->get_gate_type_by_name("BUF"), "gate_1"); Net* global_in_1 = test_utils::connect_global_in(nl.get(), gate_1, "I", "global_in_1"); top_module->set_input_port_name(global_in_1, global_in_1->get_name()); Gate* gate_2 = nl->create_gate(m_gl->get_gate_type_by_name("BUF"), "gate_2"); Net* global_out_0 = test_utils::connect_global_out(nl.get(), gate_2, "O", "global_out_0"); top_module->set_output_port_name(global_out_0, global_out_0->get_name()); Gate* gate_3 = nl->create_gate(m_gl->get_gate_type_by_name("BUF"), "gate_3"); Net* global_out_1 = test_utils::connect_global_out(nl.get(), gate_3, "O", "global_out_1"); top_module->set_output_port_name(global_out_1, global_out_1->get_name()); } VerilogWriter verilog_writer; ASSERT_TRUE(verilog_writer.write(nl.get(), path_netlist)); VerilogParser verilog_parser; std::unique_ptr<Netlist> parsed_nl = verilog_parser.parse_and_instantiate(path_netlist, m_gl); ASSERT_NE(parsed_nl, nullptr); EXPECT_EQ(parsed_nl->get_global_input_nets().size(), 2); EXPECT_EQ(parsed_nl->get_global_output_nets().size(), 2); std::vector<Net*> nets = parsed_nl->get_nets(); ASSERT_EQ(nets.size(), 4); auto global_in_0_it = std::find_if(nets.begin(), nets.end(), [](const Net* net){ return net->get_name() == "global_in_0"; }); ASSERT_NE(global_in_0_it, nets.end()); EXPECT_TRUE((*global_in_0_it)->is_global_input_net()); auto global_in_1_it = std::find_if(nets.begin(), nets.end(), [](const Net* net){ return net->get_name() == "global_in_1"; }); ASSERT_NE(global_in_1_it, nets.end()); EXPECT_TRUE((*global_in_1_it)->is_global_input_net()); auto global_out_0_it = std::find_if(nets.begin(), nets.end(), [](const Net* net){ return net->get_name() == "global_out_0"; }); ASSERT_NE(global_out_0_it, nets.end()); EXPECT_TRUE((*global_out_0_it)->is_global_output_net()); auto global_out_1_it = std::find_if(nets.begin(), nets.end(), [](const Net* net){ return net->get_name() == "global_out_1"; }); ASSERT_NE(global_out_1_it, nets.end()); EXPECT_TRUE((*global_out_1_it)->is_global_output_net()); } TEST_END } /** * Test writing a modularized netlist. 
* * Functions: write */ TEST_F(VerilogWriterTest, check_modules) { TEST_START { std::filesystem::path path_netlist = test_utils::create_sandbox_path("test.v"); std::unique_ptr<Netlist> nl = std::make_unique<Netlist>(m_gl); { Gate* buf_0 = nl->create_gate(m_gl->get_gate_type_by_name("BUF"), "buf_0"); Gate* buf_1 = nl->create_gate(m_gl->get_gate_type_by_name("BUF"), "buf_1"); Gate* buf_2 = nl->create_gate(m_gl->get_gate_type_by_name("BUF"), "buf_2"); Gate* buf_3 = nl->create_gate(m_gl->get_gate_type_by_name("BUF"), "buf_3"); Gate* inv_0 = nl->create_gate(m_gl->get_gate_type_by_name("INV"), "inv_0"); Gate* and2_0 = nl->create_gate(m_gl->get_gate_type_by_name("AND2"), "and2_0"); Gate* and2_1 = nl->create_gate(m_gl->get_gate_type_by_name("AND2"), "and2_1"); Gate* or2_0 = nl->create_gate(m_gl->get_gate_type_by_name("OR2"), "or2_0"); Net* net_0 = test_utils::connect(nl.get(), buf_0, "O", and2_0, "I0", "net_0"); Net* net_1 = test_utils::connect(nl.get(), buf_1, "O", and2_0, "I1", "net_1"); Net* net_2 = test_utils::connect(nl.get(), and2_0, "O", or2_0, "I0", "net_2"); test_utils::connect(nl.get(), and2_0, "O", and2_1, "I0", "net_3"); Net* net_3 = test_utils::connect(nl.get(), inv_0, "O", or2_0, "I1", "net_3"); test_utils::connect(nl.get(), inv_0, "O", and2_1, "I1", "net_5"); Net* net_4 = test_utils::connect(nl.get(), or2_0, "O", buf_2, "I", "net_4"); Net* net_5 = test_utils::connect(nl.get(), and2_1, "O", buf_3, "I", "net_5"); Net* net_6 = test_utils::connect_global_in(nl.get(), buf_0, "I", "net_6"); Net* net_7 = test_utils::connect_global_in(nl.get(), buf_1, "I", "net_7"); Net* net_8 = test_utils::connect_global_in(nl.get(), inv_0, "I", "net_8"); Net* net_9 = test_utils::connect_global_out(nl.get(), buf_2, "O", "net_9"); Net* net_10 = test_utils::connect_global_out(nl.get(), buf_3, "O", "net_10"); Module* mod_0 = nl->create_module("mod_0", nl->get_top_module(), {buf_0, buf_1, and2_0}); Module* mod_1 = nl->create_module("mod_1", nl->get_top_module()); Module* mod_2 = nl->create_module("mod_2", mod_1, {or2_0, buf_2}); Module* mod_3 = nl->create_module("mod_3", mod_1, {and2_1, buf_3}); } VerilogWriter verilog_writer; ASSERT_TRUE(verilog_writer.write(nl.get(), path_netlist)); VerilogParser verilog_parser; std::unique_ptr<Netlist> parsed_nl = verilog_parser.parse_and_instantiate(path_netlist, m_gl); ASSERT_NE(parsed_nl, nullptr); EXPECT_EQ(parsed_nl->get_nets().size(), 11); EXPECT_EQ(parsed_nl->get_global_input_nets().size(), 3); EXPECT_EQ(parsed_nl->get_global_output_nets().size(), 2); EXPECT_EQ(parsed_nl->get_gates().size(), 8); { std::vector<Module*> modules = parsed_nl->get_modules(); EXPECT_EQ(modules.size(), 5); const auto top_mod_it = std::find_if(modules.begin(), modules.end(), [](const Module* module){ return module->get_name() == "top_module"; }); ASSERT_NE(top_mod_it, modules.end()); EXPECT_EQ((*top_mod_it)->get_gates().size(), 1); EXPECT_EQ((*top_mod_it)->get_input_nets().size(), 3); EXPECT_EQ((*top_mod_it)->get_output_nets().size(), 2); EXPECT_TRUE((*top_mod_it)->is_top_module()); const auto mod_0_it = std::find_if(modules.begin(), modules.end(), [](const Module* module){ return module->get_name() == "mod_0"; }); ASSERT_NE(mod_0_it, modules.end()); EXPECT_EQ((*mod_0_it)->get_gates().size(), 3); EXPECT_EQ((*mod_0_it)->get_input_nets().size(), 2); EXPECT_EQ((*mod_0_it)->get_output_nets().size(), 1); EXPECT_EQ((*mod_0_it)->get_parent_module(), parsed_nl->get_top_module()); const auto mod_1_it = std::find_if(modules.begin(), modules.end(), [](const Module* module){ return module->get_name() == 
"mod_1"; }); ASSERT_NE(mod_1_it, modules.end()); EXPECT_EQ((*mod_1_it)->get_gates().size(), 0); EXPECT_EQ((*mod_1_it)->get_input_nets().size(), 2); EXPECT_EQ((*mod_1_it)->get_output_nets().size(), 2); EXPECT_EQ((*mod_1_it)->get_parent_module(), parsed_nl->get_top_module()); const auto mod_2_it = std::find_if(modules.begin(), modules.end(), [](const Module* module){ return module->get_name() == "mod_2"; }); ASSERT_NE(mod_2_it, modules.end()); EXPECT_EQ((*mod_2_it)->get_gates().size(), 2); EXPECT_EQ((*mod_2_it)->get_input_nets().size(), 2); EXPECT_EQ((*mod_2_it)->get_output_nets().size(), 1); EXPECT_EQ((*mod_2_it)->get_parent_module(), (*mod_1_it)); const auto mod_3_it = std::find_if(modules.begin(), modules.end(), [](const Module* module){ return module->get_name() == "mod_3"; }); ASSERT_NE(mod_3_it, modules.end()); EXPECT_EQ((*mod_3_it)->get_gates().size(), 2); EXPECT_EQ((*mod_3_it)->get_input_nets().size(), 2); EXPECT_EQ((*mod_3_it)->get_output_nets().size(), 1); EXPECT_EQ((*mod_3_it)->get_parent_module(), (*mod_1_it)); } } TEST_END } /** * Test writing generic data. * * Functions: write */ TEST_F(VerilogWriterTest, check_data) { TEST_START { std::filesystem::path path_netlist = test_utils::create_sandbox_path("test.v"); std::unique_ptr<Netlist> nl = std::make_unique<Netlist>(m_gl); Gate* gate = nl->create_gate(m_gl->get_gate_type_by_name("BUF"), "gate_0"); test_utils::connect_global_in(nl.get(), gate, "I"); test_utils::connect_global_out(nl.get(), gate, "O"); Module* mod = nl->create_module("mod", nl->get_top_module(), {gate}); gate->set_data("generic", "test_bit_vector", "bit_vector", "123ABC"); gate->set_data("generic", "test_string", "string", "one_two_three"); gate->set_data("generic", "test_integer", "integer", "123"); gate->set_data("generic", "test_float", "floating_point", "1.001"); gate->set_data("generic", "test_bit_value", "bit_value", "1"); // below data should be ignored when writing gate->set_data("generic", "test_invalid", "invalid", "ignore_me"); gate->set_data("attribute", "test_attr_string", "string", "one_two_three"); gate->set_data("random", "test_rand_string", "string", "one_two_three"); mod->set_data("generic", "test_bit_vector", "bit_vector", "123ABC"); mod->set_data("generic", "test_string", "string", "one_two_three"); mod->set_data("generic", "test_integer", "integer", "123"); mod->set_data("generic", "test_float", "floating_point", "1.001"); mod->set_data("generic", "test_bit_value", "bit_value", "1"); // below data should be ignored when writing mod->set_data("generic", "test_invalid", "invalid", "ignore_me"); mod->set_data("attribute", "test_attr_string", "string", "one_two_three"); mod->set_data("random", "test_rand_string", "string", "one_two_three"); VerilogWriter verilog_writer; ASSERT_TRUE(verilog_writer.write(nl.get(), path_netlist)); VerilogParser verilog_parser; std::unique_ptr<Netlist> parsed_nl = verilog_parser.parse_and_instantiate(path_netlist, m_gl); ASSERT_NE(parsed_nl, nullptr); std::vector<Gate*> gates = parsed_nl->get_gates(); ASSERT_EQ(gates.size(), 1); const Gate* parsed_gate = gates.front(); ASSERT_NE(parsed_gate, nullptr); EXPECT_EQ(parsed_gate->get_data_map().size(), 5); EXPECT_EQ(parsed_gate->get_data("generic", "test_bit_vector"), std::make_tuple(std::string("bit_vector"), std::string("123ABC"))); EXPECT_EQ(parsed_gate->get_data("generic", "test_string"), std::make_tuple(std::string("string"), std::string("one_two_three"))); EXPECT_EQ(parsed_gate->get_data("generic", "test_integer"), std::make_tuple(std::string("integer"), 
std::string("123"))); EXPECT_EQ(parsed_gate->get_data("generic", "test_float"), std::make_tuple(std::string("floating_point"), std::string("1.001"))); EXPECT_EQ(parsed_gate->get_data("generic", "test_bit_value"), std::make_tuple(std::string("bit_value"), std::string("1"))); std::vector<Module*> modules = parsed_nl->get_modules(); ASSERT_EQ(modules.size(), 2); auto mod_it = std::find_if(modules.begin(), modules.end(), [](const Module* m){ return !m->is_top_module(); }); ASSERT_NE(mod_it, modules.end()); const Module* parsed_module = *mod_it; ASSERT_NE(parsed_module, nullptr); EXPECT_EQ(parsed_module->get_data_map().size(), 5); EXPECT_EQ(parsed_module->get_data("generic", "test_bit_vector"), std::make_tuple(std::string("bit_vector"), std::string("123ABC"))); EXPECT_EQ(parsed_module->get_data("generic", "test_string"), std::make_tuple(std::string("string"), std::string("one_two_three"))); EXPECT_EQ(parsed_module->get_data("generic", "test_integer"), std::make_tuple(std::string("integer"), std::string("123"))); EXPECT_EQ(parsed_module->get_data("generic", "test_float"), std::make_tuple(std::string("floating_point"), std::string("1.001"))); EXPECT_EQ(parsed_module->get_data("generic", "test_bit_value"), std::make_tuple(std::string("bit_value"), std::string("1"))); } TEST_END } /** * Test writing gates with multi-bit pins. * * Functions: write */ TEST_F(VerilogWriterTest, check_multi_bit_pins) { TEST_START { std::filesystem::path path_netlist = test_utils::create_sandbox_path("test.v"); std::unique_ptr<Netlist> nl = std::make_unique<Netlist>(m_gl); Gate* gate = nl->create_gate(m_gl->get_gate_type_by_name("RAM"), "ram"); Module* top_module = nl->get_top_module(); for (u32 i = 0; i < 4; i++) { Net* n = test_utils::connect_global_in(nl.get(), gate, "DATA_IN(" + std::to_string(i) + ")", "DATA_IN(" + std::to_string(i) + ")"); top_module->set_input_port_name(n, "DATA_IN(" + std::to_string(i) + ")"); n = test_utils::connect_global_out(nl.get(), gate, "DATA_OUT(" + std::to_string(i) + ")", "DATA_OUT(" + std::to_string(i) + ")"); top_module->set_output_port_name(n, "DATA_OUT(" + std::to_string(i) + ")"); n = test_utils::connect_global_in(nl.get(), gate, "ADDR(" + std::to_string(i) + ")", "ADDR(" + std::to_string(i) + ")"); top_module->set_input_port_name(n, "ADDR(" + std::to_string(i) + ")"); } std::vector<Endpoint*> fan_in = gate->get_fan_in_endpoints(); std::vector<Endpoint*> fan_out = gate->get_fan_out_endpoints(); VerilogWriter verilog_writer; ASSERT_TRUE(verilog_writer.write(nl.get(), path_netlist)); VerilogParser verilog_parser; std::unique_ptr<Netlist> parsed_nl = verilog_parser.parse_and_instantiate(path_netlist, m_gl); ASSERT_NE(parsed_nl, nullptr); std::vector<Gate*> gates = parsed_nl->get_gates(); ASSERT_EQ(gates.size(), 1); const Gate* parsed_gate = gates.front(); ASSERT_NE(parsed_gate, nullptr); EXPECT_EQ(parsed_gate->get_fan_in_nets().size(), 8); EXPECT_EQ(parsed_gate->get_fan_out_nets().size(), 4); for (const Endpoint* ep : fan_in) { EXPECT_EQ(parsed_gate->get_fan_in_net(ep->get_pin())->get_name(), ep->get_net()->get_name()); } for (const Endpoint* ep : fan_out) { EXPECT_EQ(parsed_gate->get_fan_out_net(ep->get_pin())->get_name(), ep->get_net()->get_name()); } } TEST_END } } //namespace hal
11,207
328
package com.ctg.test.springboothtml.controller;

import com.ctg.test.springboothtml.IpUtil;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.core.env.Environment;
import org.springframework.stereotype.Controller;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.ResponseBody;

import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.net.InetAddress;
import java.util.HashMap;
import java.util.Map;

/**
 * Start 3 backend processes on ports 8080, 8090 and 8100; the nginx ports are set to 8060 and 8070.
 * http://localhost:8080/webapp1
 * http://localhost:8080/webapp1/test
 * Static resources: http://localhost:8080/webapp1/static/img/1.jpg
 * Test nginx static resources and nginx load balancing:
 * http://192.168.159.142:8060/webapp1
 * Or access directly:
 * http://192.168.159.142:8060/webapp1/test/static/img/1.jpg
 * http://192.168.159.142:8060/webapp1/test
 * Test nginx on different ports, with different contexts under one port:
 * http://192.168.159.142:8070/webapp2/
 * http://192.168.159.142:8070/webapp2/test
 * http://192.168.159.142:8070/webapp3/
 * http://192.168.159.142:8070/webapp3/test
 */
@Controller
@EnableAutoConfiguration
public class IndexController {

    @Autowired
    private Environment env;

    @Autowired
    IpUtil ipUtil;

    @RequestMapping("/")
    String index(HttpServletRequest request) {
        return "/index";
    }

    @RequestMapping("/index")
    String index2(HttpServletRequest request) {
        return "/index";
    }

    @RequestMapping("/test")
    @ResponseBody
    public Object test(HttpServletRequest request, HttpServletResponse response) {
        Map<String, Object> result = new HashMap<>();
        result.put("code", "200");
        String ip = "";
        try {
            InetAddress inetAddress = ipUtil.getLocalHostLANAddress();
            ip = inetAddress.getHostAddress();
        } catch (Exception e) {
            e.printStackTrace();
        }
        result.put("data", "url:" + request.getRequestURL() + ",ip: " + ip + ",port:" + env.getProperty("server.port"));
        return result;
    }
}
929
892
<filename>advisories/unreviewed/2022/05/GHSA-229h-mpm4-83qq/GHSA-229h-mpm4-83qq.json { "schema_version": "1.2.0", "id": "GHSA-229h-mpm4-83qq", "modified": "2022-05-02T03:34:35Z", "published": "2022-05-02T03:34:35Z", "aliases": [ "CVE-2009-2372" ], "details": "Drupal 6.x before 6.13 does not prevent users from modifying user signatures after the associated comment format has been changed to an administrator-controlled input format, which allows remote authenticated users to inject arbitrary web script, HTML, and possibly PHP code via a crafted user signature.", "severity": [ ], "affected": [ ], "references": [ { "type": "ADVISORY", "url": "https://nvd.nist.gov/vuln/detail/CVE-2009-2372" }, { "type": "WEB", "url": "http://drupal.org/node/507572" }, { "type": "WEB", "url": "http://osvdb.org/55525" }, { "type": "WEB", "url": "http://secunia.com/advisories/35681" }, { "type": "WEB", "url": "http://www.securitytracker.com/id?1022497" } ], "database_specific": { "cwe_ids": [ "CWE-94" ], "severity": "MODERATE", "github_reviewed": false } }
538
713
<reponame>franz1981/infinispan
package org.infinispan.statetransfer;

import java.util.concurrent.CompletableFuture;

import org.infinispan.commands.VisitableCommand;
import org.infinispan.commands.tx.TransactionBoundaryCommand;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.impl.TxInvocationContext;
import org.infinispan.interceptors.BaseAsyncInterceptor;
import org.infinispan.transaction.impl.RemoteTransaction;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;

/**
 * With the Non-Blocking State Transfer (NBST) in place it is possible for a transactional command to be forwarded
 * multiple times, concurrently to the same node. This interceptor makes sure that, for any given transaction, the
 * interceptor chain after {@link StateTransferInterceptor} only allows a single thread to amend the transaction.
 * </p>
 * An example of when this situation might occur:
 * <ul>
 * <li>1) Node A broadcasts PrepareCommand to nodes B, C </li>
 * <li>2) Node A leaves cluster, causing new topology to be installed </li>
 * <li>3) The command arrives at B and C, with lower topology than the current one</li>
 * <li>4) Both B and C forward the command to node D</li>
 * <li>5) D executes the two commands in parallel and finds out that A has left, therefore executing RollbackCommand</li>
 * </ul>
 * <p/>
 * This interceptor must be placed after the logic that handles command forwarding ({@link StateTransferInterceptor}),
 * otherwise we can end up in deadlocks when a command is forwarded in a loop to the same cache: e.g. A&rarr;B&rarr;C&rarr;A. This
 * scenario is possible when we have chained topology changes (see <a href="https://issues.jboss.org/browse/ISPN-2578">ISPN-2578</a>).
 *
 * @author <NAME>
 * @since 5.2
 */
public class TransactionSynchronizerInterceptor extends BaseAsyncInterceptor {

   private static final Log log = LogFactory.getLog(TransactionSynchronizerInterceptor.class);

   @Override
   public Object visitCommand(InvocationContext ctx, VisitableCommand command) throws Throwable {
      if (ctx.isOriginLocal() || !(command instanceof TransactionBoundaryCommand)) {
         return invokeNext(ctx, command);
      }

      CompletableFuture<Void> releaseFuture = new CompletableFuture<>();
      RemoteTransaction remoteTransaction = ((TxInvocationContext<RemoteTransaction>) ctx).getCacheTransaction();
      Object result = asyncInvokeNext(ctx, command, remoteTransaction.enterSynchronizationAsync(releaseFuture));
      return makeStage(result).andFinally(ctx, command, (rCtx, rCommand, rv, t) -> {
         log.tracef("Completing tx command release future for %s", remoteTransaction);
         releaseFuture.complete(null);
      });
   }
}
839
731
// Include standard headers
#include <stdio.h>
#include <stdlib.h>
#include <vector>
#include <string>
#include <fstream>
#include <iostream>
#include <cstdlib> //rand
#include <chrono>

#include "boost/multi_array.hpp"
#include "boost/timer.hpp"

#include <glad/egl.h>
#include <glad/gl.h>

#include "lodepng.h"

#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtx/transform.hpp>
#include <glm/gtc/quaternion.hpp>
#include <glm/gtx/quaternion.hpp>

using namespace std;

#include <common/shader.hpp>
#include <common/texture.hpp>
#include <common/objloader.hpp>
#include <common/vboindexer.hpp>
#include <common/cmdline.h>
#include <common/controls.hpp>
#include <common/semantic_color.hpp>
#include <common/MTLobjloader.hpp>
#include <common/MTLplyloader.hpp>
#include <common/MTLtexture.hpp>
#include <zmq.hpp>

#ifndef _WIN32
#include <unistd.h>
#else
#include <windows.h>
#define sleep(n) Sleep(n)
#endif

int main( int argc, char * argv[] )
{
    cmdline::parser cmdp;
    cmdp.add<std::string>("modelpath", 'd', "data model directory", true, "");
    cmdp.add<int>("Semantic Source", 'r', "Semantic data source", false, 1);
    cmdp.add<int>("Port", 'p', "Semantic loading port", false, 5055);
    cmdp.parse_check(argc, argv);

    std::string model_path = cmdp.get<std::string>("modelpath");
    int port = cmdp.get<int>("Port");
    int semantic_src = cmdp.get<int>("Semantic Source");
    int ply = 0; // default so ply is initialized even if semantic_src is neither 1 nor 2
    std::string name_obj = model_path + "/semantic.obj";
    if (semantic_src == 1) ply = 0;
    if (semantic_src == 2) ply = 1;

    std::vector<std::vector<glm::vec3>> mtl_vertices;
    std::vector<std::vector<glm::vec2>> mtl_uvs;
    std::vector<std::vector<glm::vec3>> mtl_normals;
    std::vector<glm::vec3> mtl_sem_centers;
    std::vector<std::string> material_name;
    std::vector<int> material_id;
    std::string mtllib;
    std::vector<glm::vec3> vertices;
    std::vector<glm::vec2> uvs;
    std::vector<glm::vec3> normals;
    std::vector<TextureObj> TextObj;
    unsigned int num_layers;

    /* initialize random seed: */
    srand (0);

    bool res;
    int num_vertices;
    if (ply > 0) {
        res = loadPLY_MTL(model_path.c_str(), mtl_vertices, mtl_uvs, mtl_normals, mtl_sem_centers, material_id, mtllib, num_vertices);
        printf("From ply loaded total of %d vertices\n", num_vertices);
    } else {
        res = loadOBJ_MTL(name_obj.c_str(), mtl_vertices, mtl_uvs, mtl_normals, mtl_sem_centers, material_name, mtllib);
        // Guard against meshes with fewer than 20 semantic centers.
        for (int i = 0; i < 20 && i < (int) mtl_sem_centers.size(); i++) {
            printf("Loaded semantic center %f, %f, %f\n", mtl_sem_centers[i].x, mtl_sem_centers[i].y, mtl_sem_centers[i].z);
        }
    }
    if (res == false) {
        printf("Was not able to load the semantic.obj file.\n");
        exit(-1);
    } else {
        printf("Semantic.obj file was loaded with success.\n");
    }

    // Load the textures
    std::string mtl_path = model_path + "/" + mtllib;
    bool MTL_loaded;
    if (ply > 0) {
        mtl_path = model_path;
        MTL_loaded = loadPLYtextures(TextObj, material_id);
    } else {
        MTL_loaded = loadMTLtextures(mtl_path, TextObj, material_name);
    }
    if (MTL_loaded == false) {
        printf("Was not able to load textures\n");
        exit(-1);
    } else {
        printf("Texture file was loaded with success, total: %lu\n", TextObj.size());
    }

    // Read our .obj file
    // Note: use unsigned int because of too many indices
    std::vector<unsigned int> indices;
    std::vector<glm::vec3> indexed_vertices;
    std::vector<glm::vec2> indexed_uvs;
    std::vector<glm::vec3> indexed_normals;
    std::vector<glm::vec2> indexed_semantics;
    /*
    indexVBO_MTL(mtl_vertices, mtl_uvs, mtl_normals, indices, indexed_vertices, indexed_uvs, indexed_normals, indexed_semantics);
    std::cout << "Finished indexing vertices v " << indexed_vertices.size() << " uvs " << indexed_uvs.size() << " normals " << indexed_normals.size() << " semantics " << indexed_semantics.size() << std::endl;
    std::cout << "Semantics ";
    std::cout << std::endl;
    */

    zmq::context_t context (1);
    zmq::socket_t socket (context, ZMQ_REP);
    zmq::message_t request;
    socket.bind ("tcp://127.0.0.1:" + std::to_string(port));

    // Wait for next request from client
    socket.recv (&request);
    std::string request_str = std::string(static_cast<char*>(request.data()), request.size());

    int dim = 3;
    int message_sz = mtl_sem_centers.size()*sizeof(float)*dim;
    zmq::message_t reply (message_sz);
    float * reply_data_handle = (float*)reply.data();
    for (int i = 0; i < mtl_sem_centers.size(); i++) {
        for (int k = 0; k < dim; k++) {
            int offset = k;
            float tmp_float = mtl_sem_centers[i][k];
            reply_data_handle[offset + i * dim] = tmp_float;
        }
    }
    socket.send (reply);

    return 0;
}
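A hedged client-side counterpart (an assumption, not part of the file above): the server replies to any request with mtl_sem_centers.size() * 3 floats, which a client can read back like this. The old-style send/recv calls match the cppzmq usage in the server.

// Hedged sketch: fetch the flattened x,y,z semantic centers from the server.
#include <zmq.hpp>
#include <string>
#include <vector>

std::vector<float> fetch_semantic_centers(int port = 5055)
{
    zmq::context_t context(1);
    zmq::socket_t socket(context, ZMQ_REQ);
    socket.connect("tcp://127.0.0.1:" + std::to_string(port));

    zmq::message_t request(0);
    socket.send(request); // any request triggers the reply

    zmq::message_t reply;
    socket.recv(&reply);
    const float * data = static_cast<const float *>(reply.data());
    return std::vector<float>(data, data + reply.size() / sizeof(float));
}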
2,103
4,140
<reponame>FANsZL/hive /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hive.ql.parse.type; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.exec.FunctionInfo; import org.apache.hadoop.hive.ql.exec.FunctionRegistry; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; import org.junit.Assert; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; /** * Test strict type checks for comparison operations between decimal and strings. * * {@link org.apache.hadoop.hive.conf.HiveConf.ConfVars#HIVE_STRICT_CHECKS_TYPE_SAFETY} */ @RunWith(Parameterized.class) public class TestDecimalStringValidation { private static class FunctionCall { private final ExprNodeDesc expL; private final ExprNodeDesc expR; private final FunctionInfo function; public FunctionCall(ExprNodeDesc expL, ExprNodeDesc expR, FunctionInfo function) { this.expL = expL; this.expR = expR; this.function = function; } @Override public String toString() { return function.getDisplayName() + "(" + expL + "," + expR + ")"; } } private final FunctionCall call; public TestDecimalStringValidation(FunctionCall call) { this.call = call; } @Parameterized.Parameters(name = "{index}: {0}") public static Collection<FunctionCall> params() throws Exception { ExprNodeDesc[] characterExps = new ExprNodeDesc[] { new ExprNodeColumnDesc(TypeInfoFactory.varcharTypeInfo, "varchar_col", null, false), new ExprNodeColumnDesc(TypeInfoFactory.charTypeInfo, "char_col", null, false), new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "string_col", null, false), new ExprNodeConstantDesc(TypeInfoFactory.varcharTypeInfo, "123.3"), new ExprNodeConstantDesc(TypeInfoFactory.charTypeInfo, "123.3"), new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, "123.3"), }; ExprNodeDesc[] numericExps = new ExprNodeDesc[] { new ExprNodeColumnDesc(TypeInfoFactory.decimalTypeInfo, "decimal_col", null, false), new ExprNodeConstantDesc(TypeInfoFactory.decimalTypeInfo, 123.3), }; FunctionInfo[] functions = new FunctionInfo[] { FunctionRegistry.getFunctionInfo("="), FunctionRegistry.getFunctionInfo("<"), FunctionRegistry.getFunctionInfo(">"), FunctionRegistry.getFunctionInfo("<>"), FunctionRegistry.getFunctionInfo("<="), FunctionRegistry.getFunctionInfo(">="), FunctionRegistry.getFunctionInfo("<=>") }; Collection<FunctionCall> input = new ArrayList<>(); for (ExprNodeDesc chrExp : characterExps) { for 
(ExprNodeDesc numExp : numericExps) { for (FunctionInfo function : functions) { input.add(new FunctionCall(chrExp, numExp, function)); input.add(new FunctionCall(numExp, chrExp, function)); } } } return input; } @Test public void testValidationDecimalWithCharacterFailsWhenStrictChecksEnabled() { HiveConf conf = new HiveConf(); conf.setBoolVar(HiveConf.ConfVars.HIVE_STRICT_CHECKS_TYPE_SAFETY, true); try { validateCall(conf); Assert.fail("Validation of " + call + " should fail"); } catch (Exception e) { Assert.assertEquals(HiveConf.StrictChecks.checkTypeSafety(conf), e.getMessage()); } } @Test public void testValidationDecimalWithCharacterSucceedsWhenStrictChecksDisabled() throws SemanticException { HiveConf conf = new HiveConf(); conf.setBoolVar(HiveConf.ConfVars.HIVE_STRICT_CHECKS_TYPE_SAFETY, false); validateCall(conf); } private void validateCall(HiveConf conf) throws SemanticException { SessionState.start(conf); TypeCheckCtx ctx = new TypeCheckCtx(null); ExprNodeTypeCheck.getExprNodeDefaultExprProcessor() .validateUDF(null, false, ctx, call.function, Arrays.asList(call.expL, call.expR)); } }
1,845
2,354
<reponame>XinChCh/singa<filename>src/model/layer/flatten.cc
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "singa/model/layer.h"
#include "./flatten.h"

namespace singa {

RegisterLayerClass(singa_flatten, Flatten);
RegisterLayerClass(singacpp_flatten, Flatten);
RegisterLayerClass(singacuda_flatten, Flatten);
RegisterLayerClass(singacl_flatten, Flatten);

void Flatten::Setup(const Shape& in_sample, const LayerConf &conf) {
  Layer::Setup(in_sample, conf);
  axis_ = conf.flatten_conf().axis();
  size_t len = 1;
  if (axis_ > 0)
    for (size_t i = axis_ - 1; i < in_sample.size(); i++)
      len *= in_sample.at(i);
  out_sample_shape_.push_back(len);
}

const Tensor Flatten::Forward(int flag, const Tensor &input) {
  Tensor output;
  input_shape_ = input.shape();
  if (axis_ == 0)
    output = Reshape(input, vector<size_t>{input.Size()});
  else
    output = Reshape(input, vector<size_t>{input.Size() / out_sample_shape_.at(0),
                                           out_sample_shape_.at(0)});
  return output;
}

const std::pair<Tensor, vector<Tensor> > Flatten::Backward(int flag,
                                                           const Tensor &grad) {
  vector<Tensor> param_grad;
  Tensor input_grad = Reshape(grad, input_shape_);
  return std::make_pair(input_grad, param_grad);
}

}  // namespace singa
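The Forward/Backward pair above amounts to a reshape and its inverse: the backward pass just restores the remembered input shape. As a point of comparison only (a NumPy sketch, assuming nothing about SINGA's API), the same bookkeeping looks like this:

```python
# Illustrative NumPy analogue of the Flatten layer above (not SINGA code).
import numpy as np

def flatten_forward(x, axis):
    input_shape = x.shape  # remembered for the backward pass
    if axis == 0:
        out = x.reshape(x.size)
    else:
        trailing = int(np.prod(x.shape[axis:]))  # length of the flattened tail
        out = x.reshape(x.size // trailing, trailing)
    return out, input_shape

def flatten_backward(grad, input_shape):
    # The gradient is simply reshaped back to the original input shape.
    return grad.reshape(input_shape)

x = np.arange(24.0).reshape(2, 3, 4)
y, shape = flatten_forward(x, axis=1)          # -> shape (2, 12)
dx = flatten_backward(np.ones_like(y), shape)  # -> shape (2, 3, 4)
```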
752
1,932
package cn.springcloud.book;

import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.data.jpa.repository.config.EnableJpaAuditing;
import org.springframework.data.jpa.repository.config.EnableJpaRepositories;
import org.springframework.transaction.annotation.EnableTransactionManagement;

@SpringBootApplication
@EnableJpaRepositories(basePackages = "cn.springcloud.book.dao")
@EnableTransactionManagement
@EnableJpaAuditing
public class OrderServiceApplication {

    public static void main(String[] args) {
        SpringApplication.run(OrderServiceApplication.class, args);
    }
}
195
7,113
<reponame>qixiaobo/otter
/*
 * Copyright (C) 2010-2101 Alibaba Group Holding Limited.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.alibaba.otter.manager.web.home.module.action;

import javax.annotation.Resource;

import org.apache.commons.lang.StringUtils;

import com.alibaba.citrus.service.form.CustomErrors;
import com.alibaba.citrus.service.form.Group;
import com.alibaba.citrus.turbine.Navigator;
import com.alibaba.citrus.turbine.dataresolver.FormField;
import com.alibaba.citrus.turbine.dataresolver.FormGroup;
import com.alibaba.citrus.turbine.dataresolver.Param;
import com.alibaba.citrus.webx.WebxException;
import com.alibaba.otter.manager.biz.common.exceptions.RepeatConfigureException;
import com.alibaba.otter.manager.biz.config.datamatrix.DataMatrixService;
import com.alibaba.otter.manager.web.common.WebConstant;
import com.alibaba.otter.shared.common.model.config.data.DataMatrix;

public class DataMatrixAction extends AbstractAction {

    @Resource(name = "dataMatrixService")
    private DataMatrixService dataMatrixService;

    public void doAdd(@FormGroup("dataMatrixInfo") Group dataMatrixInfo,
                      @FormField(name = "formDataMatrixError", group = "dataMatrixInfo") CustomErrors err,
                      Navigator nav) throws Exception {
        DataMatrix matrix = new DataMatrix();
        dataMatrixInfo.setProperties(matrix);
        try {
            dataMatrixService.create(matrix);
        } catch (RepeatConfigureException rce) {
            err.setMessage("invalidDataMatrix");
            return;
        }
        nav.redirectTo(WebConstant.MATRIX_LIST_LINK);
    }

    public void doEdit(@FormGroup("dataMatrixInfo") Group dataMatrixInfo,
                       @FormField(name = "formDataMatrixError", group = "dataMatrixInfo") CustomErrors err,
                       Navigator nav) throws Exception {
        DataMatrix matrix = new DataMatrix();
        dataMatrixInfo.setProperties(matrix);
        try {
            dataMatrixService.modify(matrix);
        } catch (RepeatConfigureException rce) {
            err.setMessage("invalidDataMatrix");
            return;
        }
        nav.redirectToLocation("dataMatrixList.htm?matrixId=" + matrix.getId());
    }

    public void doDelete(@Param("matrixId") Long matrixId, Navigator nav) throws WebxException {
        dataMatrixService.remove(matrixId);
        nav.redirectToLocation("dataMatrixList.htm");
    }

    public void doSwitch(@Param("matrixId") Long matrixId, Navigator nav) throws WebxException {
        DataMatrix matrix = dataMatrixService.findById(matrixId);
        // swap master and slave
        String slave = matrix.getMaster();
        String master = matrix.getSlave();
        if (StringUtils.isNotEmpty(master) && StringUtils.isNotEmpty(slave)) {
            matrix.setMaster(master);
            matrix.setSlave(slave);
        }
        dataMatrixService.modify(matrix);
        nav.redirectToLocation("dataMatrixList.htm?matrixId=" + matrixId);
    }
}
1,399
435
{ "description": "The talk will cover advanced Python modules that can be used to have a fast and memory efficient code.", "language": "eng", "recorded": "2017-05-27", "related_urls": [ { "label": "schedule", "url": "https://pyconweb.com/#schedule" } ], "speakers": [ "<NAME>" ], "tags": [], "thumbnail_url": "https://i.ytimg.com/vi/m-y_hwgxH5U/hqdefault.jpg", "title": "Efficient Python", "videos": [ { "type": "youtube", "url": "https://www.youtube.com/watch?v=m-y_hwgxH5U" } ] }
241
995
package iadgov.svgexport.svg;

public abstract class Entity {
    public abstract String toSvg(final TransformStack transforms);
}
38
1,707
#pragma once
//------------------------------------------------------------------------------
/**
    @class Oryol::ResourceRegistry
    @ingroup Resource
    @brief map resource locators to resource ids for resource sharing
*/
#include "Resource/Id.h"
#include "Resource/Locator.h"
#include "Resource/ResourceLabel.h"
#include "Core/Containers/Array.h"
#include "Core/Containers/Map.h"

namespace Oryol {

class ResourceRegistry {
public:
    /// destructor
    ~ResourceRegistry();

    /// setup the registry with an estimated number of entries
    void Setup(int reserveSize);
    /// discard the registry
    void Discard();
    /// return true if the registry has been setup
    bool IsValid() const;

    /// add a new resource id to the registry
    void Add(const Locator& loc, Id id, ResourceLabel label);
    /// lookup resource Id by locator
    Id Lookup(const Locator& loc) const;
    /// remove all resource matching label from registry, returns removed Ids
    Array<Id> Remove(ResourceLabel label);

    /// check if resource is in registry
    bool Contains(Id id) const;
    /// (debug) get the locator of a resource (fail hard if resource doesn't exist)
    const Locator& GetLocator(Id id) const;
    /// (debug) get the resource label of a resource (fail hard if resource doesn't exist)
    ResourceLabel GetLabel(Id id) const;

    /// (debug) get number of resources in the registry
    int GetNumResources() const;
    /// (debug) get resource id by index
    Id GetIdByIndex(int index) const;

#if ORYOL_DEBUG
    /// validate integrity of internal data structures
    bool CheckIntegrity() const;
#endif

    struct Entry {
        Entry(const Locator& loc_, Id id_, ResourceLabel label_) :
            locator(loc_),
            id(id_),
            label(label_) { };

        Locator locator;
        Id id;
        ResourceLabel label;
    };

    /// find an entry by locator
    const Entry* findEntryByLocator(const Locator& loc) const;
    /// find an entry by id
    const Entry* findEntryById(Id id) const;

    bool isValid = false;
    Array<Entry> entries;
    Map<Locator, int> locatorIndexMap;
    Map<Id, int> idIndexMap;
};

} // namespace Oryol
867
488
// Copyright 2005,2006,2007 <NAME>, <NAME>
// $Id: setalgorithms.h,v 1.2 2007-03-08 15:36:49 markus Exp $
// Author: <NAME>

#ifndef SETALGORITHMS_H
#define SETALGORITHMS_H

template<typename set_type>
void intersection(const set_type& s1, const set_type& s2, set_type& result)
{
    set_type temp;
    typename set_type::iterator i1 = s1.begin();
    typename set_type::iterator i2;
    while (i1 != s1.end()) {
        i2 = s2.find(*i1++);
        if (i2 != s2.end())
            temp.insert(*i2);
    }
    temp.swap(result);
}

// result=s1-s2;
template<typename set_type>
void set_difference(const set_type& s1, const set_type& s2, set_type& result)
{
    set_type temp;
    typename set_type::iterator i1 = s1.begin();
    typename set_type::iterator i2;
    while (i1 != s1.end()) {       // foreach *i1 in s1
        i2 = s2.find(*i1);
        if (i2 == s2.end())        // if *i1 notin s2
            temp.insert(*i1);      // then temp:=temp+*i1
        i1++;
    }
    temp.swap(result);
}

#endif
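Both templates walk `s1` once and probe `s2` with `find`, so for tree-based sets the cost is O(|s1| log |s2|). As a sanity check of the intended semantics only (not part of this header), the equivalent operations with Python's built-in sets:

```python
# Semantics of the two templates above, sketched with Python's built-in sets.
s1 = {1, 2, 3, 4}
s2 = {3, 4, 5}

intersection = {x for x in s1 if x in s2}    # mirrors intersection(): probe s2 per element of s1
difference = {x for x in s1 if x not in s2}  # mirrors set_difference(): result = s1 - s2

assert intersection == (s1 & s2) == {3, 4}
assert difference == (s1 - s2) == {1, 2}
```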
422
3,861
/* crypto/ec/ec_key.c */ /* * Written by <NAME> for the OpenSSL project. */ /* ==================================================================== * Copyright (c) 1998-2005 The OpenSSL Project. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * 3. All advertising materials mentioning features or use of this * software must display the following acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit. (http://www.openssl.org/)" * * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to * endorse or promote products derived from this software without * prior written permission. For written permission, please contact * <EMAIL>. * * 5. Products derived from this software may not be called "OpenSSL" * nor may "OpenSSL" appear in their names without prior written * permission of the OpenSSL Project. * * 6. Redistributions of any form whatsoever must retain the following * acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit (http://www.openssl.org/)" * * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * ==================================================================== * * This product includes cryptographic software written by <NAME> * (<EMAIL>). This product includes software written by <NAME> (<EMAIL>). * */ /* ==================================================================== * Copyright 2002 Sun Microsystems, Inc. ALL RIGHTS RESERVED. * Portions originally developed by SUN MICROSYSTEMS, INC., and * contributed to the OpenSSL project. 
*/ #include <string.h> #include "ec_lcl.h" #include <openssl/err.h> #ifdef OPENSSL_FIPS #include <openssl/fips.h> #endif EC_KEY *EC_KEY_new(void) { EC_KEY *ret; ret=(EC_KEY *)OPENSSL_malloc(sizeof(EC_KEY)); if (ret == NULL) { ECerr(EC_F_EC_KEY_NEW, ERR_R_MALLOC_FAILURE); return(NULL); } ret->version = 1; ret->flags = 0; ret->group = NULL; ret->pub_key = NULL; ret->priv_key= NULL; ret->enc_flag= 0; ret->conv_form = POINT_CONVERSION_UNCOMPRESSED; ret->references= 1; ret->method_data = NULL; return(ret); } EC_KEY *EC_KEY_new_by_curve_name(int nid) { EC_KEY *ret = EC_KEY_new(); if (ret == NULL) return NULL; ret->group = EC_GROUP_new_by_curve_name(nid); if (ret->group == NULL) { EC_KEY_free(ret); return NULL; } return ret; } void EC_KEY_free(EC_KEY *r) { int i; if (r == NULL) return; i=CRYPTO_add(&r->references,-1,CRYPTO_LOCK_EC); #ifdef REF_PRINT REF_PRINT("EC_KEY",r); #endif if (i > 0) return; #ifdef REF_CHECK if (i < 0) { fprintf(stderr,"EC_KEY_free, bad reference count\n"); abort(); } #endif if (r->group != NULL) EC_GROUP_free(r->group); if (r->pub_key != NULL) EC_POINT_free(r->pub_key); if (r->priv_key != NULL) BN_clear_free(r->priv_key); EC_EX_DATA_free_all_data(&r->method_data); OPENSSL_cleanse((void *)r, sizeof(EC_KEY)); OPENSSL_free(r); } EC_KEY *EC_KEY_copy(EC_KEY *dest, const EC_KEY *src) { EC_EXTRA_DATA *d; if (dest == NULL || src == NULL) { ECerr(EC_F_EC_KEY_COPY, ERR_R_PASSED_NULL_PARAMETER); return NULL; } /* copy the parameters */ if (src->group) { const EC_METHOD *meth = EC_GROUP_method_of(src->group); /* clear the old group */ if (dest->group) EC_GROUP_free(dest->group); dest->group = EC_GROUP_new(meth); if (dest->group == NULL) return NULL; if (!EC_GROUP_copy(dest->group, src->group)) return NULL; } /* copy the public key */ if (src->pub_key && src->group) { if (dest->pub_key) EC_POINT_free(dest->pub_key); dest->pub_key = EC_POINT_new(src->group); if (dest->pub_key == NULL) return NULL; if (!EC_POINT_copy(dest->pub_key, src->pub_key)) return NULL; } /* copy the private key */ if (src->priv_key) { if (dest->priv_key == NULL) { dest->priv_key = BN_new(); if (dest->priv_key == NULL) return NULL; } if (!BN_copy(dest->priv_key, src->priv_key)) return NULL; } /* copy method/extra data */ EC_EX_DATA_free_all_data(&dest->method_data); for (d = src->method_data; d != NULL; d = d->next) { void *t = d->dup_func(d->data); if (t == NULL) return 0; if (!EC_EX_DATA_set_data(&dest->method_data, t, d->dup_func, d->free_func, d->clear_free_func)) return 0; } /* copy the rest */ dest->enc_flag = src->enc_flag; dest->conv_form = src->conv_form; dest->version = src->version; dest->flags = src->flags; return dest; } EC_KEY *EC_KEY_dup(const EC_KEY *ec_key) { EC_KEY *ret = EC_KEY_new(); if (ret == NULL) return NULL; if (EC_KEY_copy(ret, ec_key) == NULL) { EC_KEY_free(ret); return NULL; } return ret; } int EC_KEY_up_ref(EC_KEY *r) { int i = CRYPTO_add(&r->references, 1, CRYPTO_LOCK_EC); #ifdef REF_PRINT REF_PRINT("EC_KEY",r); #endif #ifdef REF_CHECK if (i < 2) { fprintf(stderr, "EC_KEY_up, bad reference count\n"); abort(); } #endif return ((i > 1) ? 
1 : 0); } int EC_KEY_generate_key(EC_KEY *eckey) { int ok = 0; BN_CTX *ctx = NULL; BIGNUM *priv_key = NULL, *order = NULL; EC_POINT *pub_key = NULL; #ifdef OPENSSL_FIPS if (FIPS_mode()) return FIPS_ec_key_generate_key(eckey); #endif if (!eckey || !eckey->group) { ECerr(EC_F_EC_KEY_GENERATE_KEY, ERR_R_PASSED_NULL_PARAMETER); return 0; } if ((order = BN_new()) == NULL) goto err; if ((ctx = BN_CTX_new()) == NULL) goto err; if (eckey->priv_key == NULL) { priv_key = BN_new(); if (priv_key == NULL) goto err; } else priv_key = eckey->priv_key; if (!EC_GROUP_get_order(eckey->group, order, ctx)) goto err; do if (!BN_rand_range(priv_key, order)) goto err; while (BN_is_zero(priv_key)); if (eckey->pub_key == NULL) { pub_key = EC_POINT_new(eckey->group); if (pub_key == NULL) goto err; } else pub_key = eckey->pub_key; if (!EC_POINT_mul(eckey->group, pub_key, priv_key, NULL, NULL, ctx)) goto err; eckey->priv_key = priv_key; eckey->pub_key = pub_key; ok=1; err: if (order) BN_free(order); if (pub_key != NULL && eckey->pub_key == NULL) EC_POINT_free(pub_key); if (priv_key != NULL && eckey->priv_key == NULL) BN_free(priv_key); if (ctx != NULL) BN_CTX_free(ctx); return(ok); } int EC_KEY_check_key(const EC_KEY *eckey) { int ok = 0; BN_CTX *ctx = NULL; const BIGNUM *order = NULL; EC_POINT *point = NULL; if (!eckey || !eckey->group || !eckey->pub_key) { ECerr(EC_F_EC_KEY_CHECK_KEY, ERR_R_PASSED_NULL_PARAMETER); return 0; } if (EC_POINT_is_at_infinity(eckey->group, eckey->pub_key)) { ECerr(EC_F_EC_KEY_CHECK_KEY, EC_R_POINT_AT_INFINITY); goto err; } if ((ctx = BN_CTX_new()) == NULL) goto err; if ((point = EC_POINT_new(eckey->group)) == NULL) goto err; /* testing whether the pub_key is on the elliptic curve */ if (!EC_POINT_is_on_curve(eckey->group, eckey->pub_key, ctx)) { ECerr(EC_F_EC_KEY_CHECK_KEY, EC_R_POINT_IS_NOT_ON_CURVE); goto err; } /* testing whether pub_key * order is the point at infinity */ order = &eckey->group->order; if (BN_is_zero(order)) { ECerr(EC_F_EC_KEY_CHECK_KEY, EC_R_INVALID_GROUP_ORDER); goto err; } if (!EC_POINT_mul(eckey->group, point, NULL, eckey->pub_key, order, ctx)) { ECerr(EC_F_EC_KEY_CHECK_KEY, ERR_R_EC_LIB); goto err; } if (!EC_POINT_is_at_infinity(eckey->group, point)) { ECerr(EC_F_EC_KEY_CHECK_KEY, EC_R_WRONG_ORDER); goto err; } /* in case the priv_key is present : * check if generator * priv_key == pub_key */ if (eckey->priv_key) { if (BN_cmp(eckey->priv_key, order) >= 0) { ECerr(EC_F_EC_KEY_CHECK_KEY, EC_R_WRONG_ORDER); goto err; } if (!EC_POINT_mul(eckey->group, point, eckey->priv_key, NULL, NULL, ctx)) { ECerr(EC_F_EC_KEY_CHECK_KEY, ERR_R_EC_LIB); goto err; } if (EC_POINT_cmp(eckey->group, point, eckey->pub_key, ctx) != 0) { ECerr(EC_F_EC_KEY_CHECK_KEY, EC_R_INVALID_PRIVATE_KEY); goto err; } } ok = 1; err: if (ctx != NULL) BN_CTX_free(ctx); if (point != NULL) EC_POINT_free(point); return(ok); } int EC_KEY_set_public_key_affine_coordinates(EC_KEY *key, BIGNUM *x, BIGNUM *y) { BN_CTX *ctx = NULL; BIGNUM *tx, *ty; EC_POINT *point = NULL; int ok = 0, tmp_nid, is_char_two = 0; if (!key || !key->group || !x || !y) { ECerr(EC_F_EC_KEY_SET_PUBLIC_KEY_AFFINE_COORDINATES, ERR_R_PASSED_NULL_PARAMETER); return 0; } ctx = BN_CTX_new(); if (!ctx) goto err; point = EC_POINT_new(key->group); if (!point) goto err; tmp_nid = EC_METHOD_get_field_type(EC_GROUP_method_of(key->group)); if (tmp_nid == NID_X9_62_characteristic_two_field) is_char_two = 1; tx = BN_CTX_get(ctx); ty = BN_CTX_get(ctx); #ifndef OPENSSL_NO_EC2M if (is_char_two) { if (!EC_POINT_set_affine_coordinates_GF2m(key->group, point, 
x, y, ctx)) goto err; if (!EC_POINT_get_affine_coordinates_GF2m(key->group, point, tx, ty, ctx)) goto err; } else #endif { if (!EC_POINT_set_affine_coordinates_GFp(key->group, point, x, y, ctx)) goto err; if (!EC_POINT_get_affine_coordinates_GFp(key->group, point, tx, ty, ctx)) goto err; } /* Check if retrieved coordinates match originals: if not values * are out of range. */ if (BN_cmp(x, tx) || BN_cmp(y, ty)) { ECerr(EC_F_EC_KEY_SET_PUBLIC_KEY_AFFINE_COORDINATES, EC_R_COORDINATES_OUT_OF_RANGE); goto err; } if (!EC_KEY_set_public_key(key, point)) goto err; if (EC_KEY_check_key(key) == 0) goto err; ok = 1; err: if (ctx) BN_CTX_free(ctx); if (point) EC_POINT_free(point); return ok; } const EC_GROUP *EC_KEY_get0_group(const EC_KEY *key) { return key->group; } int EC_KEY_set_group(EC_KEY *key, const EC_GROUP *group) { if (key->group != NULL) EC_GROUP_free(key->group); key->group = EC_GROUP_dup(group); return (key->group == NULL) ? 0 : 1; } const BIGNUM *EC_KEY_get0_private_key(const EC_KEY *key) { return key->priv_key; } int EC_KEY_set_private_key(EC_KEY *key, const BIGNUM *priv_key) { if (key->priv_key) BN_clear_free(key->priv_key); key->priv_key = BN_dup(priv_key); return (key->priv_key == NULL) ? 0 : 1; } const EC_POINT *EC_KEY_get0_public_key(const EC_KEY *key) { return key->pub_key; } int EC_KEY_set_public_key(EC_KEY *key, const EC_POINT *pub_key) { if (key->pub_key != NULL) EC_POINT_free(key->pub_key); key->pub_key = EC_POINT_dup(pub_key, key->group); return (key->pub_key == NULL) ? 0 : 1; } unsigned int EC_KEY_get_enc_flags(const EC_KEY *key) { return key->enc_flag; } void EC_KEY_set_enc_flags(EC_KEY *key, unsigned int flags) { key->enc_flag = flags; } point_conversion_form_t EC_KEY_get_conv_form(const EC_KEY *key) { return key->conv_form; } void EC_KEY_set_conv_form(EC_KEY *key, point_conversion_form_t cform) { key->conv_form = cform; if (key->group != NULL) EC_GROUP_set_point_conversion_form(key->group, cform); } void *EC_KEY_get_key_method_data(EC_KEY *key, void *(*dup_func)(void *), void (*free_func)(void *), void (*clear_free_func)(void *)) { void *ret; CRYPTO_r_lock(CRYPTO_LOCK_EC); ret = EC_EX_DATA_get_data(key->method_data, dup_func, free_func, clear_free_func); CRYPTO_r_unlock(CRYPTO_LOCK_EC); return ret; } void *EC_KEY_insert_key_method_data(EC_KEY *key, void *data, void *(*dup_func)(void *), void (*free_func)(void *), void (*clear_free_func)(void *)) { EC_EXTRA_DATA *ex_data; CRYPTO_w_lock(CRYPTO_LOCK_EC); ex_data = EC_EX_DATA_get_data(key->method_data, dup_func, free_func, clear_free_func); if (ex_data == NULL) EC_EX_DATA_set_data(&key->method_data, data, dup_func, free_func, clear_free_func); CRYPTO_w_unlock(CRYPTO_LOCK_EC); return ex_data; } void EC_KEY_set_asn1_flag(EC_KEY *key, int flag) { if (key->group != NULL) EC_GROUP_set_asn1_flag(key->group, flag); } int EC_KEY_precompute_mult(EC_KEY *key, BN_CTX *ctx) { if (key->group == NULL) return 0; return EC_GROUP_precompute_mult(key->group, ctx); } int EC_KEY_get_flags(const EC_KEY *key) { return key->flags; } void EC_KEY_set_flags(EC_KEY *key, int flags) { key->flags |= flags; } void EC_KEY_clear_flags(EC_KEY *key, int flags) { key->flags &= ~flags; }
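`EC_KEY_generate_key` above draws a private scalar uniformly from [1, order-1] (retrying `BN_rand_range` until it is nonzero) and computes the public key as `priv_key * generator`. The same operation invoked from a high-level wrapper, sketched here with the third-party Python `cryptography` package — an illustration for comparison, not OpenSSL's own API:

```python
# Illustrative sketch with the Python `cryptography` package (not OpenSSL's C API).
from cryptography.hazmat.primitives.asymmetric import ec

private_key = ec.generate_private_key(ec.SECP256R1())  # random scalar d in [1, order-1]
public_key = private_key.public_key()                  # the point d * G

d = private_key.private_numbers().private_value
pub = public_key.public_numbers()
print("d   =", hex(d))
print("pub = (%s, %s)" % (hex(pub.x), hex(pub.y)))     # affine coordinates of d * G
```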
5,887
326
# To use Inference Engine backend, specify location of plugins:
# export LD_LIBRARY_PATH=/opt/intel/deeplearning_deploymenttoolkit/deployment_tools/external/mklml_lnx/lib:$LD_LIBRARY_PATH
import cv2 as cv
import numpy as np
import argparse
#import imutils
import time

parser = argparse.ArgumentParser(
        description='This script is used to demonstrate OpenPose human pose estimation network '
                    'from https://github.com/CMU-Perceptual-Computing-Lab/openpose project using OpenCV. '
                    'The sample and model are simplified and could be used for a single person on the frame.')
parser.add_argument('--input', help='Path to input image.')
parser.add_argument('--proto', help='Path to .prototxt')
parser.add_argument('--model', help='Path to .caffemodel')
parser.add_argument('--dataset', help='Specify what kind of model was trained. '
                                      'It could be (COCO, MPI) depends on dataset.')
parser.add_argument('--thr', default=0.1, type=float, help='Threshold value for pose parts heat map')
parser.add_argument('--width', default=368, type=int, help='Resize input to specific width.')
parser.add_argument('--height', default=368, type=int, help='Resize input to specific height.')

args = parser.parse_args()

if args.dataset == 'COCO':
    BODY_PARTS = { "Nose": 0, "Neck": 1, "RShoulder": 2, "RElbow": 3, "RWrist": 4,
                   "LShoulder": 5, "LElbow": 6, "LWrist": 7, "RHip": 8, "RKnee": 9,
                   "RAnkle": 10, "LHip": 11, "LKnee": 12, "LAnkle": 13, "REye": 14,
                   "LEye": 15, "REar": 16, "LEar": 17, "Background": 18 }

    POSE_PAIRS = [ ["Neck", "RShoulder"], ["Neck", "LShoulder"], ["RShoulder", "RElbow"],
                   ["RElbow", "RWrist"], ["LShoulder", "LElbow"], ["LElbow", "LWrist"],
                   ["Neck", "RHip"], ["RHip", "RKnee"], ["RKnee", "RAnkle"], ["Neck", "LHip"],
                   ["LHip", "LKnee"], ["LKnee", "LAnkle"], ["Neck", "Nose"], ["Nose", "REye"],
                   ["REye", "REar"], ["Nose", "LEye"], ["LEye", "LEar"] ]
elif args.dataset == 'MPI':
    #assert(args.dataset == 'MPI')
    BODY_PARTS = { "Head": 0, "Neck": 1, "RShoulder": 2, "RElbow": 3, "RWrist": 4,
                   "LShoulder": 5, "LElbow": 6, "LWrist": 7, "RHip": 8, "RKnee": 9,
                   "RAnkle": 10, "LHip": 11, "LKnee": 12, "LAnkle": 13, "Chest": 14,
                   "Background": 15 }

    POSE_PAIRS = [ ["Head", "Neck"], ["Neck", "RShoulder"], ["RShoulder", "RElbow"],
                   ["RElbow", "RWrist"], ["Neck", "LShoulder"], ["LShoulder", "LElbow"],
                   ["LElbow", "LWrist"], ["Neck", "Chest"], ["Chest", "RHip"], ["RHip", "RKnee"],
                   ["RKnee", "RAnkle"], ["Chest", "LHip"], ["LHip", "LKnee"], ["LKnee", "LAnkle"] ]
else:
    BODY_PARTS = { "Nose": 0, "Neck": 1, "RShoulder": 2, "RElbow": 3, "RWrist": 4,
                   "LShoulder": 5, "LElbow": 6, "LWrist": 7, "MidHip": 8, "RHip": 9,
                   "RKnee": 10, "RAnkle": 11, "LHip": 12, "LKnee": 13, "LAnkle": 14,
                   "REye": 15, "LEye": 16, "REar": 17, "LEar": 18, "LBigToe": 19,
                   "LSmallToe": 20, "LHeel": 21, "RBigToe": 22, "RSmallToe": 23,
                   "RHeel": 24, "Background": 25 }

    POSE_PAIRS = [ ["Neck", "MidHip"], ["Neck", "RShoulder"], ["Neck", "LShoulder"],
                   ["RShoulder", "RElbow"], ["RElbow", "RWrist"], ["LShoulder", "LElbow"],
                   ["LElbow", "LWrist"], ["MidHip", "RHip"], ["RHip", "RKnee"], ["RKnee", "RAnkle"],
                   ["MidHip", "LHip"], ["LHip", "LKnee"], ["LKnee", "LAnkle"], ["Neck", "Nose"],
                   ["Nose", "REye"], ["REye", "REar"], ["Nose", "LEye"], ["LEye", "LEar"],
                   ["RShoulder", "REar"], ["LShoulder", "LEar"], ["LAnkle", "LBigToe"],
                   ["LBigToe", "LSmallToe"], ["LAnkle", "LHeel"], ["RAnkle", "RBigToe"],
                   ["RBigToe", "RSmallToe"], ["RAnkle", "RHeel"] ]

inWidth = args.width
inHeight = args.height

net = cv.dnn.readNetFromCaffe(args.proto, args.model)

frame = cv.imread(args.input)
frameWidth = frame.shape[1]
frameHeight = frame.shape[0]

inp = cv.dnn.blobFromImage(frame, 1.0 / 255, (inWidth, inHeight),
                           (0, 0, 0), swapRB=False, crop=False)
net.setInput(inp)
start_t = time.time()
out = net.forward()
print("time is ", time.time() - start_t)
# print(inp.shape)

kwinName = "Pose Estimation Demo: Cv-Tricks.com"
cv.namedWindow(kwinName, cv.WINDOW_AUTOSIZE)

#assert(len(BODY_PARTS) == out.shape[1])

points = []
for i in range(len(BODY_PARTS)):
    # Slice heatmap of the corresponding body part.
    heatMap = out[0, i, :, :]

    # Originally, we would try to find all the local maxima. To simplify the sample
    # we just find a global one. However, only a single pose at a time
    # can be detected this way.
    _, conf, _, point = cv.minMaxLoc(heatMap)
    x = (frameWidth * point[0]) / out.shape[3]
    y = (frameHeight * point[1]) / out.shape[2]

    # Add a point if its confidence is higher than the threshold.
    points.append((int(x), int(y)) if conf > args.thr else None)

for pair in POSE_PAIRS:
    partFrom = pair[0]
    partTo = pair[1]
    assert(partFrom in BODY_PARTS)
    assert(partTo in BODY_PARTS)

    idFrom = BODY_PARTS[partFrom]
    idTo = BODY_PARTS[partTo]
    if points[idFrom] and points[idTo]:
        cv.line(frame, points[idFrom], points[idTo], (255, 74, 0), 3)
        cv.ellipse(frame, points[idFrom], (4, 4), 0, 0, 360, (255, 255, 255), cv.FILLED)
        cv.ellipse(frame, points[idTo], (4, 4), 0, 0, 360, (255, 255, 255), cv.FILLED)
        cv.putText(frame, str(idFrom), points[idFrom], cv.FONT_HERSHEY_SIMPLEX, 0.75,
                   (255, 255, 255), 2, cv.LINE_AA)
        cv.putText(frame, str(idTo), points[idTo], cv.FONT_HERSHEY_SIMPLEX, 0.75,
                   (255, 255, 255), 2, cv.LINE_AA)

t, _ = net.getPerfProfile()
freq = cv.getTickFrequency() / 1000
cv.putText(frame, '%.2fms' % (t / freq), (10, 20), cv.FONT_HERSHEY_SIMPLEX, 0.75,
           (255, 255, 255), 2, cv.LINE_AA)

cv.imshow(kwinName, frame)
cv.imwrite('result_' + args.input, frame)
2,708
427
package modern.challenge;

public class Pizza {
}
19
375
/* * Copyright 2019 Nokia Solutions and Networks * Licensed under the Apache License, Version 2.0, * see license.txt file for details. */ package org.rf.ide.core.testdata.text.read.postfixes; import static java.util.stream.Collectors.toList; import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import java.io.File; import java.net.URISyntaxException; import java.util.List; import org.junit.jupiter.api.Test; import org.rf.ide.core.environment.RobotVersion; import org.rf.ide.core.testdata.RobotParser; import org.rf.ide.core.testdata.model.AModelElement; import org.rf.ide.core.testdata.model.ModelType; import org.rf.ide.core.testdata.model.RobotFile; import org.rf.ide.core.testdata.model.RobotFileOutput; import org.rf.ide.core.testdata.model.RobotFileOutput.Status; import org.rf.ide.core.testdata.model.RobotProjectHolder; import org.rf.ide.core.testdata.model.table.keywords.UserKeyword; import org.rf.ide.core.testdata.model.table.tasks.Task; import org.rf.ide.core.testdata.model.table.testcases.TestCase; import org.rf.ide.core.testdata.text.read.recognizer.RobotToken; public class EmptyLinesInExecutableTablesFixerOutputCheckTest { @Test public void thereIsOnlyDocumentationEmptyRowAndExecutionRow_whenEmptyLineIsInsideDocumentationOfTestCase() throws URISyntaxException { final RobotFile modelFile = getModelFile("DocumentationWithEmptyLinesInTestCase.robot"); assertThat(modelFile.getSettingTable().isPresent()).isFalse(); assertThat(modelFile.getVariableTable().isPresent()).isFalse(); assertThat(modelFile.getTestCaseTable().isPresent()).isTrue(); assertThat(modelFile.getTasksTable().isPresent()).isFalse(); assertThat(modelFile.getKeywordTable().isPresent()).isFalse(); final List<TestCase> cases = modelFile.getTestCaseTable().getTestCases(); assertThat(cases).hasSize(1); final TestCase testCase = cases.get(0); final List<AModelElement<TestCase>> elements = testCase.getElements(); assertThat(elements).hasSize(4); assertThat(elements.get(0).getModelType()).isEqualTo(ModelType.TEST_CASE_DOCUMENTATION); assertThat(cellsOf(elements.get(0))).containsExactly("[Documentation]", "...", "first line", "...", "second line"); assertThat(elements.get(1).getModelType()).isEqualTo(ModelType.TEST_CASE_EXECUTABLE_ROW); assertThat(cellsOf(elements.get(1))).containsExactly("Log", "1"); assertThat(elements.get(2).getModelType()).isEqualTo(ModelType.EMPTY_LINE); assertThat(cellsOf(elements.get(2))).containsExactly(""); assertThat(elements.get(3).getModelType()).isEqualTo(ModelType.TEST_CASE_EXECUTABLE_ROW); assertThat(cellsOf(elements.get(3))).containsExactly("Log", "2"); } @Test public void thereIsOnlyDocumentationEmptyRowAndExecutionRow_whenEmptyLineIsInsideDocumentationOfTask() throws URISyntaxException { final RobotFile modelFile = getModelFile("DocumentationWithEmptyLinesInTask.robot"); assertThat(modelFile.getSettingTable().isPresent()).isFalse(); assertThat(modelFile.getVariableTable().isPresent()).isFalse(); assertThat(modelFile.getTestCaseTable().isPresent()).isFalse(); assertThat(modelFile.getTasksTable().isPresent()).isTrue(); assertThat(modelFile.getKeywordTable().isPresent()).isFalse(); final List<Task> tasks = modelFile.getTasksTable().getTasks(); assertThat(tasks).hasSize(1); final Task task = tasks.get(0); final List<AModelElement<Task>> elements = task.getElements(); assertThat(elements).hasSize(4); assertThat(elements.get(0).getModelType()).isEqualTo(ModelType.TASK_DOCUMENTATION); 
assertThat(cellsOf(elements.get(0))).containsExactly("[Documentation]", "...", "first line", "...", "second line"); assertThat(elements.get(1).getModelType()).isEqualTo(ModelType.TASK_EXECUTABLE_ROW); assertThat(cellsOf(elements.get(1))).containsExactly("Log", "1"); assertThat(elements.get(2).getModelType()).isEqualTo(ModelType.EMPTY_LINE); assertThat(cellsOf(elements.get(2))).containsExactly(""); assertThat(elements.get(3).getModelType()).isEqualTo(ModelType.TASK_EXECUTABLE_ROW); assertThat(cellsOf(elements.get(3))).containsExactly("Log", "2"); } @Test public void thereIsOnlyDocumentationEmptyRowAndExecutionRow_whenEmptyLineIsInsideDocumentationOfUserKeyword() throws URISyntaxException { final RobotFile modelFile = getModelFile("DocumentationWithEmptyLinesInUserKeyword.robot"); assertThat(modelFile.getSettingTable().isPresent()).isFalse(); assertThat(modelFile.getVariableTable().isPresent()).isFalse(); assertThat(modelFile.getTestCaseTable().isPresent()).isFalse(); assertThat(modelFile.getTasksTable().isPresent()).isFalse(); assertThat(modelFile.getKeywordTable().isPresent()).isTrue(); final List<UserKeyword> keywords = modelFile.getKeywordTable().getKeywords(); assertThat(keywords).hasSize(1); final UserKeyword keyword = keywords.get(0); final List<AModelElement<UserKeyword>> elements = keyword.getElements(); assertThat(elements).hasSize(4); assertThat(elements.get(0).getModelType()).isEqualTo(ModelType.USER_KEYWORD_DOCUMENTATION); assertThat(cellsOf(elements.get(0))).containsExactly("[Documentation]", "...", "first line", "...", "second line"); assertThat(elements.get(1).getModelType()).isEqualTo(ModelType.USER_KEYWORD_EXECUTABLE_ROW); assertThat(cellsOf(elements.get(1))).containsExactly("Log", "1"); assertThat(elements.get(2).getModelType()).isEqualTo(ModelType.EMPTY_LINE); assertThat(cellsOf(elements.get(2))).containsExactly(""); assertThat(elements.get(3).getModelType()).isEqualTo(ModelType.USER_KEYWORD_EXECUTABLE_ROW); assertThat(cellsOf(elements.get(3))).containsExactly("Log", "2"); } private List<String> cellsOf(final AModelElement<?> element) { return element.getElementTokens().stream().map(RobotToken::getText).collect(toList()); } private RobotFile getModelFile(final String fileName) throws URISyntaxException { final RobotProjectHolder projectHolder = mock(RobotProjectHolder.class); final File file = new File(RobotParser.class.getResource("parser/bugs/" + fileName).toURI()); when(projectHolder.shouldBeParsed(file)).thenReturn(true); final RobotParser parser = new RobotParser(projectHolder, new RobotVersion(3, 1)); final List<RobotFileOutput> parsed = parser.parse(file); // verify assertThat(parsed).hasSize(1); final RobotFileOutput robotFileOutput = parsed.get(0); assertThat(robotFileOutput.getStatus()).isEqualTo(Status.PASSED); return robotFileOutput.getFileModel(); } }
2,698
543
<reponame>PSFC-HEDP/Orion
"""Orion Functional API.

This module provides a collection of simple python functions that allow
using Orion performing as few steps as possible, hiding away all the
complexity related to loading data, creating class instances or
serializing and de-serializing previously fitted pipelines.

Currently implemented functions:

- `fit_pipeline`: Learn an Orion pipeline and save it for later usage.
- `detect_anomalies`: Analyze a signal to detect anomalies. Optionally
  learn a pipeline on the way.
- `evaluate_pipeline`: Evaluate the performance of a pipeline against a
  list of known anomalies.
"""

import json
import os
from pickle import UnpicklingError
from typing import List, Union

import pandas as pd
from mlblocks import MLPipeline

from orion.core import Orion


def _load_data(data):
    if isinstance(data, pd.DataFrame):
        return data
    elif isinstance(data, str):
        return pd.read_csv(data)


def _load_dict(path_or_dict):
    if isinstance(path_or_dict, dict):
        return path_or_dict
    elif isinstance(path_or_dict, str) and os.path.exists(path_or_dict):
        with open(path_or_dict) as json_file:
            return json.load(json_file)


def _load_orion(pipeline, hyperparameters=None):
    if pipeline is None:
        return Orion()
    elif isinstance(pipeline, Orion):
        return pipeline
    else:
        hyperparameters = _load_dict(hyperparameters)
        try:
            return Orion(pipeline, hyperparameters)
        except ValueError:
            try:
                return Orion.load(pipeline)
            except (FileNotFoundError, UnpicklingError):
                raise ValueError('Invalid pipeline: {}'.format(pipeline))


def fit_pipeline(data: Union[str, pd.DataFrame],
                 pipeline: Union[str, MLPipeline, dict] = None,
                 hyperparameters: Union[str, pd.DataFrame] = None,
                 save_path: str = None) -> Orion:
    """Fit an Orion pipeline to the data.

    The pipeline can be passed as:
        * An ``str`` with a path to a JSON file.
        * An ``str`` with the name of a registered Orion pipeline.
        * An ``MLPipeline`` instance.
        * A ``dict`` with an ``MLPipeline`` specification.

    If no pipeline is passed, the default Orion pipeline is used.

    Args:
        data (str or DataFrame):
            Data to which the pipeline should be fitted.
            It can be passed as a path to a CSV file or as a DataFrame.
        pipeline (str, Pipeline or dict):
            Pipeline to use. It can be passed as:
                * An ``str`` with a path to a JSON file.
                * An ``str`` with the name of a registered pipeline.
                * An ``MLPipeline`` instance.
                * A ``dict`` with an ``MLPipeline`` specification.
        hyperparameters (str or dict):
            Hyperparameters to set to the pipeline. It can be passed as
            a hyperparameters ``dict`` in the ``mlblocks`` format or as
            a path to the corresponding JSON file. Defaults to ``None``.
        save_path (str):
            Path to the file where the fitted Orion instance will be stored
            using ``pickle``. If not given, the Orion instance is returned.
            Defaults to ``None``.

    Returns:
        Orion:
            If no save_path is provided, the fitted Orion instance is returned.
    """
    data = _load_data(data)
    hyperparameters = _load_dict(hyperparameters)
    if pipeline is None:
        pipeline = Orion.DEFAULT_PIPELINE

    orion = Orion(pipeline, hyperparameters)
    orion.fit(data)

    if save_path:
        orion.save(save_path)
    else:
        return orion


def detect_anomalies(data: Union[str, pd.DataFrame] = None,
                     pipeline: Union[Orion, str, MLPipeline, dict] = None,
                     hyperparameters: Union[str, pd.DataFrame] = None,
                     train_data: Union[str, pd.DataFrame] = None) -> pd.DataFrame:
    """Detect anomalies on timeseries data.

    The anomalies are detected using an Orion pipeline which can be passed as:
        * An ``Orion`` instance.
        * An ``str`` with the path to an Orion pickle file.
        * An ``str`` with a path to a JSON file.
        * An ``str`` with the name of a registered Orion pipeline.
        * An ``MLPipeline`` instance.
        * A ``dict`` with an ``MLPipeline`` specification.

    If no pipeline is passed, the default Orion pipeline is used.

    Optionally, separated learning data can be passed to fit the
    pipeline to it before using it to detect anomalies.

    Args:
        data (str or DataFrame):
            Data to analyze searching for anomalies.
            It can be passed as a path to a CSV file or as a DataFrame.
        pipeline (str or Pipeline or dict):
            Pipeline to use. It can be passed as:
                * An ``Orion`` instance.
                * An ``str`` with the path to an Orion pickle file.
                * An ``str`` with a path to a JSON file.
                * An ``str`` with the name of a registered pipeline.
                * An ``MLPipeline`` instance.
                * A ``dict`` with an ``MLPipeline`` specification.
        hyperparameters (str or dict):
            Hyperparameters to set to the pipeline. It can be passed as
            a hyperparameters ``dict`` in the ``mlblocks`` format or as
            a path to the corresponding JSON file. Ignored if being passed a
            previously serialized ``Orion`` instance. Defaults to ``None``.
        train_data (str or DataFrame):
            Data to which the pipeline should be fitted.
            It can be passed as a path to a CSV file or as a DataFrame.
            If not given, the pipeline is used without fitting it first.

    Returns:
        DataFrame:
            ``pandas.DataFrame`` containing the detected anomalies.
    """
    data = _load_data(data)
    orion = _load_orion(pipeline, hyperparameters)

    if train_data is not None:
        train_data = _load_data(train_data)
        orion.fit(train_data)

    return orion.detect(data)


def evaluate_pipeline(data: Union[str, pd.DataFrame],
                      truth: Union[str, pd.DataFrame],
                      pipeline: Union[str, dict, MLPipeline],
                      hyperparameters: Union[str, pd.DataFrame] = None,
                      metrics: List[Union[callable, str]] = None,
                      train_data: Union[str, pd.DataFrame] = None) -> pd.DataFrame:
    """Evaluate the performance of a pipeline.

    The pipeline is evaluated by executing it on a signal
    for which anomalies are known and then applying one or
    more metrics to it to compute scores.

    The pipeline can be passed as:
        * An ``str`` with a path to a JSON file.
        * An ``str`` with the path to a pickle file.
        * An ``str`` with the name of a registered Orion pipeline.
        * An ``MLPipeline`` instance.
        * A ``dict`` with an ``MLPipeline`` specification.

    If the pipeline is not fitted, separated learning data can be
    passed to fit the pipeline to it before using it to detect
    anomalies.

    Args:
        data (str or DataFrame):
            Data to analyze searching for anomalies.
            It can be passed as a path to a CSV file or as a DataFrame.
        truth (str or DataFrame):
            Table of known anomalies to use as the ground truth for
            scoring. It can be passed as a path to a CSV file or as a
            DataFrame.
        pipeline (str or Pipeline or dict):
            Pipeline to use. It can be passed as:
                * An ``str`` with a path to a JSON file.
                * An ``str`` with the name of a registered pipeline.
                * An ``str`` with the path to a pickle file.
                * An ``MLPipeline`` instance.
                * A ``dict`` with an ``MLPipeline`` specification.
        hyperparameters (str or dict):
            Hyperparameters to set to the pipeline. It can be passed as
            a hyperparameters ``dict`` in the ``mlblocks`` format or as
            a path to the corresponding JSON file. Defaults to ``None``.
        metrics (list[str]):
            List of metrics to use. If not passed, all the Orion metrics
            are applied.
        train_data (str or DataFrame):
            Data to which the pipeline should be fitted.
            It can be passed as a path to a CSV file or as a DataFrame.
            If not given, the pipeline is used without fitting it first.
    """
    data = _load_data(data)
    truth = _load_data(truth)
    fit = train_data is not None
    if fit:
        train_data = _load_data(train_data)

    orion = _load_orion(pipeline, hyperparameters)

    return orion.detect(data, truth, fit, train_data, metrics)
3,572
340
import base64 import json import platform import sys import requests from ..exceptions import Auth0Error, RateLimitError from time import sleep from random import randint UNKNOWN_ERROR = 'a0.sdk.internal.unknown' class RestClientOptions(object): """Configuration object for RestClient. Used for configuring additional RestClient options, such as rate-limit retries. Args: telemetry (bool, optional): Enable or disable Telemetry (defaults to True) timeout (float or tuple, optional): Change the requests connect and read timeout. Pass a tuple to specify both values separately or a float to set both to it. (defaults to 5.0 for both) retries (integer): In the event an API request returns a 429 response header (indicating rate-limit has been hit), the RestClient will retry the request this many times using an exponential backoff strategy, before raising a RateLimitError exception. 10 retries max. (defaults to 3) """ def __init__(self, telemetry=None, timeout=None, retries=None): self.telemetry = True self.timeout = 5.0 self.retries = 3 if telemetry is not None: self.telemetry = telemetry if timeout is not None: self.timeout = timeout if retries is not None: self.retries = retries class RestClient(object): """Provides simple methods for handling all RESTful api endpoints. Args: telemetry (bool, optional): Enable or disable Telemetry (defaults to True) timeout (float or tuple, optional): Change the requests connect and read timeout. Pass a tuple to specify both values separately or a float to set both to it. (defaults to 5.0 for both) options (RestClientOptions): Pass an instance of RestClientOptions to configure additional RestClient options, such as rate-limit retries. Overrides matching options passed to the constructor. (defaults to 3) """ def __init__(self, jwt, telemetry=True, timeout=5.0, options=None): if options is None: options = RestClientOptions(telemetry=telemetry, timeout=timeout) self.options = options self.jwt = jwt self._metrics = {'retries': 0, 'backoff': []} self._skip_sleep = False self.base_headers = { 'Authorization': 'Bearer {}'.format(self.jwt), 'Content-Type': 'application/json', } if options.telemetry: py_version = platform.python_version() version = sys.modules['auth0'].__version__ auth0_client = json.dumps({ 'name': 'auth0-python', 'version': version, 'env': { 'python': py_version, } }).encode('utf-8') self.base_headers.update({ 'User-Agent': 'Python/{}'.format(py_version), 'Auth0-Client': base64.b64encode(auth0_client), }) # For backwards compatibility reasons only # TODO: Deprecate in the next major so we can prune these arguments. Guidance should be to use RestClient.options.* self.telemetry = options.telemetry self.timeout = options.timeout # Returns a hard cap for the maximum number of retries allowed (10) def MAX_REQUEST_RETRIES(self): return 10 # Returns the maximum amount of jitter to introduce in milliseconds (100ms) def MAX_REQUEST_RETRY_JITTER(self): return 100 # Returns the maximum delay window allowed (1000ms) def MAX_REQUEST_RETRY_DELAY(self): return 1000 # Returns the minimum delay window allowed (100ms) def MIN_REQUEST_RETRY_DELAY(self): return 100 def get(self, url, params=None): headers = self.base_headers.copy() # Track the API request attempt number attempt = 0 # Reset the metrics tracker self._metrics = {'retries': 0, 'backoff': []} # Cap the maximum number of retries to 10 or fewer. Floor the retries at 0. 
retries = min(self.MAX_REQUEST_RETRIES(), max(0, self.options.retries)) while True: # Increment attempt number attempt += 1 # Issue the request response = requests.get(url, params=params, headers=headers, timeout=self.options.timeout); # If the response did not have a 429 header, or the retries were configured at 0, or the attempt number is equal to or greater than the configured retries, break if response.status_code != 429 or retries <= 0 or attempt > retries: break # Retry the request. Apply a exponential backoff for subsequent attempts, using this formula: # max(MIN_REQUEST_RETRY_DELAY, min(MAX_REQUEST_RETRY_DELAY, (100ms * (2 ** attempt - 1)) + random_between(1, MAX_REQUEST_RETRY_JITTER))) # Increases base delay by (100ms * (2 ** attempt - 1)) wait = 100 * 2 ** (attempt - 1) # Introduces jitter to the base delay; increases delay between 1ms to MAX_REQUEST_RETRY_JITTER (100ms) wait += randint(1, self.MAX_REQUEST_RETRY_JITTER()) # Is never more than MAX_REQUEST_RETRY_DELAY (1s) wait = min(self.MAX_REQUEST_RETRY_DELAY(), wait) # Is never less than MIN_REQUEST_RETRY_DELAY (100ms) wait = max(self.MIN_REQUEST_RETRY_DELAY(), wait) self._metrics['retries'] = attempt self._metrics['backoff'].append(wait) # Skip calling sleep() when running unit tests if self._skip_sleep is False: # sleep() functions in seconds, so convert the milliseconds formula above accordingly sleep(wait / 1000) # Return the final Response return self._process_response(response) def post(self, url, data=None): headers = self.base_headers.copy() response = requests.post(url, json=data, headers=headers, timeout=self.options.timeout) return self._process_response(response) def file_post(self, url, data=None, files=None): headers = self.base_headers.copy() headers.pop('Content-Type', None) response = requests.post(url, data=data, files=files, headers=headers, timeout=self.options.timeout) return self._process_response(response) def patch(self, url, data=None): headers = self.base_headers.copy() response = requests.patch(url, json=data, headers=headers, timeout=self.options.timeout) return self._process_response(response) def put(self, url, data=None): headers = self.base_headers.copy() response = requests.put(url, json=data, headers=headers, timeout=self.options.timeout) return self._process_response(response) def delete(self, url, params=None, data=None): headers = self.base_headers.copy() response = requests.delete(url, headers=headers, params=params or {}, json=data, timeout=self.options.timeout) return self._process_response(response) def _process_response(self, response): return self._parse(response).content() def _parse(self, response): if not response.text: return EmptyResponse(response.status_code) try: return JsonResponse(response) except ValueError: return PlainResponse(response) class Response(object): def __init__(self, status_code, content, headers): self._status_code = status_code self._content = content self._headers = headers def content(self): if self._is_error(): if self._status_code == 429: reset_at = int(self._headers.get('x-ratelimit-reset', '-1')) raise RateLimitError(error_code=self._error_code(), message=self._error_message(), reset_at=reset_at) raise Auth0Error(status_code=self._status_code, error_code=self._error_code(), message=self._error_message()) else: return self._content def _is_error(self): return self._status_code is None or self._status_code >= 400 # Adding these methods to force implementation in subclasses because they are references in this parent class def _error_code(self): raise 
NotImplementedError def _error_message(self): raise NotImplementedError class JsonResponse(Response): def __init__(self, response): content = json.loads(response.text) super(JsonResponse, self).__init__(response.status_code, content, response.headers) def _error_code(self): if 'errorCode' in self._content: return self._content.get('errorCode') elif 'error' in self._content: return self._content.get('error') else: return UNKNOWN_ERROR def _error_message(self): message = self._content.get('message', '') if message is not None and message != '': return message return self._content.get('error', '') class PlainResponse(Response): def __init__(self, response): super(PlainResponse, self).__init__(response.status_code, response.text, response.headers) def _error_code(self): return UNKNOWN_ERROR def _error_message(self): return self._content class EmptyResponse(Response): def __init__(self, status_code): super(EmptyResponse, self).__init__(status_code, '', {}) def _error_code(self): return UNKNOWN_ERROR def _error_message(self): return ''
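The retry loop in `get` computes each wait as `max(MIN, min(MAX, 100ms * 2^(attempt-1) + jitter))`. A standalone sketch of that schedule, mirroring the constants defined in the methods above (illustration only):

```python
# Sketch of RestClient.get's backoff schedule; constants mirror the methods above.
from random import randint

MAX_DELAY = 1000  # ms, MAX_REQUEST_RETRY_DELAY
MIN_DELAY = 100   # ms, MIN_REQUEST_RETRY_DELAY
MAX_JITTER = 100  # ms, MAX_REQUEST_RETRY_JITTER

def backoff_ms(attempt):
    wait = 100 * 2 ** (attempt - 1)  # exponential base delay
    wait += randint(1, MAX_JITTER)   # 1..100 ms of jitter
    return max(MIN_DELAY, min(MAX_DELAY, wait))

for attempt in range(1, 6):
    print(attempt, backoff_ms(attempt))  # ~101-200, 201-300, 401-500, 801-900, 1000
```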
4,068
41,267
<reponame>FingerCrunch/scrapy<filename>scrapy/http/request/__init__.py """ This module implements the Request class which is used to represent HTTP requests in Scrapy. See documentation in docs/topics/request-response.rst """ import inspect from typing import Callable, List, Optional, Tuple, Type, TypeVar, Union from w3lib.url import safe_url_string import scrapy from scrapy.http.common import obsolete_setter from scrapy.http.headers import Headers from scrapy.utils.curl import curl_to_request_kwargs from scrapy.utils.python import to_bytes from scrapy.utils.trackref import object_ref from scrapy.utils.url import escape_ajax RequestTypeVar = TypeVar("RequestTypeVar", bound="Request") class Request(object_ref): """Represents an HTTP request, which is usually generated in a Spider and executed by the Downloader, thus generating a :class:`Response`. """ attributes: Tuple[str, ...] = ( "url", "callback", "method", "headers", "body", "cookies", "meta", "encoding", "priority", "dont_filter", "errback", "flags", "cb_kwargs", ) """A tuple of :class:`str` objects containing the name of all public attributes of the class that are also keyword parameters of the ``__init__`` method. Currently used by :meth:`Request.replace`, :meth:`Request.to_dict` and :func:`~scrapy.utils.request.request_from_dict`. """ def __init__( self, url: str, callback: Optional[Callable] = None, method: str = "GET", headers: Optional[dict] = None, body: Optional[Union[bytes, str]] = None, cookies: Optional[Union[dict, List[dict]]] = None, meta: Optional[dict] = None, encoding: str = "utf-8", priority: int = 0, dont_filter: bool = False, errback: Optional[Callable] = None, flags: Optional[List[str]] = None, cb_kwargs: Optional[dict] = None, ) -> None: self._encoding = encoding # this one has to be set first self.method = str(method).upper() self._set_url(url) self._set_body(body) if not isinstance(priority, int): raise TypeError(f"Request priority not an integer: {priority!r}") self.priority = priority if callback is not None and not callable(callback): raise TypeError(f'callback must be a callable, got {type(callback).__name__}') if errback is not None and not callable(errback): raise TypeError(f'errback must be a callable, got {type(errback).__name__}') self.callback = callback self.errback = errback self.cookies = cookies or {} self.headers = Headers(headers or {}, encoding=encoding) self.dont_filter = dont_filter self._meta = dict(meta) if meta else None self._cb_kwargs = dict(cb_kwargs) if cb_kwargs else None self.flags = [] if flags is None else list(flags) @property def cb_kwargs(self) -> dict: if self._cb_kwargs is None: self._cb_kwargs = {} return self._cb_kwargs @property def meta(self) -> dict: if self._meta is None: self._meta = {} return self._meta def _get_url(self) -> str: return self._url def _set_url(self, url: str) -> None: if not isinstance(url, str): raise TypeError(f"Request url must be str, got {type(url).__name__}") s = safe_url_string(url, self.encoding) self._url = escape_ajax(s) if ( '://' not in self._url and not self._url.startswith('about:') and not self._url.startswith('data:') ): raise ValueError(f'Missing scheme in request url: {self._url}') url = property(_get_url, obsolete_setter(_set_url, 'url')) def _get_body(self) -> bytes: return self._body def _set_body(self, body: Optional[Union[str, bytes]]) -> None: self._body = b"" if body is None else to_bytes(body, self.encoding) body = property(_get_body, obsolete_setter(_set_body, 'body')) @property def encoding(self) -> str: return self._encoding 
    def __str__(self) -> str:
        return f"<{self.method} {self.url}>"

    __repr__ = __str__

    def copy(self) -> "Request":
        return self.replace()

    def replace(self, *args, **kwargs) -> "Request":
        """Create a new Request with the same attributes except for those given new values"""
        for x in self.attributes:
            kwargs.setdefault(x, getattr(self, x))
        cls = kwargs.pop('cls', self.__class__)
        return cls(*args, **kwargs)

    @classmethod
    def from_curl(
        cls: Type[RequestTypeVar],
        curl_command: str,
        ignore_unknown_options: bool = True,
        **kwargs
    ) -> RequestTypeVar:
        """Create a Request object from a string containing a `cURL
        <https://curl.haxx.se/>`_ command. It populates the HTTP method, the
        URL, the headers, the cookies and the body. It accepts the same
        arguments as the :class:`Request` class, taking preference and
        overriding the values of the same arguments contained in the cURL
        command.

        Unrecognized options are ignored by default. To raise an error when
        finding unknown options call this method by passing
        ``ignore_unknown_options=False``.

        .. caution:: Using :meth:`from_curl` from :class:`~scrapy.http.Request`
                     subclasses, such as :class:`~scrapy.http.JSONRequest`, or
                     :class:`~scrapy.http.XmlRpcRequest`, as well as having
                     :ref:`downloader middlewares <topics-downloader-middleware>`
                     and
                     :ref:`spider middlewares <topics-spider-middleware>`
                     enabled, such as
                     :class:`~scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware`,
                     :class:`~scrapy.downloadermiddlewares.useragent.UserAgentMiddleware`,
                     or
                     :class:`~scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware`,
                     may modify the :class:`~scrapy.http.Request` object.

        To translate a cURL command into a Scrapy request,
        you may use `curl2scrapy <https://michael-shub.github.io/curl2scrapy/>`_.
        """
        request_kwargs = curl_to_request_kwargs(curl_command, ignore_unknown_options)
        request_kwargs.update(kwargs)
        return cls(**request_kwargs)

    def to_dict(self, *, spider: Optional["scrapy.Spider"] = None) -> dict:
        """Return a dictionary containing the Request's data.

        Use :func:`~scrapy.utils.request.request_from_dict` to convert back into a :class:`~scrapy.Request` object.

        If a spider is given, this method will try to find out the name of the spider methods used as callback
        and errback and include them in the output dict, raising an exception if they cannot be found.
        """
        d = {
            "url": self.url,  # urls are safe (safe_url_string)
            "callback": _find_method(spider, self.callback) if callable(self.callback) else self.callback,
            "errback": _find_method(spider, self.errback) if callable(self.errback) else self.errback,
            "headers": dict(self.headers),
        }
        for attr in self.attributes:
            d.setdefault(attr, getattr(self, attr))
        if type(self) is not Request:
            d["_class"] = self.__module__ + '.' + self.__class__.__name__
        return d


def _find_method(obj, func):
    """Helper function for Request.to_dict"""
    # Only instance methods contain ``__func__``
    if obj and hasattr(func, '__func__'):
        members = inspect.getmembers(obj, predicate=inspect.ismethod)
        for name, obj_func in members:
            # We need to use __func__ to access the original function object because instance
            # method objects are generated each time attribute is retrieved from instance.
            #
            # Reference: The standard type hierarchy
            # https://docs.python.org/3/reference/datamodel.html
            if obj_func.__func__ is func.__func__:
                return name
    raise ValueError(f"Function {func} is not an instance method in: {obj}")
3,428
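A minimal usage sketch for the Request class above (assumes a Scrapy install; replace() and to_dict() are the methods defined above and request_from_dict is the round-trip helper named in the to_dict docstring -- everything else, including the URL and header values, is illustrative):

from scrapy import Request

req = Request(
    "https://example.com/page",          # illustrative URL
    headers={"Accept": "text/html"},
    meta={"depth": 1},
    priority=10,
)

# replace() copies every name listed in Request.attributes unless overridden
post_req = req.replace(method="POST", body=b"payload")

# to_dict() serializes the request; scrapy.utils.request.request_from_dict rebuilds it
d = post_req.to_dict()
assert d["method"] == "POST" and d["priority"] == 10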
491
# -*- coding: utf-8 -*- import thriftpy2 from thriftpy2.protocol import TCyBinaryProtocolFactory from thriftpy2.transport import TCyBufferedTransportFactory from thriftpy2.rpc import make_server calc_thrift = thriftpy2.load("calc.thrift", module_name="calc_thrift") class Dispatcher(object): def add(self, a, b): print("add -> %s + %s" % (a, b)) return a + b def sub(self, a, b): print("sub -> %s - %s" % (a, b)) return a - b def mult(self, a, b): print("mult -> %s * %s" % (a, b)) return a * b def div(self, a, b): print("div -> %s / %s" % (a, b)) return a // b def main(): server = make_server(calc_thrift.Calculator, Dispatcher(), '127.0.0.1', 6000, proto_factory=TCyBinaryProtocolFactory(), trans_factory=TCyBufferedTransportFactory()) print("serving...") server.serve() if __name__ == '__main__': main()
482
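A matching client sketch for the calc server above (assumes the same calc.thrift file is present and that the server is running on its hard-coded 127.0.0.1:6000; make_client is thriftpy2's counterpart to the make_server call used above):

import thriftpy2
from thriftpy2.rpc import make_client
from thriftpy2.protocol import TCyBinaryProtocolFactory
from thriftpy2.transport import TCyBufferedTransportFactory

calc_thrift = thriftpy2.load("calc.thrift", module_name="calc_thrift")

client = make_client(calc_thrift.Calculator, '127.0.0.1', 6000,
                     proto_factory=TCyBinaryProtocolFactory(),
                     trans_factory=TCyBufferedTransportFactory())
print(client.add(3, 4))   # 7
print(client.div(9, 2))   # 4 -- the server uses floor division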
1,350
<filename>sdk/resourcemanager/azure-resourcemanager-keyvault/src/main/java/com/azure/resourcemanager/keyvault/fluent/models/package-info.java<gh_stars>1000+ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. // Code generated by Microsoft (R) AutoRest Code Generator. /** * Package containing the inner data models for KeyVaultManagementClient. The Azure management API provides a RESTful * set of web services that interact with Azure Key Vault. */ package com.azure.resourcemanager.keyvault.fluent.models;
150
796
<filename>backdoors/shell/__pupy/pupy/packages/all/pupyutils/search.py
#!/usr/bin/env python
# -*- coding: UTF8 -*-
import os
import os.path

def search_file(path, search_strings):
    line_nb=0
    try:
        with open(path, 'rb') as f:
            for line in f:
                line=line.lower()
                for s in search_strings:
                    start=0
                    while True:
                        i=line.find(s.lower(), start)
                        if i==-1:
                            break
                        start=i+1
                        # clamp the left edge of the context window: a match near
                        # the start of the line would otherwise produce a negative
                        # index and take the context from the wrong end of the line
                        yield (line_nb, line[max(0, i-50):i+50].strip())
                line_nb+=1
    except Exception:
        pass

def search_path(path, search_strings, files_extensions=None, max_size=None):
    """ search recursively for a string in all files in the path """
    if not files_extensions:
        files_extensions=None
    if files_extensions is not None:
        files_extensions=tuple(files_extensions)
    for root, dirs, files in os.walk(path):
        for f in files:
            if files_extensions is None or f.lower().endswith(files_extensions):
                if max_size is None or os.path.getsize(os.path.join(root,f))<max_size:
                    for res in search_file(os.path.join(root,f),search_strings):
                        yield (os.path.join(root,f), res[0], res[1])

if __name__=="__main__":
    import sys
    # search_path is a generator: it must be iterated, otherwise nothing runs
    for path, line_nb, context in search_path(sys.argv[1], [sys.argv[2]]):
        print("%s:%d: %s" % (path, line_nb, context))
518
675
#include "pugixml.hpp" namespace pugi { struct xml_string_writer : pugi::xml_writer { std::string m_result; virtual void write(const void* data, size_t size) { m_result.append(static_cast<const char*>(data), size); } }; // save doc to string inline std::string get_doc_string(xml_document& doc) { xml_string_writer writer; doc.save(writer); return writer.m_result; } }
160
345
<filename>test/programytest/storage/stores/nosql/mongo/dao/test_property.py
import unittest

from programy.storage.stores.nosql.mongo.dao.property import DefaultVariable
from programy.storage.stores.nosql.mongo.dao.property import Property
from programy.storage.stores.nosql.mongo.dao.property import Regex


class PropertyTests(unittest.TestCase):

    def test_init_no_id(self):
        property = Property(name='name', value='value')
        self.assertIsNotNone(property)
        self.assertIsNone(property.id)
        self.assertEqual('name', property.name)
        self.assertEqual('value', property.value)
        self.assertEqual({'value': 'value', 'name': 'name'}, property.to_document())

    def test_init_with_id(self):
        property = Property(name='name', value='value')
        property.id = '666'
        self.assertIsNotNone(property)
        self.assertIsNotNone(property.id)
        self.assertEqual('666', property.id)
        self.assertEqual('name', property.name)
        self.assertEqual('value', property.value)
        self.assertEqual({'_id': '666', 'value': 'value', 'name': 'name'}, property.to_document())

    def test_from_document_no_id(self):
        property1 = Property.from_document({'value': 'value', 'name': 'name'})
        self.assertIsNotNone(property1)
        self.assertIsNone(property1.id)
        self.assertEqual('name', property1.name)
        self.assertEqual('value', property1.value)

    def test_from_document_with_id(self):
        property2 = Property.from_document({'_id': '666', 'value': 'value', 'name': 'name'})
        self.assertIsNotNone(property2)
        self.assertIsNotNone(property2.id)
        self.assertEqual('666', property2.id)
        self.assertEqual('name', property2.name)
        self.assertEqual('value', property2.value)

    def test_repr_no_id(self):
        property1 = Property.from_document({'value': 'value', 'name': 'name'})
        self.assertEqual("<Property(id='n/a', name='name', value='value')>", str(property1))

    def test_repr_with_id(self):
        property2 = Property.from_document({'_id': '666', 'value': 'value', 'name': 'name'})
        self.assertEqual("<Property(id='666', name='name', value='value')>", str(property2))


class DefaultVariableTests(unittest.TestCase):

    def test_init_no_id(self):
        property = DefaultVariable(name='name', value='value')
        self.assertIsNotNone(property)
        self.assertIsNone(property.id)
        self.assertEqual('name', property.name)
        self.assertEqual('value', property.value)
        self.assertEqual({'value': 'value', 'name': 'name'}, property.to_document())

    def test_init_with_id(self):
        property = DefaultVariable(name='name', value='value')
        property.id = '666'
        self.assertIsNotNone(property)
        self.assertIsNotNone(property.id)
        self.assertEqual('666', property.id)
        self.assertEqual('name', property.name)
        self.assertEqual('value', property.value)
        self.assertEqual({'_id': '666', 'value': 'value', 'name': 'name'}, property.to_document())

    def test_from_document_no_id(self):
        property1 = DefaultVariable.from_document({'value': 'value', 'name': 'name'})
        self.assertIsNotNone(property1)
        self.assertIsNone(property1.id)
        self.assertEqual('name', property1.name)
        self.assertEqual('value', property1.value)

    def test_from_document_with_id(self):
        property2 = DefaultVariable.from_document({'_id': '666', 'value': 'value', 'name': 'name'})
        self.assertIsNotNone(property2)
        self.assertIsNotNone(property2.id)
        self.assertEqual('666', property2.id)
        self.assertEqual('name', property2.name)
        self.assertEqual('value', property2.value)

    def test_repr_no_id(self):
        property1 = DefaultVariable.from_document({'value': 'value', 'name': 'name'})
        self.assertEqual("<DefaultVariable(id='n/a', name='name', value='value')>", str(property1))

    def test_repr_with_id(self):
        property2 = DefaultVariable.from_document({'_id': '666', 'value': 'value', 'name': 'name'})
        self.assertEqual("<DefaultVariable(id='666', name='name', value='value')>", str(property2))


class RegexTests(unittest.TestCase):

    def test_init_no_id(self):
        property = Regex(name='name', value='value')
        self.assertIsNotNone(property)
        self.assertIsNone(property.id)
        self.assertEqual('name', property.name)
        self.assertEqual('value', property.value)
        self.assertEqual({'value': 'value', 'name': 'name'}, property.to_document())

    def test_init_with_id(self):
        property = Regex(name='name', value='value')
        property.id = '666'
        self.assertIsNotNone(property)
        self.assertIsNotNone(property.id)
        self.assertEqual('666', property.id)
        self.assertEqual('name', property.name)
        self.assertEqual('value', property.value)
        self.assertEqual({'_id': '666', 'value': 'value', 'name': 'name'}, property.to_document())

    def test_from_document_no_id(self):
        property1 = Regex.from_document({'value': 'value', 'name': 'name'})
        self.assertIsNotNone(property1)
        self.assertIsNone(property1.id)
        self.assertEqual('name', property1.name)
        self.assertEqual('value', property1.value)

    def test_from_document_with_id(self):
        property2 = Regex.from_document({'_id': '666', 'value': 'value', 'name': 'name'})
        self.assertIsNotNone(property2)
        self.assertIsNotNone(property2.id)
        self.assertEqual('666', property2.id)
        self.assertEqual('name', property2.name)
        self.assertEqual('value', property2.value)

    def test_repr_no_id(self):
        property1 = Regex.from_document({'value': 'value', 'name': 'name'})
        self.assertEqual("<Regex(id='n/a', name='name', value='value')>", str(property1))

    def test_repr_with_id(self):
        property2 = Regex.from_document({'_id': '666', 'value': 'value', 'name': 'name'})
        self.assertEqual("<Regex(id='666', name='name', value='value')>", str(property2))
2,529
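The module under test (programy.storage.stores.nosql.mongo.dao.property) is not included here; a minimal sketch of a class that would satisfy the assertions above looks like this (the real programy implementation may differ):

class Property:
    def __init__(self, name=None, value=None):
        self.id = None          # set separately; mirrors Mongo's _id
        self.name = name
        self.value = value

    def to_document(self):
        doc = {"name": self.name, "value": self.value}
        if self.id is not None:
            doc["_id"] = self.id
        return doc

    @classmethod
    def from_document(cls, doc):
        obj = cls(name=doc["name"], value=doc["value"])
        if "_id" in doc:
            obj.id = doc["_id"]
        return obj

    def __repr__(self):
        return "<%s(id='%s', name='%s', value='%s')>" % (
            self.__class__.__name__,
            self.id if self.id is not None else "n/a",
            self.name, self.value)


# DefaultVariable and Regex only differ by class name in the tests above
class DefaultVariable(Property):
    pass


class Regex(Property):
    pass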
372
/* * Copyright © BeyondTrust Software 2004 - 2019 * All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * BEYONDTRUST MAKES THIS SOFTWARE AVAILABLE UNDER OTHER LICENSING TERMS AS * WELL. IF YOU HAVE ENTERED INTO A SEPARATE LICENSE AGREEMENT WITH * BEYONDTRUST, THEN YOU MAY ELECT TO USE THE SOFTWARE UNDER THE TERMS OF THAT * SOFTWARE LICENSE AGREEMENT INSTEAD OF THE TERMS OF THE APACHE LICENSE, * NOTWITHSTANDING THE ABOVE NOTICE. IF YOU HAVE QUESTIONS, OR WISH TO REQUEST * A COPY OF THE ALTERNATE LICENSING TERMS OFFERED BY BEYONDTRUST, PLEASE CONTACT * BEYONDTRUST AT beyondtrust.com/contact */ /* * Copyright (C) BeyondTrust Software. All rights reserved. * * Module Name: * * test_ptlwregd.c * * Abstract: * * Registry * * Multi-threaded lwregd test client * * Authors: <NAME> (<EMAIL>) */ #define TEST_ADD 0 #define TEST_DELETE 1 #define TEST_GET_KEY 2 #define TEST_GET_VALUE 3 #define LWREGD_MAX_THEADS 2 #define LWREGD_MAX_ITERATIONS 10 #include "includes.h" #include "rsutils.h" typedef struct _PTLWREGD_CONTEXT { HANDLE hReg; pthread_t thread; PSTR pszKeyPath; PSTR pszKeyNamePrefix; DWORD dwRange; DWORD dwIterations; DWORD dwOperation; } PTLWREGD_CONTEXT, *PPTLWREGD_CONTEXT; DWORD ThreadTestAddKey( HANDLE hReg, PSTR pszKeyPath, PSTR pszKeyNamePrefix, DWORD dwRange, DWORD dwIterations) { DWORD dwError = 0; DWORD dwCount = 0; DWORD dwKeyNum = 0; PSTR pszKeyName = NULL; PSTR pszSubKeyPath = NULL; for (dwCount=0; dwCount<dwIterations; dwCount++) { printf("ThreadTestAddKey: %d %s\\%s\n", dwCount, pszKeyPath, pszKeyNamePrefix); for (dwKeyNum=0; dwKeyNum<dwRange; dwKeyNum++) { dwError = RegCStringAllocatePrintf( &pszKeyName, "%s-%d", pszKeyNamePrefix, dwKeyNum); BAIL_ON_REG_ERROR(dwError); dwError = RegShellUtilAddKey(hReg, NULL, pszKeyPath, pszKeyName, FALSE); BAIL_ON_REG_ERROR(dwError); printf(" >>ThreadTestAddKey: %d %s\\%s\n", dwCount, pszKeyPath, pszKeyName); dwError = RegCStringAllocatePrintf( &pszSubKeyPath, "%s\\%s", pszKeyPath, pszKeyName); BAIL_ON_REG_ERROR(dwError); dwError = RegShellUtilAddKey(hReg, NULL, pszSubKeyPath, pszKeyName, FALSE); BAIL_ON_REG_ERROR(dwError); printf(" >>ThreadTestAddKey: %d %s\\%s\n", dwCount, pszSubKeyPath, pszKeyName); } } cleanup: LWREG_SAFE_FREE_STRING(pszKeyName); LWREG_SAFE_FREE_STRING(pszSubKeyPath); return dwError; error: if (dwError) { RegPrintError("ThreadTestAddKey", dwError); } goto cleanup; } DWORD ThreadTestGetKeys( HANDLE hReg, PSTR pszKeyPath, PSTR pszKeyNamePrefix, DWORD dwRange, DWORD dwIterations) { DWORD dwError = 0; DWORD dwCount = 0; DWORD dwKeyNum = 0; PSTR pszKeyName = NULL; LW_WCHAR **ppSubKeys = NULL; DWORD dwRetSubKeyCount = 0; int i = 0; for (dwCount=0; dwCount<dwIterations; dwCount++) { printf("ThreadTestGetKeys: %d %s\\%s\n", dwCount, pszKeyPath, pszKeyNamePrefix); for (dwKeyNum=0; dwKeyNum<dwRange; dwKeyNum++) { dwError = RegCStringAllocatePrintf( &pszKeyName, "%s-%d", pszKeyNamePrefix, dwKeyNum); BAIL_ON_REG_ERROR(dwError); dwError = RegShellUtilGetKeys(hReg, NULL, pszKeyPath, pszKeyName, &ppSubKeys, &dwRetSubKeyCount); 
BAIL_ON_REG_ERROR(dwError);
            printf("  >>ThreadTestGetKeys: %d %s\\%s has %d subkeys\n",
                   dwCount, pszKeyPath, pszKeyName, dwRetSubKeyCount);

            /* Free this iteration's result here; freeing only at the cleanup
             * label would leak every array except the last one returned. */
            for (i = 0; i < dwRetSubKeyCount; i++)
            {
                LWREG_SAFE_FREE_MEMORY(ppSubKeys[i]);
            }
            LWREG_SAFE_FREE_MEMORY(ppSubKeys);
            dwRetSubKeyCount = 0;
        }
    }

cleanup:
    for (i = 0; i < dwRetSubKeyCount; i++)
    {
        LWREG_SAFE_FREE_MEMORY(ppSubKeys[i]);
    }
    LWREG_SAFE_FREE_MEMORY(ppSubKeys);
    LWREG_SAFE_FREE_STRING(pszKeyName);
    return dwError;

error:
    if (dwError)
    {
        RegPrintError("ThreadTestGetKeys", dwError);
    }
    goto cleanup;
}


DWORD
ThreadTestGetValues(
    HANDLE hReg,
    PSTR pszKeyPath,
    PSTR pszKeyNamePrefix,
    DWORD dwRange,
    DWORD dwIterations)
{
    DWORD dwError = 0;
    DWORD dwCount = 0;
    DWORD dwKeyNum = 0;
    PSTR pszKeyName = NULL;
    PREGSHELL_UTIL_VALUE valueArray = NULL;
    DWORD dwValueArrayLen = 0;
    int i = 0;

    for (dwCount=0; dwCount<dwIterations; dwCount++)
    {
        printf("ThreadTestGetValues: %d %s\\%s\n",
               dwCount, pszKeyPath, pszKeyNamePrefix);
        for (dwKeyNum=0; dwKeyNum<dwRange; dwKeyNum++)
        {
            dwError = RegCStringAllocatePrintf(
                          &pszKeyName,
                          "%s-%d",
                          pszKeyNamePrefix,
                          dwKeyNum);
            BAIL_ON_REG_ERROR(dwError);
            dwError = RegShellUtilGetValues(hReg,
                                            NULL,
                                            pszKeyPath,
                                            pszKeyName,
                                            &valueArray,
                                            &dwValueArrayLen);
            BAIL_ON_REG_ERROR(dwError);
            printf("  >>ThreadTestGetValues: %d %s\\%s has %d value\n",
                   dwCount, pszKeyPath, pszKeyName, dwValueArrayLen);

            /* Same pattern as ThreadTestGetKeys: release each iteration's
             * value array instead of leaking all but the final one. */
            for (i=0; i<dwValueArrayLen; i++)
            {
                LWREG_SAFE_FREE_MEMORY(valueArray[i].pValueName);
                LWREG_SAFE_FREE_MEMORY(valueArray[i].pData);
            }
            LWREG_SAFE_FREE_MEMORY(valueArray);
            dwValueArrayLen = 0;
        }
    }

cleanup:
    for (i=0; i<dwValueArrayLen; i++)
    {
        LWREG_SAFE_FREE_MEMORY(valueArray[i].pValueName);
        LWREG_SAFE_FREE_MEMORY(valueArray[i].pData);
    }
    LWREG_SAFE_FREE_MEMORY(valueArray);
    LWREG_SAFE_FREE_STRING(pszKeyName);
    return dwError;

error:
    if (dwError)
    {
        RegPrintError("ThreadTestGetValues", dwError);
    }
    goto cleanup;
}


DWORD
ThreadTestDeleteKey(
    HANDLE hReg,
    PSTR pszKeyPath,
    PSTR pszKeyNamePrefix,
    DWORD dwRange,
    DWORD dwIterations)
{
    DWORD dwError = 0;
    DWORD dwCount = 0;
    DWORD dwKeyNum = 0;
    PSTR pszKeyName = NULL;
    PSTR pszSubKeyPath = NULL;

    for (dwCount=0; dwCount<dwIterations; dwCount++)
    {
        printf("ThreadTestDeleteKey: %d %s\\%s\n",
               dwCount, pszKeyPath, pszKeyNamePrefix);
        for (dwKeyNum=0; dwKeyNum<dwRange; dwKeyNum++)
        {
            dwError = RegCStringAllocatePrintf(
                          &pszKeyName,
                          "%s-%d",
                          pszKeyNamePrefix,
                          dwKeyNum);
            BAIL_ON_REG_ERROR(dwError);
            dwError = RegCStringAllocatePrintf(
                          &pszSubKeyPath,
                          "%s\\%s",
                          pszKeyPath,
                          pszKeyName);
            BAIL_ON_REG_ERROR(dwError);

            dwError = RegShellUtilDeleteKey(hReg, NULL, pszSubKeyPath, pszKeyName);
            BAIL_ON_REG_ERROR(dwError);
            printf("  >>ThreadTestDeleteKey: %d %s\\%s\n",
                   dwCount, pszSubKeyPath, pszKeyName);

            dwError = RegShellUtilDeleteKey(hReg, NULL, pszKeyPath, pszKeyName);
            BAIL_ON_REG_ERROR(dwError);
            printf("  >>ThreadTestDeleteKey: %d %s\\%s\n",
                   dwCount, pszKeyPath, pszKeyName);
        }
    }

cleanup:
    LWREG_SAFE_FREE_STRING(pszKeyName);
    LWREG_SAFE_FREE_STRING(pszSubKeyPath);
    return dwError;

error:
    if (dwError == LWREG_ERROR_FAILED_DELETE_HAS_SUBKEY ||
        dwError == LWREG_ERROR_KEY_IS_ACTIVE ||
        dwError == LWREG_ERROR_NO_SUCH_KEY_OR_VALUE)
    {
        dwError = 0;
    }
    if (dwError)
    {
        RegPrintError("ThreadTestDeleteKey", dwError);
    }
    goto cleanup;
}


void *
ThreadTestPtKey(
    void *pctx)
{
    DWORD dwError = 0;
    PPTLWREGD_CONTEXT context = (PPTLWREGD_CONTEXT) pctx;
    PSTR pszOperation = NULL;

    switch (context->dwOperation)
    {
        case TEST_ADD:
            pszOperation = "AddKey";
            break;
        case TEST_DELETE:
            pszOperation = "DeleteKey";
            break;
        case TEST_GET_KEY:
            pszOperation = "GetKeys";
            break;
        case TEST_GET_VALUE:
            pszOperation = "GetValues";
            break;
        default:
            pszOperation = "Undefined";
    }
    printf("ThreadTestPt%sKey: starting %s\\%s\n",
           pszOperation, context->pszKeyPath, context->pszKeyNamePrefix);

    switch (context->dwOperation)
    {
        case TEST_ADD:
            dwError = ThreadTestAddKey(
                          context->hReg,
                          context->pszKeyPath,
                          context->pszKeyNamePrefix,
context->dwRange, context->dwIterations); break; case TEST_DELETE: dwError = ThreadTestDeleteKey( context->hReg, context->pszKeyPath, context->pszKeyNamePrefix, context->dwRange, context->dwIterations); break; case TEST_GET_KEY: dwError = ThreadTestGetKeys( context->hReg, context->pszKeyPath, context->pszKeyNamePrefix, context->dwRange, context->dwIterations); break; case TEST_GET_VALUE: dwError = ThreadTestGetValues( context->hReg, context->pszKeyPath, context->pszKeyNamePrefix, context->dwRange, context->dwIterations); pszOperation = "GetValues"; break; default: pszOperation = "Undefined"; } BAIL_ON_REG_ERROR(dwError); printf("ThreadTestPt%sKey: %s\\%s done.\n", pszOperation, context->pszKeyPath, context->pszKeyNamePrefix); cleanup: return NULL; error: goto cleanup; } DWORD ThreadTestPtFree( PPTLWREGD_CONTEXT pCtx) { DWORD dwError = 0; BAIL_ON_INVALID_HANDLE(pCtx); RegCloseServer(pCtx->hReg); RegMemoryFree(pCtx->pszKeyNamePrefix); RegMemoryFree(pCtx->pszKeyPath); RegMemoryFree(pCtx); cleanup: return dwError; error: goto cleanup; } DWORD ThreadTestPtInit( PSTR pszKeyPath, PSTR pszKeyNamePrefix, DWORD dwKeyNameSuffix, DWORD dwIterations, DWORD dwOperation, PPTLWREGD_CONTEXT *ppRetCtx) { PPTLWREGD_CONTEXT pCtx = NULL; DWORD dwError = 0; HANDLE hReg = NULL; dwError = RegAllocateMemory(sizeof(*pCtx), (PVOID*)&pCtx); BAIL_ON_REG_ERROR(dwError); dwError = RegOpenServer(&hReg); BAIL_ON_REG_ERROR(dwError); dwError = RegCStringDuplicate(&pCtx->pszKeyPath, pszKeyPath); BAIL_ON_REG_ERROR(dwError); dwError = RegAllocateMemory(strlen(pszKeyNamePrefix) + 11, (PVOID*)&pCtx->pszKeyNamePrefix); BAIL_ON_REG_ERROR(dwError); sprintf(pCtx->pszKeyNamePrefix, "%s%d", pszKeyNamePrefix, dwKeyNameSuffix); pCtx->hReg = hReg; pCtx->dwRange = 1000; pCtx->dwIterations = dwIterations; pCtx->dwOperation = dwOperation; *ppRetCtx = pCtx; cleanup: return dwError; error: goto cleanup; } int main(int argc, char *argv[]) { PPTLWREGD_CONTEXT ctxAdd[LWREGD_MAX_THEADS] = {0}; PPTLWREGD_CONTEXT ctxDel[LWREGD_MAX_THEADS] = {0}; PPTLWREGD_CONTEXT ctxGetKeys[LWREGD_MAX_THEADS] = {0}; PPTLWREGD_CONTEXT ctxGetValues[LWREGD_MAX_THEADS] = {0}; DWORD dwError = 0; int sts = 0; DWORD i = 0; for (i=0; i<LWREGD_MAX_THEADS; i++) { ThreadTestPtInit("thread_tests", "TestKey", i, LWREGD_MAX_ITERATIONS, TEST_ADD, &ctxAdd[i]); } for (i=0; i<LWREGD_MAX_THEADS; i++) { ThreadTestPtInit("thread_tests", "TestKey", i, LWREGD_MAX_ITERATIONS, TEST_DELETE, &ctxDel[i]); } for (i=0; i<LWREGD_MAX_THEADS; i++) { ThreadTestPtInit("thread_tests", "TestKey", i, LWREGD_MAX_ITERATIONS, TEST_GET_KEY, &ctxGetKeys[i]); } for (i=0; i<LWREGD_MAX_THEADS; i++) { ThreadTestPtInit("thread_tests", "TestKey", i, LWREGD_MAX_ITERATIONS, TEST_GET_VALUE, &ctxGetValues[i]); } for (i=0; i<LWREGD_MAX_THEADS; i++) { sts = pthread_create(&ctxAdd[i]->thread, NULL, ThreadTestPtKey, ctxAdd[i]); if (sts == -1) { printf("pthread_create: Error ThreadTestPtAddkey(ctxAdd[%d])\n", i); return 1; } } for (i=0; i<LWREGD_MAX_THEADS; i++) { sts = pthread_create(&ctxDel[i]->thread, NULL, ThreadTestPtKey, ctxDel[i]); if (sts == -1) { printf("pthread_create: Error ThreadTestPtDeletekey(ctxDel[%d])\n", i); return 1; } } for (i=0; i<LWREGD_MAX_THEADS; i++) { sts = pthread_create(&ctxGetKeys[i]->thread, NULL, ThreadTestPtKey, ctxGetKeys[i]); if (sts == -1) { printf("pthread_create: Error ThreadTestPtGetKeys(ctxGetKeys[%d])\n", i); return 1; } } for (i=0; i<LWREGD_MAX_THEADS; i++) { sts = pthread_create(&ctxGetValues[i]->thread, NULL, ThreadTestPtKey, ctxGetValues[i]); if (sts == -1) { 
printf("pthread_create: Error ThreadTestPtGetValues(ctxGetValues[%d])\n", i); return 1; } } for (i=0; i<LWREGD_MAX_THEADS; i++) { pthread_join(ctxAdd[i]->thread, NULL); pthread_join(ctxDel[i]->thread, NULL); pthread_join(ctxGetKeys[i]->thread, NULL); pthread_join(ctxGetValues[i]->thread, NULL); } for (i=0; i<LWREGD_MAX_THEADS; i++) { ThreadTestPtFree(ctxAdd[i]); ThreadTestPtFree(ctxDel[i]); ThreadTestPtFree(ctxGetKeys[i]); ThreadTestPtFree(ctxGetValues[i]); } return dwError; }
8,666
3,102
// RUN: %clang_cc1 -fsyntax-only -Wunreachable-code -verify %s static const bool False = false; struct A { ~A(); operator bool(); }; void Bar(); void Foo() { if (False && A()) { Bar(); // expected-no-diagnostics } }
91
310
{ "name": "XEmacs", "description": "A version of Emacs.", "url": "https://en.wikipedia.org/wiki/XEmacs" }
44
506
// https://abc088.contest.atcoder.jp/tasks/abc088_d #include <iostream> #include <queue> #include <tuple> #include <unordered_set> #include <vector> using namespace std; typedef vector<bool> vb; typedef vector<vb> vvb; typedef tuple<int, int> ii; typedef tuple<int, int, int> iii; typedef unordered_set<ii> sii; typedef priority_queue<iii> pq; namespace std { template<> struct hash<ii> { size_t operator()(const ii &i) const { return hash<int>()(get<0>(i) * 100 + get<1>(i)); }; }; } int s(const vvb &m, int h, int w, sii &v, pq &q) { int k, i, j; while (true) { if (q.empty()) return -1; tie(k, i, j) = q.top(); k = -k; q.pop(); if (v.count(make_tuple(i, j))) continue; v.insert(make_tuple(i, j)); break; } if (i == h - 1 && j == w - 1) return k; if (i > 0 && m[i - 1][j] && !v.count(make_tuple(i - 1, j))) q.push(make_tuple(-(k + 1), i - 1, j)); if (i < h - 1 && m[i + 1][j] && !v.count(make_tuple(i + 1, j))) q.push(make_tuple(-(k + 1), i + 1, j)); if (j > 0 && m[i][j - 1] && !v.count(make_tuple(i, j - 1))) q.push(make_tuple(-(k + 1), i, j - 1)); if (j < w - 1 && m[i][j + 1] && !v.count(make_tuple(i, j + 1))) q.push(make_tuple(-(k + 1), i, j + 1)); return s(m, h, w, v, q); } int main() { int h, w; cin >> h >> w; vvb m(h); int b = 0; for (int i = 0; i < h; i++) { m[i] = vb(w); for (int j = 0; j < w; j++) { char c; cin >> c; m[i][j] = c == '.'; b += c == '#'; } } pq q; q.push(make_tuple(-1, 0, 0)); sii v; int c = s(m, h, w, v, q); if (c == -1) cout << -1 << endl; else cout << h * w - c - b << endl; }
812
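The C++ solution above runs a best-first search keyed on negated depth, which on a unit-cost grid is plain BFS; the answer is the number of white squares minus the length (counted in cells) of a shortest path. An equivalent Python sketch of the same argument (illustrative, not the submitted solution):

from collections import deque

def solve(h, w, grid):
    dist = [[-1] * w for _ in range(h)]
    dist[0][0] = 1                     # path length counted in cells, as above
    q = deque([(0, 0)])
    while q:
        i, j = q.popleft()
        for di, dj in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            ni, nj = i + di, j + dj
            if 0 <= ni < h and 0 <= nj < w and grid[ni][nj] == '.' and dist[ni][nj] == -1:
                dist[ni][nj] = dist[i][j] + 1
                q.append((ni, nj))
    if dist[h - 1][w - 1] == -1:
        return -1                      # goal unreachable
    whites = sum(row.count('.') for row in grid)
    return whites - dist[h - 1][w - 1]

# solve(2, 3, ["..#", "#.."]) == 0: every white square lies on the only path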
3,200
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
build MindData lite minimum library
"""
import glob
import itertools
import json
from operator import itemgetter
import os
from pprint import pprint
import sys
import warnings

import parser

DEPENDENCY_FILENAME = 'dependencies.txt'
ASSOCIATION_FILENAME = 'associations.txt'
ALL_DEPS_FILENAME = 'needed_dependencies.txt'
OBJECTS_DIR = 'tmp/'
ESSENTIAL_OBJECTS = [
    # 'types.cc.o',
    # 'tensor_impl.cc.o',
    'random_sampler.cc.o',  # default value for datasets (may not exist in their code)
    'random_sampler_ir.cc.o',  # default value for datasets (may not exist in their code)
]


def load_dependencies():
    """
    Read dependencies.txt and load it into a dict.

    :return: a dict containing list of dependencies for almost any file in MindData lite
    """
    if not os.path.isfile(DEPENDENCY_FILENAME):
        raise FileNotFoundError("dependency file ({}) does not exist.\n"
                                "Please run cropper_configure.py first.".format(DEPENDENCY_FILENAME))
    with open(DEPENDENCY_FILENAME) as f:
        dep_dict = json.load(f)
    return dep_dict


def load_associations():
    """
    Read associations.txt and load it into a dict.

    :return: a dict containing entry point (a filename) for each op
    """
    if not os.path.isfile(ASSOCIATION_FILENAME):
        raise FileNotFoundError("association file ({}) does not exist.\n"
                                "Please run cropper_configure.py first.".format(ASSOCIATION_FILENAME))
    with open(ASSOCIATION_FILENAME) as f:
        _dict = json.load(f)
    return _dict


def get_unique_dependencies(dependencies_dict, associations_dict, user_ops):
    """
    Find which dependencies we need to include according to the ops found in the user code.

    :param dependencies_dict: a dict containing list of dependencies for almost any file in MindData lite
    :param associations_dict: a dict containing entry point (a filename) for each op
    :param user_ops: a list of ops found in the user code
    :return: a list of dependencies needed based on the user code
    """
    selected_entries = []  # itemgetter(*user_ops)(associations_dict)
    for op in user_ops:
        print('{} --> {}'.format(op, associations_dict[op]))
        selected_entries.append(associations_dict[op])
    selected_files = itemgetter(*selected_entries)(dependencies_dict)
    selected_files = list(itertools.chain(*selected_files))
    return sorted(list(set().union(selected_files)))


def remove_unused_objects(final_deps, essentials, all_object_files):
    """
    Remove object files that are determined to be NOT needed to run user code
    as they are not in the dependencies of user code.
    :param final_deps: a list of dependencies needed based on the user code
    :param essentials: essential objects that should not be removed from final lib
    :param all_object_files: a list of all objects available in our static library
    :return: None
    """
    # find objects which are not part of any dependency (lstrip is needed to remove the '_' added in crop.sh)
    to_be_removed = [x for x in all_object_files if not any(x.lstrip('_')[:-5] in y for y in final_deps)]
    # keep the ones that are not an essential object file. (lstrip is needed to remove the '_' added in crop.sh)
    to_be_removed = [x for x in to_be_removed if not any(x.lstrip('_') in y for y in essentials)]
    print('Removing:', len(to_be_removed), 'unused objects.')
    pprint(sorted(to_be_removed))
    for filename in to_be_removed:
        os.remove(os.path.join(OBJECTS_DIR, filename))


def main():
    # load tables created using cropper_configure.py
    dependencies_dict = load_dependencies()
    associations_dict = load_associations()

    # get all objects filename
    all_object_files = [os.path.basename(x) for x in glob.glob('{}*.o'.format(OBJECTS_DIR))]
    print("All Obj files: {}".format(len(all_object_files)))

    # find ops in user code
    my_parser = parser.SimpleParser()
    temp = [my_parser.parse(x) for x in user_code_filenames]
    user_ops = set(itertools.chain(*temp))
    print('user ops: {}'.format(user_ops))

    # user is not using any MindData op
    if not user_ops:
        warnings.warn('No MindData Ops detected in your code...')
        remove_unused_objects([], [], all_object_files)
        with os.fdopen(os.open(os.path.join(OBJECTS_DIR, ALL_DEPS_FILENAME),
                               os.O_WRONLY | os.O_CREAT, 0o660), "w+") as _:
            pass
        sys.exit(0)

    # find dependencies required (based on user ops)
    unique_deps = get_unique_dependencies(dependencies_dict, associations_dict, user_ops)
    print('Unique Deps (.h): {}'.format(len(unique_deps)))
    print('Unique Deps (.cc): {}'.format(len(list(filter(lambda x: x[-2:] == 'cc', unique_deps)))))

    # add essential files to dependency files
    final_deps = set(unique_deps + dependencies_dict['ESSENTIAL'])
    print('Total Deps (.h): {}'.format(len(final_deps)))

    # delete the rest of the object files from directory.
    remove_unused_objects(final_deps, ESSENTIAL_OBJECTS, all_object_files)

    # write all dependencies to the file (for extracting external ones)
    with os.fdopen(os.open(os.path.join(OBJECTS_DIR, ALL_DEPS_FILENAME),
                           os.O_WRONLY | os.O_CREAT, 0o660), "w+") as fout:
        fout.write("\n".join(unique_deps) + '\n')


if __name__ == "__main__":
    # get user code filename(s) as argument(s) to code
    if len(sys.argv) <= 1:
        print("usage: python build_lib.py <xxx.y> [<xxx.z>]")
        sys.exit(1)
    user_code_filenames = sys.argv[1:]
    main()
2,252
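The two JSON tables are produced by cropper_configure.py, which is not shown here; from the way the script indexes them, their shapes can be inferred to be roughly the following (the file names and op name are illustrative guesses, not the real generated data):

# associations.txt: op name found in user code -> entry-point source file
associations = {
    "RandomSampler": "random_sampler_ir.cc",
}

# dependencies.txt: source file -> list of files it needs; the special
# "ESSENTIAL" key lists dependencies that are always kept (see main()).
dependencies = {
    "random_sampler_ir.cc": ["random_sampler_ir.cc", "random_sampler.cc", "sampler.h"],
    "ESSENTIAL": ["types.cc"],
}

# get_unique_dependencies(dependencies, associations, ["RandomSampler"])
# then returns the sorted, de-duplicated union of the listed files.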
1,585
<reponame>j-xiong/ompi #include <stdlib.h> #include <stdio.h> #include "k-partitioning.h" #include "tm_mt.h" #include "tm_verbose.h" void memory_allocation(PriorityQueue ** Q, PriorityQueue ** Qinst, double *** D, int n, int k); void initialization(int * const part, double ** const matrice, PriorityQueue * const Qpart, PriorityQueue * const Q, PriorityQueue * const Qinst, double ** const D, int n, int k, int * const deficit, int * const surplus); void algo(int * const part, double ** const matrice, PriorityQueue * const Qpart, PriorityQueue * const Q, PriorityQueue * const Qinst, double ** const D, int n, int * const deficit, int * const surplus); double nextGain(PriorityQueue * const Qpart, PriorityQueue * const Q, int * const deficit, int * const surplus); void balancing(int n, int deficit, int surplus, double ** const D, int * const part); void destruction(PriorityQueue * Qpart, PriorityQueue * Q, PriorityQueue * Qinst, double ** D, int n, int k); void allocate_vertex2(int u, int *res, double **comm, int n, int *size, int max_size); double eval_cost2(int *,int,double **); int *kpartition_greedy2(int k, double **comm, int n, int nb_try_max, int *constraints, int nb_constraints); int* build_p_vector(double **comm, int n, int k, int greedy_trials, int * constraints, int nb_constraints); int* kPartitioning(double ** comm, int n, int k, int * constraints, int nb_constraints, int greedy_trials) { /* ##### declarations & allocations ##### */ PriorityQueue Qpart, *Q = NULL, *Qinst = NULL; double **D = NULL; int deficit, surplus, *part = NULL; int real_n = n-nb_constraints; part = build_p_vector(comm, n, k, greedy_trials, constraints, nb_constraints); memory_allocation(&Q, &Qinst, &D, real_n, k); /* ##### Initialization ##### */ initialization(part, comm, &Qpart, Q, Qinst, D, real_n, k, &deficit, &surplus); /* ##### Main loop ##### */ while((nextGain(&Qpart, Q, &deficit, &surplus))>0) { algo(part, comm, &Qpart, Q, Qinst, D, real_n, &deficit, &surplus); } /* ##### Balancing the partition ##### */ balancing(real_n, deficit, surplus, D, part); /*if partition isn't balanced we have to make one last move*/ /* ##### Memory deallocation ##### */ destruction(&Qpart, Q, Qinst, D, real_n, k); return part; } void memory_allocation(PriorityQueue ** Q, PriorityQueue ** Qinst, double *** D, int n, int k) { int i; *Q = calloc(k, sizeof(PriorityQueue)); /*one Q for each partition*/ *Qinst = calloc(n, sizeof(PriorityQueue)); /*one Qinst for each vertex*/ *D = malloc(sizeof(double *) * n); /*D's size is n * k*/ for(i=0; i < n; ++i) (*D)[i] = calloc(k, sizeof(double)); } void initialization(int * const part, double ** const matrice, PriorityQueue * const Qpart, PriorityQueue * const Q, PriorityQueue * const Qinst, double ** const D, int n, int k, int * const deficit, int * const surplus) { int i,j; /* ##### PriorityQueue initializations ##### */ /* We initialize Qpart with a size of k because it contains the subsets's indexes. */ PQ_init(Qpart, k); /* We initialize each Q[i] with a size of n because each vertex is in one of these queue at any time. */ /* However we could set a size of (n/k)+1 as this is the maximum size of a subset when the partition is not balanced. 
   */
  for(i=0; i<k; ++i)
    PQ_init(&Q[i], n);

  /* We initialize each Qinst[i] with a size of k because for each vertex i,
     Qinst[i] contains the D(i,j) values for j = 0...(k-1) */
  for(i=0; i<n; ++i)
    PQ_init(&Qinst[i], k);

  /* ##### Computing the D(i,j) values ##### */
  for(i=0; i < n; ++i) /*for each vertex i*/
    {
      for(j=0; j < n; ++j) /*and for each vertex j*/
        {
          D[i][part[j]] += matrice[i][j];
        }
    }

  /* ##### Filling up the queues ##### */
  /* ### Qinst ### */
  for(i=0; i < n; ++i) /*for each vertex i*/
    for(j=0; j < k; ++j) /*and for each subset j*/
      PQ_insert(&Qinst[i], j, D[i][j]); /*we insert the corresponding D(i,j) value in Qinst[i]*/

  /* ### Q ### */
  for(i=0; i<n; ++i) /*for each vertex i*/
    PQ_insert(&Q[part[i]], i, PQ_findMaxKey(&Qinst[i])-D[i][part[i]]); /*we insert in Q[part[i]] the vertex i with its highest possible gain*/

  /* ### Qpart ### */
  for(i=0; i < k; ++i) /*for each subset i*/
    PQ_insert(Qpart, i, PQ_findMaxKey(&Q[i])); /*we insert it in Qpart with the highest possible gain by one of its vertex as key*/

  /* ##### Initialization of deficit/surplus ##### */
  *surplus = *deficit = 0;
}

void algo(int * const part, double ** const matrice, PriorityQueue * const Qpart, PriorityQueue * const Q, PriorityQueue * const Qinst, double ** const D, int n, int * const deficit, int * const surplus)
{
  int p,u,v,j;
  double d;
  if(*deficit == *surplus) /*if the current partition is balanced*/
    {
      p = PQ_deleteMax(Qpart); /*we get the subset with the highest possible gain in p and remove it from Qpart*/
      u = PQ_deleteMax(&Q[p]); /*then we get the vertex with this highest possible gain in u and remove it from Q[p] */
      *deficit = part[u]; /*p becomes the deficit */
    }
  else /*the current partition is not balanced*/
    {
      u = PQ_deleteMax(&Q[*surplus]); /*we get the vertex with the highest possible gain in surplus and remove it from Q[surplus] */
      PQ_delete(Qpart, part[u]); /*then we remove surplus from Qpart (note that u is from surplus so part[u] is surplus) */
    }

  d = PQ_findMaxKey(&Q[part[u]]); /*we get the next highest possible gain in part[u] (without taking u in account as we already removed it from Q[part[u])*/
  PQ_insert(Qpart, part[u], d); /*we put part[u] back in Qpart with its new highest possible gain*/

  j = PQ_deleteMax(&Qinst[u]); /*we get from Qinst[u] the subset in which we have to move u to get the highest gain.*/
  if ( j < 0){
    if(tm_get_verbose_level() >= CRITICAL)
      fprintf(stderr,"Error Max element in priority queue negative!\n");
    exit(-1);
  }

  *surplus = j; /*this subset becomes surplus*/

  for(v=0; v < n; ++v) /*we scan through all edges (u,v) */
    {
      j = part[u]; /*we set j to the starting subset */
      D[v][j]= D[v][j] - matrice[u][v]; /*we compute the new D[v, i] (here j has the value of the starting subset of u, that's why we say i) */
      PQ_adjustKey(&Qinst[v], j, D[v][j]); /*we update this gain in Qinst[v]*/

      j = *surplus; /*we put back the arrival subset in j*/
      D[v][j] = D[v][j] + matrice[u][v]; /*we compute the new D[v, j]*/
      PQ_adjustKey(&Qinst[v], j, D[v][j]); /*we update this gain in Qinst[v]*/

      d = PQ_findMaxKey(&Qinst[v]) - D[v][part[v]]; /*we compute v's new highest possible gain*/
      PQ_adjustKey(&Q[part[v]], v, d); /*we update it in Q[p[v]]*/

      d = PQ_findMaxKey(&Q[part[v]]); /*we get the highest possible gain in v's subset*/
      PQ_adjustKey(Qpart, part[v], d); /*we update it in Qpart*/
    }

  part[u] = *surplus; /*we move u from i to j (here surplus has the value of j the arrival subset)*/

  d = PQ_findMaxKey(&Qinst[u]) - D[u][part[u]]; /*we compute the new u's highest possible gain*/
  if(!PQ_isEmpty(&Qinst[u]))
    /*if at least one more move of u is possible*/
    PQ_insert(&Q[part[u]], u, d); /*we insert u in the Q queue of its new subset*/
  PQ_adjustKey(Qpart, part[u], d); /*we update the new highest possible gain in u's subset*/
}

double nextGain(PriorityQueue * const Qpart, PriorityQueue * const Q, int * const deficit, int * const surplus)
{
  double res;
  if(*deficit == *surplus) /*if the current partition is balanced*/
    res = PQ_findMaxKey(Qpart); /*we get the highest possible gain*/
  else /*the current partition is not balanced*/
    res = PQ_findMaxKey(&Q[*surplus]); /*we get the highest possible gain from surplus*/
  return res;
}

void balancing(int n, int deficit, int surplus, double ** const D, int * const part)
{
  if(surplus != deficit) /*if the current partition is not balanced*/
    {
      int i;
      PriorityQueue moves; /*we use a queue to store the possible moves from surplus to deficit*/
      PQ_init(&moves, n);
      for(i=0; i<n; ++i) /*for each vertex*/
        {
          if(part[i] == surplus) /*if i is from surplus*/
            PQ_insert(&moves, i, D[i][deficit]-D[i][surplus]); /*we insert i in moves with the gain we get from moving i from surplus to deficit as key */
        }
      part[PQ_deleteMax(&moves)] = deficit; /*we put the i from moves with the highest gain in deficit*/
      PQ_exit(&moves);
    }
}

void destruction(PriorityQueue * Qpart, PriorityQueue * Q, PriorityQueue * Qinst, double ** D, int n, int k)
{
  int i;
  PQ_exit(Qpart);
  for(i=0; i<k; ++i)
    PQ_exit(&Q[i]);
  free(Q);
  for(i=0; i<n; ++i)
    {
      PQ_exit(&Qinst[i]);
    }
  free(Qinst);
  for(i=0; i<n; ++i)
    free(D[i]);
  free(D);
}

int *kpartition_greedy2(int k, double **comm, int n, int nb_try_max, int *constraints, int nb_constraints)
{
  int *res = NULL, *best_res=NULL, *size = NULL;
  int i,j,nb_trials;
  int max_size;
  double cost, best_cost = -1;

  for( nb_trials = 0 ; nb_trials < nb_try_max ; nb_trials++ ){
    res = (int *)malloc(sizeof(int)*n);
    for ( i = 0 ; i < n ; ++i )
      res[i] = -1;

    size = (int *)calloc(k,sizeof(int));
    max_size = n/k;

    /* put "dumb" vertices in the correct partition if there are any*/
    if (nb_constraints){ /*if there is at least one constraint*/
      int nb_real_nodes = n-nb_constraints; /*this is the number of "real" nodes as opposed to the dumb ones*/
      for(i=0; i<nb_constraints; ++i) /*for each constraint*/
        {
          int i_part = constraints[i]/max_size; /*we compute its partition*/
          res[nb_real_nodes+i] = i_part; /*and we set it in partition vector*/
          size[i_part]++; /*we update the partition's size*/
        }
    }

    /* choose k initial "true" vertices at random and put them in a different partition */
    for ( i = 0 ; i < k ; ++i ){
      /* if the partition is full of dumb vertices go to next partition*/
      if(size[i] >= max_size)
        continue;
      /* find a vertex not already partitioned*/
      do{
        /* call the mersenne twister PRNG of tm_mt.c*/
        j = genrand_int32() % n;
      } while ( res[j] != -1 );
      /* allocate and update size of partition*/
      res[j] = i;
      /* printf("random: %d -> %d\n",j,i); */
      size[i]++;
    }

    /* allocate each unallocated vertex to the partition that maximizes the communication*/
    for( i = 0 ; i < n ; ++i )
      if( res[i] == -1)
        allocate_vertex2(i, res, comm, n-nb_constraints, size, max_size);

    cost = eval_cost2(res,n-nb_constraints,comm);
    /*print_1D_tab(res,n);
      printf("cost=%.2f\n",cost);*/
    if((cost<best_cost) || (best_cost == -1)){
      best_cost=cost;
      free(best_res);
      best_res=res;
    }else
      free(res);
    free(size);
  }
  /*print_1D_tab(best_res,n);
    printf("best_cost=%.2f\n",best_cost);
  */
  return best_res;
}

void allocate_vertex2(int u, int *res, double **comm, int n, int *size, int max_size)
{
  int i,best_part = -1;
  double cost, best_cost = -1;
/*printf("\n"); print_1D_tab(res,n);*/ for( i = 0 ; i < n ; ++i){ if (( res[i] != -1 ) && ( size[res[i]] < max_size )){ cost = comm[u][i]; if (( cost > best_cost)){ best_cost = cost; best_part = res[i]; } } } /* printf("size[%d]: %d\n",best_part, size[best_part]);*/ /* printf("putting(%.2f): %d -> %d\n",best_cost, u, best_part); */ res[u] = best_part; size[best_part]++; } double eval_cost2(int *partition, int n, double **comm) { double cost = 0; int i,j; for( i = 0 ; i < n ; ++i ) for( j = i+1 ; j < n ; ++j ) if(partition[i] != partition[j]) cost += comm[i][j]; return cost; } int* build_p_vector(double **comm, int n, int k, int greedy_trials, int * constraints, int nb_constraints) { int * part = NULL; if(greedy_trials>0) /*if greedy_trials > 0 then we use kpartition_greedy with greedy_trials trials*/ { part = kpartition_greedy2(k, comm, n, greedy_trials, constraints, nb_constraints); } else { int * size = calloc(k, sizeof(int)); int i,j; int nodes_per_part = n/k; int nb_real_nodes = n-nb_constraints; part = malloc(sizeof(int) * n); for(i=0; i<nb_constraints; i++) /*for each constraints*/ { int i_part = constraints[i]/nodes_per_part; /*we compute the partition where we have to put this constraint*/ part[nb_real_nodes+i] = i_part; size[i_part]++; } j=0; /* now we have to fill the partitions with the "real" nodes */ for(i=0; i<nb_real_nodes; i++) /*for each node*/ { if(size[j] < nodes_per_part) /*if j partition isn't full*/ { size[j]++; part[i] = j; /*then we put the node in this part*/ } else /*otherwise we decrement i to get the same node in the next loop*/ { i--; } j = (j+1)%k; /*and we change j to the next partition*/ } free(size); } return part; }
5,015
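The central bookkeeping above is the D(i,j) table: D(i,j) is the total communication between vertex i and the vertices currently in subset j, so moving i into j changes the cut by D(i,j) - D(i,part[i]). A small Python sketch of that invariant (illustrative only, not part of the library):

def build_D(comm, part, k):
    # D[i][j] = sum of comm[i][v] over vertices v currently in subset j
    n = len(comm)
    D = [[0.0] * k for _ in range(n)]
    for i in range(n):
        for v in range(n):
            D[i][part[v]] += comm[i][v]
    return D

def best_move(D, part, i):
    # highest-gain target subset for vertex i, i.e. the role played by
    # PQ_findMaxKey(&Qinst[i]) - D[i][part[i]] in the C code
    j = max(range(len(D[i])), key=lambda s: D[i][s])
    return j, D[i][j] - D[i][part[i]]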
479
import json import paho.mqtt.client as mqtt import logger LWT_ONLINE = "online" LWT_OFFLINE = "offline" _LOGGER = logger.get(__name__) class MqttClient: def __init__(self, config): self._config = config self._mqttc = mqtt.Client( client_id=self.client_id, clean_session=False, userdata={"global_topic_prefix": self.topic_prefix}, ) if self.username and self.password: self.mqttc.username_pw_set(self.username, self.password) if self.ca_cert: cert_reqs = mqtt.ssl.CERT_REQUIRED if self.ca_verify else mqtt.ssl.CERT_NONE self.mqttc.tls_set(self.ca_cert, cert_reqs=cert_reqs) self.mqttc.tls_insecure_set(not self.ca_verify) if self.availability_topic: topic = self._format_topic(self.availability_topic) _LOGGER.debug("Setting LWT to: %s" % topic) self.mqttc.will_set(topic, payload=LWT_OFFLINE, retain=True) def publish(self, messages): if not messages: return for m in messages: if m.use_global_prefix: topic = self._format_topic(m.topic) else: topic = m.topic self.mqttc.publish(topic, m.payload, retain=m.retain) @property def client_id(self): return ( self._config["client_id"] if "client_id" in self._config else "bt-mqtt-gateway" ) @property def hostname(self): return self._config["host"] @property def port(self): return self._config["port"] if "port" in self._config else 1883 @property def username(self): return str(self._config["username"]) if "username" in self._config else None @property def password(self): return str(self._config["password"]) if "password" in self._config else None @property def ca_cert(self): return self._config["ca_cert"] if "ca_cert" in self._config else None @property def ca_verify(self): if "ca_verify" in self._config: # Constrain config input to boolean value if self._config["ca_verify"]: return True else: return False else: return True @property def topic_prefix(self): return self._config["topic_prefix"] if "topic_prefix" in self._config else None @property def availability_topic(self): return ( self._config["availability_topic"] if "availability_topic" in self._config else None ) @property def mqttc(self): return self._mqttc # noinspection PyUnusedLocal def on_connect(self, client, userdata, flags, rc): if self.availability_topic: self.publish( [ MqttMessage( topic=self.availability_topic, payload=LWT_ONLINE, retain=True ) ] ) def callbacks_subscription(self, callbacks): self.mqttc.on_connect = self.on_connect self.mqttc.connect(self.hostname, port=self.port) for topic, callback in callbacks: topic = self._format_topic(topic) _LOGGER.debug("Subscribing to: %s" % topic) self.mqttc.message_callback_add(topic, callback) self.mqttc.subscribe(topic) self.mqttc.loop_start() def __del__(self): if self.availability_topic: self.publish( [ MqttMessage( topic=self.availability_topic, payload=LWT_OFFLINE, retain=True ) ] ) def _format_topic(self, topic): return "{}/{}".format(self.topic_prefix, topic) if self.topic_prefix else topic class MqttMessage: use_global_prefix = True def __init__(self, topic=None, payload=None, retain=False): self._topic = topic self._payload = payload self._retain = retain @property def topic(self): return self._topic @topic.setter def topic(self, new_topic): self._topic = new_topic @property def payload(self): if isinstance(self.raw_payload, str): return self.raw_payload else: return json.dumps(self.raw_payload) @property def raw_payload(self): return self._payload @property def retain(self): return self._retain @retain.setter def retain(self, new_retain): self._retain = new_retain @property def as_dict(self): return {"topic": self.topic, "payload": 
self.payload} def __repr__(self): return self.as_dict.__str__() def __str__(self): return self.__repr__() class MqttConfigMessage(MqttMessage): SENSOR = "sensor" CLIMATE = "climate" BINARY_SENSOR = "binary_sensor" COVER = "cover" SWITCH = "switch" use_global_prefix = False def __init__(self, component, name, payload=None, retain=False): super().__init__("{}/{}/config".format(component, name), payload, retain)
2,498
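A minimal usage sketch, assuming a broker on localhost; host, port, topic_prefix and availability_topic are the config keys the properties above actually read, while the broker address, topic names and payload are illustrative:

config = {
    "host": "localhost",           # required: read by MqttClient.hostname
    "port": 1883,                  # optional, defaults to 1883
    "topic_prefix": "home/gw",     # optional global prefix
    "availability_topic": "lwt",   # optional: enables the online/offline LWT
}

client = MqttClient(config)
# the class never connects in __init__; callbacks_subscription() does, or do it directly:
client.mqttc.connect(client.hostname, port=client.port)

msg = MqttMessage(topic="sensor/temp", payload={"t": 21.5}, retain=True)
client.publish([msg])
# publishes JSON to "home/gw/sensor/temp" because use_global_prefix is True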
921
// Copyright (c) 2015-2020 <NAME> <<EMAIL>> Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file. package com.vladsch.md.nav.todo; import com.intellij.lexer.Lexer; import com.intellij.psi.impl.cache.impl.OccurrenceConsumer; import com.intellij.psi.impl.cache.impl.todo.LexerBasedTodoIndexer; import org.jetbrains.annotations.NotNull; public class MdTodoIndexer extends LexerBasedTodoIndexer { @NotNull @Override public Lexer createLexer(@NotNull OccurrenceConsumer consumer) { return MdIdIndexer.createIndexingLexer(consumer); } }
213
2,151
<filename>cc/paint/image_transfer_cache_entry.h // Copyright (c) 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CC_PAINT_IMAGE_TRANSFER_CACHE_ENTRY_H_ #define CC_PAINT_IMAGE_TRANSFER_CACHE_ENTRY_H_ #include <vector> #include "base/atomic_sequence_num.h" #include "base/containers/span.h" #include "cc/paint/transfer_cache_entry.h" #include "third_party/skia/include/core/SkImage.h" namespace cc { static constexpr uint32_t kInvalidImageTransferCacheEntryId = static_cast<uint32_t>(-1); // Client/ServiceImageTransferCacheEntry implement a transfer cache entry // for transferring image data. On the client side, this is a CPU SkPixmap, // on the service side the image is uploaded and is a GPU SkImage. class CC_PAINT_EXPORT ClientImageTransferCacheEntry : public ClientTransferCacheEntryBase<TransferCacheEntryType::kImage> { public: explicit ClientImageTransferCacheEntry( const SkPixmap* pixmap, const SkColorSpace* target_color_space); ~ClientImageTransferCacheEntry() final; uint32_t Id() const final; // ClientTransferCacheEntry implementation: size_t SerializedSize() const final; bool Serialize(base::span<uint8_t> data) const final; private: uint32_t id_; const SkPixmap* const pixmap_; const SkColorSpace* const target_color_space_; size_t size_ = 0; static base::AtomicSequenceNumber s_next_id_; }; class CC_PAINT_EXPORT ServiceImageTransferCacheEntry : public ServiceTransferCacheEntryBase<TransferCacheEntryType::kImage> { public: ServiceImageTransferCacheEntry(); ~ServiceImageTransferCacheEntry() final; ServiceImageTransferCacheEntry(ServiceImageTransferCacheEntry&& other); ServiceImageTransferCacheEntry& operator=( ServiceImageTransferCacheEntry&& other); // ServiceTransferCacheEntry implementation: size_t CachedSize() const final; bool Deserialize(GrContext* context, base::span<const uint8_t> data) final; const sk_sp<SkImage>& image() { return image_; } private: sk_sp<SkImage> image_; size_t size_ = 0; }; } // namespace cc #endif // CC_PAINT_IMAGE_TRANSFER_CACHE_ENTRY_H_
715
1,056
<gh_stars>1000+
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.netbeans.modules.profiler.actions;

import java.awt.BorderLayout;
import java.awt.Dimension;
import java.awt.FlowLayout;
import org.openide.util.NbBundle;
import java.awt.event.ActionEvent;
import java.io.File;
import java.text.DateFormat;
import java.util.Arrays;
import java.util.Calendar;
import java.util.Date;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.Vector;
import java.util.logging.Level;
import javax.swing.*;
import javax.swing.event.ListSelectionEvent;
import javax.swing.event.ListSelectionListener;
import javax.swing.table.DefaultTableCellRenderer;
import javax.swing.table.DefaultTableModel;
import javax.swing.table.TableModel;
import org.netbeans.lib.profiler.common.Profiler;
import org.netbeans.lib.profiler.global.CalibrationDataFileIO;
import org.netbeans.lib.profiler.ui.swing.ProfilerTable;
import org.netbeans.lib.profiler.ui.swing.ProfilerTableContainer;
import org.netbeans.modules.profiler.api.JavaPlatform;
import org.netbeans.modules.profiler.api.ProfilerDialogs;
import org.netbeans.modules.profiler.spi.JavaPlatformManagerProvider;
import org.netbeans.modules.profiler.spi.JavaPlatformProvider;
import org.netbeans.modules.profiler.utilities.ProfilerUtils;
import org.openide.DialogDescriptor;
import org.openide.DialogDisplayer;
import org.openide.awt.ActionID;
import org.openide.awt.Mnemonics;
import org.openide.util.HelpCtx;
import org.openide.util.Lookup;
import org.openide.util.RequestProcessor;

/**
 * Provisional action to explicitly run Profiler calibration.
* * @author <NAME> * @author <NAME> */ @NbBundle.Messages({ "LBL_RunCalibrationAction=&Manage Calibration Data", "HINT_RunCalibrationAction=Manage Calibration Data", "HINT_CalibrateDisabled=Calibration is done Automatically for remote platforms", "LBL_JavaPlatform=Java Platform", "LBL_LastCalibrated=Last Calibrated", "LBL_NotCalibrated=Not calibrated", "LBL_JavaPlatformsForProfiling=&Java platforms available for profiling:", "LBL_Calibrate=&Calibrate", "LBL_JavaPlatforms=Java &Platforms...", "MSG_CalibrationOnProfile=Profiling session is currently in progress.\nDo you want to stop the current session and perform the calibration?", "MSG_CalibrationOnAttach=Profiling session is currently in progress\nDo you want to detach from the target application and perform the calibration?", "MSG_CalibrationFailed=Calibration failed.\nPlease check your setup and run the calibration again.", "TTP_PlatformName=Java platform name", "TTP_CalibrationDate=Date of last calibration" }) @ActionID(category="Profile", id="org.netbeans.modules.profiler.actions.RunCalibrationAction") //@ActionRegistration(displayName="#LBL_RunCalibrationAction") //@ActionReference(path="Menu/Profile/Advanced", position=100) public final class RunCalibrationAction extends AbstractAction { private static final HelpCtx HELP_CTX = new HelpCtx("ManageCalibration.HelpCtx"); // NOI18N //~ Constructors ------------------------------------------------------------------------------------------------------------- public RunCalibrationAction() { putValue(Action.NAME, Bundle.LBL_RunCalibrationAction()); putValue(Action.SHORT_DESCRIPTION, Bundle.HINT_RunCalibrationAction()); putValue("noIconInMenu", Boolean.TRUE); // NOI18N } //~ Methods ------------------------------------------------------------------------------------------------------------------ /** * Invoked when an action occurs. 
*/ public void actionPerformed(final ActionEvent e) { List<JavaPlatform> platforms = JavaPlatform.getPlatforms(); String[] columnNames = new String[] { Bundle.LBL_JavaPlatform(), Bundle.LBL_LastCalibrated() }; Object[][] columnData = new Object[platforms.size()][2]; for (int i = 0; i < platforms.size(); i++) columnData[i] = new Object[] { platforms.get(i), null }; final TableModel model = new DefaultTableModel(columnData, columnNames) { public boolean isCellEditable(int row, int column) { return false; } }; SwingUtilities.invokeLater(new Runnable() { public void run() { displayUI(model); } }); RequestProcessor.getDefault().post(new Runnable() { public void run() { refreshTimes(model); } }); } private void displayUI(final TableModel model) { final ProfilerTable table = new ProfilerTable(model, false, true, null); table.getColumnModel().getColumn(1).setCellRenderer(new CalibrationDateCellRenderer()); table.setDefaultColumnWidth(getColumnWidth()); table.setSortColumn(0); table.setPreferredScrollableViewportSize(new Dimension(400, 10)); table.setVisibleRows(6); table.setColumnToolTips(new String[] { Bundle.TTP_PlatformName(), Bundle.TTP_CalibrationDate() }); ProfilerTableContainer container = new ProfilerTableContainer(table, true, null); container.setBorder(BorderFactory.createEmptyBorder(0, 10, 0, 10)); JLabel label = new JLabel(); Mnemonics.setLocalizedText(label, Bundle.LBL_JavaPlatformsForProfiling()); label.setLabelFor(table); label.setBorder(BorderFactory.createEmptyBorder(15, 10, 5, 10)); final JButton calibrate = new JButton() { protected void fireActionPerformed(ActionEvent e) { calibrate(table); } }; Mnemonics.setLocalizedText(calibrate, Bundle.LBL_Calibrate()); JButton platforms = new JButton() { protected void fireActionPerformed(ActionEvent e) { JavaPlatform.showCustomizer(); refreshModel(table); } }; Mnemonics.setLocalizedText(platforms, Bundle.LBL_JavaPlatforms()); JPanel buttons = new JPanel(new FlowLayout(FlowLayout.LEADING, 10, 10)); buttons.add(calibrate); buttons.add(platforms); table.setDefaultAction(new AbstractAction() { public void actionPerformed(ActionEvent e) { calibrate(table); } }); table.getSelectionModel().addListSelectionListener(new ListSelectionListener() { public void valueChanged(ListSelectionEvent e) { if (table.getSelectedRow() != -1) { boolean remote = isRemotePlatform((JavaPlatform)table.getValueAt(table.getSelectedRow(), table.convertColumnIndexToView(0))); if (remote) { calibrate.setToolTipText(Bundle.HINT_CalibrateDisabled()); } else { calibrate.setToolTipText(""); //NOI18N } calibrate.setEnabled(!remote); } } }); calibrate.setEnabled(false); table.clearSelection(); JPanel panel = new JPanel(new BorderLayout()); panel.add(label, BorderLayout.NORTH); panel.add(container, BorderLayout.CENTER); panel.add(buttons, BorderLayout.SOUTH); DialogDescriptor dd = new DialogDescriptor(panel, Bundle.HINT_RunCalibrationAction(), true, new Object[] { DialogDescriptor.CLOSED_OPTION }, DialogDescriptor.CLOSED_OPTION, 0, HELP_CTX, null); DialogDisplayer.getDefault().createDialog(dd).setVisible(true); } private int getColumnWidth() { int width = new JLabel(Bundle.LBL_LastCalibrated() + "XXX") // NOI18N .getPreferredSize().width; width = Math.max(width, new JLabel(Bundle.LBL_NotCalibrated() + "XXX") // NOI18N .getPreferredSize().width); width = Math.max(width, new JLabel(DateFormat.getDateInstance() .format(new Date()) + "XXX").getPreferredSize().width); // NOI18N width = Math.max(width, new JLabel(DateFormat.getTimeInstance() .format(new Date()) + 
"XXX").getPreferredSize().width); // NOI18N return width; } private void refreshTimes(final TableModel model) { for (int i = 0; i < model.getRowCount(); i++) { JavaPlatform platform = (JavaPlatform)model.getValueAt(i, 0); boolean remote = isRemotePlatform(platform); String version = platform.getPlatformJDKVersion(); Long modified = null; if (remote) { try { File f = new File(CalibrationDataFileIO.getCalibrationDataFileName(version)+"."+platform.getProperties().get("platform.host")); //NOI18N if (f.isFile()) modified = Long.valueOf(f.lastModified()); } catch (Exception e) {} } else { try { File f = new File(CalibrationDataFileIO.getCalibrationDataFileName(version)); if (f.isFile()) modified = Long.valueOf(f.lastModified()); } catch (Exception e) {} } final int index = i; final Long _modified = modified; SwingUtilities.invokeLater(new Runnable() { public void run() { model.setValueAt(_modified, index, 1); } }); } } private void refreshModel(final ProfilerTable table) { Object selected = null; Set original = new HashSet(); int selrow = table.getSelectedRow(); int column = table.convertColumnIndexToView(0); for (int row = 0; row < table.getRowCount(); row++) { Object value = table.getValueAt(row, column); original.add(value); if (row == selrow) selected = value; } final DefaultTableModel model = (DefaultTableModel)table.getModel(); Vector data = model.getDataVector(); data.clear(); for (JavaPlatform platform : JavaPlatform.getPlatforms()) { data.add(new Vector(Arrays.asList(platform, null))); if (!original.contains(platform)) selected = platform; } table.clearSelection(); model.fireTableDataChanged(); if (selected != null) table.selectValue(selected, column, true); RequestProcessor.getDefault().post(new Runnable() { public void run() { refreshTimes(model); } }); } private void calibrate(ProfilerTable table) { int row = table.getSelectedRow(); if (row == -1) return; int col = table.convertColumnIndexToView(0); final JavaPlatform platform = (JavaPlatform)table.getValueAt(row, col); final DefaultTableModel model = (DefaultTableModel)table.getModel(); SwingUtilities.invokeLater(new Runnable() { public void run() { calibrate(platform, model); } }); } private static boolean isRemotePlatform(final JavaPlatform platform) { JavaPlatformManagerProvider impl = Lookup.getDefault().lookup(JavaPlatformManagerProvider.class); if (impl == null) { ProfilerUtils.getProfilerErrorManager().log(Level.WARNING.intValue(), "No instance of JavaPlatformManagerProvider found in Lookup"); //NOI18N return false; } for (JavaPlatformProvider jpp : impl.getPlatforms()) { if ( (platform.getPlatformId() != null) && (platform.getPlatformId().equals(jpp.getPlatformId())) && (platform.getProperties().containsKey("platform.host")) ) {//NOI18N return true; } } return false; } private void calibrate(final JavaPlatform platform, final TableModel model) { final int state = Profiler.getDefault().getProfilingState(); final int mode = Profiler.getDefault().getProfilingMode(); boolean terminate = false; boolean detach = false; if ((state == Profiler.PROFILING_PAUSED) || (state == Profiler.PROFILING_RUNNING)) { if (mode == Profiler.MODE_PROFILE) { if (!ProfilerDialogs.displayConfirmation( Bundle.MSG_CalibrationOnProfile(), Bundle.CAPTION_Question())) { return; } terminate = true; } else { if (!ProfilerDialogs.displayConfirmation( Bundle.MSG_CalibrationOnAttach(), Bundle.CAPTION_Question())) { return; } detach = true; } } final boolean doDetach = detach; final boolean doStop = terminate; ProfilerUtils.getProfilerRequestProcessor().post(new 
Runnable() { public void run() { if (doDetach) { Profiler.getDefault().detachFromApp(); } else if (doStop) { Profiler.getDefault().stopApp(); } boolean calibrated = Profiler.getDefault().runCalibration(false, platform.getPlatformJavaFile(), platform.getPlatformJDKVersion(), platform.getPlatformArchitecture()); refreshTimes(model); if (!calibrated) ProfilerDialogs.displayError(Bundle.MSG_CalibrationFailed()); } }, 0, Thread.MAX_PRIORITY); } private static class CalibrationDateCellRenderer extends DefaultTableCellRenderer { private static Calendar CALENDAR; private static int REF_DAY_OF_YEAR = -1; private static int DAY_OF_YEAR = -1; private static int YEAR = -1; private static int ERA = -1; private static Date DATE; private static DateFormat FORMAT_TIME; private static DateFormat FORMAT_DATE; CalibrationDateCellRenderer() { setHorizontalAlignment(TRAILING); } protected void setValue(Object value) { if (value == null) { setText(Bundle.LBL_NotCalibrated()); } else { long time = ((Long)value).longValue(); setValue(time, isToday(time)); } } private void setValue(long time, boolean today) { DateFormat format; if (today) { if (FORMAT_TIME == null) FORMAT_TIME = DateFormat.getTimeInstance(); format = FORMAT_TIME; } else { if (FORMAT_DATE == null) FORMAT_DATE = DateFormat.getDateInstance(); format = FORMAT_DATE; } if (DATE == null) DATE = new Date(); DATE.setTime(time); setText(format.format(DATE)); } private static boolean isToday(long time) { if (REF_DAY_OF_YEAR != -1 && CALENDAR.get(Calendar.DAY_OF_YEAR) != REF_DAY_OF_YEAR) CALENDAR = null; if (CALENDAR == null) initializeCalendar(); CALENDAR.setTimeInMillis(time); return DAY_OF_YEAR == CALENDAR.get(Calendar.DAY_OF_YEAR) && YEAR == CALENDAR.get(Calendar.YEAR) && ERA == CALENDAR.get(Calendar.ERA); } private static void initializeCalendar() { CALENDAR = Calendar.getInstance(); DAY_OF_YEAR = CALENDAR.get(Calendar.DAY_OF_YEAR); YEAR = CALENDAR.get(Calendar.YEAR); ERA = CALENDAR.get(Calendar.ERA); if (REF_DAY_OF_YEAR == -1) REF_DAY_OF_YEAR = DAY_OF_YEAR; } } }
7,111
3,693
<gh_stars>1000+
import numpy as np
import matplotlib.pyplot as plt
import os
import sys
from collections import defaultdict

sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/../../Search_based_Planning/")
from Search_3D.env3D import env
from Search_3D import Astar3D
from Search_3D.utils3D import StateSpace, getDist, getNearest, getRay, isinbound, isinball, isCollide, children, cost, \
    initcost
from Search_3D.plot_util3D import visualization


class D_star(object):
    def __init__(self, resolution=1):
        self.Alldirec = {(1, 0, 0): 1, (0, 1, 0): 1, (0, 0, 1): 1,
                         (-1, 0, 0): 1, (0, -1, 0): 1, (0, 0, -1): 1,
                         (1, 1, 0): np.sqrt(2), (1, 0, 1): np.sqrt(2), (0, 1, 1): np.sqrt(2),
                         (-1, -1, 0): np.sqrt(2), (-1, 0, -1): np.sqrt(2), (0, -1, -1): np.sqrt(2),
                         (1, -1, 0): np.sqrt(2), (-1, 1, 0): np.sqrt(2), (1, 0, -1): np.sqrt(2),
                         (-1, 0, 1): np.sqrt(2), (0, 1, -1): np.sqrt(2), (0, -1, 1): np.sqrt(2),
                         (1, 1, 1): np.sqrt(3), (-1, -1, -1): np.sqrt(3),
                         (1, -1, -1): np.sqrt(3), (-1, 1, -1): np.sqrt(3), (-1, -1, 1): np.sqrt(3),
                         (1, 1, -1): np.sqrt(3), (1, -1, 1): np.sqrt(3), (-1, 1, 1): np.sqrt(3)}
        self.settings = 'CollisionChecking'
        self.env = env(resolution=resolution)
        self.X = StateSpace(self.env)
        self.x0, self.xt = getNearest(self.X, self.env.start), getNearest(self.X, self.env.goal)
        # self.x0, self.xt = tuple(self.env.start), tuple(self.env.goal)
        self.b = defaultdict(lambda: defaultdict(dict))  # back pointers; every state has one except xt
        self.OPEN = {}  # OPEN list implemented as a hashmap: key is the point, value is its k value
        self.h = {}  # cost estimate from a point to the end point
        self.tag = {}  # state tags; every state starts as 'New'
        self.V = set()  # vertices in CLOSED
        # for visualization
        self.ind = 0
        self.Path = []
        self.done = False
        self.Obstaclemap = {}

    def checkState(self, y):
        if y not in self.h:
            self.h[y] = 0
        if y not in self.tag:
            self.tag[y] = 'New'

    def get_kmin(self):
        # get the minimum of the k values in OPEN; -1 if OPEN is empty
        if self.OPEN:
            return min(self.OPEN.values())
        return -1

    def min_state(self):
        # returns the state in OPEN with minimum k(.) and removes it from the OPEN set;
        # if OPEN is empty, returns (None, -1)
        if self.OPEN:
            minvalue = min(self.OPEN.values())
            for k in self.OPEN.keys():
                if self.OPEN[k] == minvalue:
                    return k, self.OPEN.pop(k)
        return None, -1

    def insert(self, x, h_new):
        # insert a key/value pair (x, kx) into the OPEN list,
        # choosing kx according to the current tag of x
        if self.tag[x] == 'New':
            kx = h_new
        if self.tag[x] == 'Open':
            kx = min(self.OPEN[x], h_new)
        if self.tag[x] == 'Closed':
            kx = min(self.h[x], h_new)
        self.OPEN[x] = kx
        self.h[x], self.tag[x] = h_new, 'Open'

    def process_state(self):
        # main routine of the D* algorithm: expand the state with minimum k
        # and repair costs around the old path when needed
        x, kold = self.min_state()
        if x is None:  # nothing left to process
            return -1
        self.tag[x] = 'Closed'
        self.V.add(x)
        # make sure x has h and tag entries if it is seen for the first time
        self.checkState(x)
        if kold < self.h[x]:  # raised states
            for y in children(self, x):
                # check y
                self.checkState(y)
                a = self.h[y] + cost(self, y, x)
                if self.h[y] <= kold and self.h[x] > a:
                    self.b[x], self.h[x] = y, a
        if kold == self.h[x]:  # lowered states
            for y in children(self, x):
                # check y
                self.checkState(y)
                bb = self.h[x] + cost(self, x, y)
                if self.tag[y] == 'New' or \
                        (self.b[y] == x and self.h[y] != bb) or \
                        (self.b[y] != x and self.h[y] > bb):
                    self.b[y] = x
                    self.insert(y, bb)
        else:
            for y in children(self, x):
                # check y
                self.checkState(y)
                bb = self.h[x] + cost(self, x, y)
                if self.tag[y] == 'New' or \
                        (self.b[y] == x and self.h[y] != bb):
                    self.b[y] = x
                    self.insert(y, bb)
                else:
                    if self.b[y] != x and self.h[y] > bb:
                        self.insert(x, self.h[x])
                    else:
                        if self.b[y] != x and self.h[y] > bb and \
                                self.tag[y] == 'Closed' and self.h[y] == kold:
                            self.insert(y, self.h[y])
        return self.get_kmin()

    def modify_cost(self, x):
        xparent = self.b[x]
        if self.tag[x] == 'Closed':
            self.insert(x, self.h[xparent] + cost(self, x, xparent))

    def modify(self, x):
        self.modify_cost(x)
        while True:
            kmin = self.process_state()
            # visualization(self)
            if kmin >= self.h[x]:
                break

    def path(self, goal=None):
        path = []
        if not goal:
            x = self.x0
        else:
            x = goal
        start = self.xt
        while x != start:
            path.append([np.array(x), np.array(self.b[x])])
            x = self.b[x]
        return path

    def run(self):
        # put G (the goal state) into the OPEN list
        self.OPEN[self.xt] = 0
        self.tag[self.x0] = 'New'
        # first run
        while True:
            # TODO: self.x0 =
            self.process_state()
            # visualization(self)
            if self.tag[self.x0] == "Closed":
                break
            self.ind += 1
        self.Path = self.path()
        self.done = True
        visualization(self)
        plt.pause(0.2)
        # plt.show()
        # when the environment changes over time
        for i in range(5):
            self.env.move_block(a=[0, -0.50, 0], s=0.5, block_to_move=1, mode='translation')
            self.env.move_block(a=[-0.25, 0, 0], s=0.5, block_to_move=0, mode='translation')
            # travel from end to start
            s = tuple(self.env.start)
            # self.V = set()
            while s != self.xt:
                if s == tuple(self.env.start):
                    sparent = self.b[self.x0]
                else:
                    sparent = self.b[s]
                # if there is a change of cost, or a collision
                if cost(self, s, sparent) == np.inf:
                    self.modify(s)
                    continue
                self.ind += 1
                s = sparent
            self.Path = self.path()
            visualization(self)
        plt.show()


if __name__ == '__main__':
    D = D_star(1)
    D.run()
4,038
15,689
<reponame>YuanYuanDog/YYKit
//
//  YYTextAttachmentExample.h
//  YYKitExample
//
//  Created by ibireme on 15/8/21.
//  Copyright (c) 2015 ibireme. All rights reserved.
//

#import <UIKit/UIKit.h>

@interface YYTextAttachmentExample : UIViewController
@end
101
631
<filename>metron-platform/metron-management/src/test/java/org/apache/metron/management/utils/FileUtils.java<gh_stars>100-1000
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.metron.management.utils;

import com.google.common.base.Joiner;
import com.google.common.io.Files;

import java.io.File;
import java.io.IOException;
import java.nio.charset.Charset;

public class FileUtils {

  public static String slurp(String loc) {
    try {
      return Joiner.on("\n").join(Files.readLines(
          new File(loc), Charset.defaultCharset())).trim();
    } catch (IOException e) {
      throw new IllegalStateException(e);
    }
  }
}
414
777
<reponame>google-ar/chromium
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "chrome/browser/ui/webui/options/automatic_settings_reset_handler.h"

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/macros.h"
#include "base/time/time.h"
#include "chrome/browser/prefs/chrome_pref_service_factory.h"
#include "chrome/browser/profiles/profile.h"
#include "chrome/common/url_constants.h"
#include "chrome/grit/generated_resources.h"
#include "components/strings/grit/components_strings.h"
#include "content/public/browser/web_ui.h"

namespace {

void OnDismissedAutomaticSettingsResetBanner(Profile* profile,
                                             const base::ListValue* value) {
  chrome_prefs::ClearResetTime(profile);
}

}  // namespace

namespace options {

AutomaticSettingsResetHandler::AutomaticSettingsResetHandler() {}
AutomaticSettingsResetHandler::~AutomaticSettingsResetHandler() {}

void AutomaticSettingsResetHandler::InitializePage() {
  static const int kBannerShowTimeInDays = 5;

  const base::Time then =
      chrome_prefs::GetResetTime(Profile::FromWebUI(web_ui()));
  if (!then.is_null()) {
    const base::Time now = base::Time::Now();
    if ((now - then).InDays() < kBannerShowTimeInDays)
      web_ui()->CallJavascriptFunctionUnsafe(
          "AutomaticSettingsResetBanner.show");
  }
}

void AutomaticSettingsResetHandler::GetLocalizedValues(
    base::DictionaryValue* localized_strings) {
  DCHECK(localized_strings);

  static const OptionsStringResource resources[] = {
      { "automaticSettingsResetBannerResetButtonText",
        IDS_AUTOMATIC_SETTINGS_RESET_BANNER_RESET_BUTTON_TEXT },
      { "automaticSettingsResetBannerText",
        IDS_AUTOMATIC_SETTINGS_RESET_BANNER_TEXT },
      { "automaticSettingsResetLearnMoreUrl", IDS_LEARN_MORE },
  };

  RegisterStrings(localized_strings, resources, arraysize(resources));
  localized_strings->SetString(
      "automaticSettingsResetLearnMoreUrl",
      chrome::kAutomaticSettingsResetLearnMoreURL);
}

void AutomaticSettingsResetHandler::RegisterMessages() {
  web_ui()->RegisterMessageCallback(
      "onDismissedAutomaticSettingsResetBanner",
      base::Bind(&OnDismissedAutomaticSettingsResetBanner,
                 Profile::FromWebUI(web_ui())));
}

}  // namespace options
880
7,131
{ "uri": "mongodb+srv://test14.test.build.10gen.cc/", "seeds": [], "hosts": [], "error": true, "comment": "Should fail because returned host name's part \"not-test\" mismatches URI parent part \"test\"." }
81
756
// Copyright (C) 2020 <NAME>, <NAME>
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.

#include <math.h>

#include <vector>
#include <algorithm>

#include "image_labelers/diff_helpers/angle_diff.h"

namespace depth_clustering {

AngleDiff::AngleDiff(const cv::Mat* source_image,
                     const ProjectionParams* params)
    : AbstractDiff{source_image}, _params{params} {
  PreComputeAlphaVecs();
}

float AngleDiff::DiffAt(const PixelCoord& from, const PixelCoord& to) const {
  const float& current_depth = _source_image->at<float>(from.row, from.col);
  const float& neighbor_depth = _source_image->at<float>(to.row, to.col);
  float alpha = ComputeAlpha(from, to);
  if (alpha > _params->h_span().val() - 0.05) {
    // we are over the border
    const float span = _params->h_span().val();
    if (alpha > span) {
      alpha -= span;
    } else {
      alpha = span - alpha;
    }
  }
  float d1 = std::max(current_depth, neighbor_depth);
  float d2 = std::min(current_depth, neighbor_depth);
  float beta = std::atan2((d2 * sin(alpha)), (d1 - d2 * cos(alpha)));
  return fabs(beta);
}

void AngleDiff::PreComputeAlphaVecs() {
  _row_alphas.reserve(_params->rows());
  for (size_t r = 0; r < _params->rows() - 1; ++r) {
    _row_alphas.push_back(
        fabs((_params->AngleFromRow(r + 1) - _params->AngleFromRow(r)).val()));
  }
  // add last row alpha
  _row_alphas.push_back(0.0f);
  // now handle the cols
  _col_alphas.reserve(_params->cols());
  for (size_t c = 0; c < _params->cols() - 1; ++c) {
    _col_alphas.push_back(
        fabs((_params->AngleFromCol(c + 1) - _params->AngleFromCol(c)).val()));
  }
  // handle last angle where we wrap columns
  float last_alpha = fabs((_params->AngleFromCol(0) -
                           _params->AngleFromCol(_params->cols() - 1)).val());
  last_alpha -= _params->h_span().val();
  _col_alphas.push_back(last_alpha);
}

float AngleDiff::ComputeAlpha(const PixelCoord& current,
                              const PixelCoord& neighbor) const {
  if ((current.col == 0 &&
       neighbor.col == static_cast<int>(_params->cols() - 1)) ||
      (neighbor.col == 0 &&
       current.col == static_cast<int>(_params->cols() - 1))) {
    // this means we wrap around
    return _col_alphas.back();
  }
  if (current.row < neighbor.row) {
    return _row_alphas[current.row];
  } else if (current.row > neighbor.row) {
    return _row_alphas[neighbor.row];
  } else if (current.col < neighbor.col) {
    return _col_alphas[current.col];
  } else if (current.col > neighbor.col) {
    return _col_alphas[neighbor.col];
  }
  return 0;
}

AngleDiffPrecomputed::AngleDiffPrecomputed(const cv::Mat* source_image,
                                           const ProjectionParams* params)
    : AbstractDiff{source_image}, _params{params} {
  PreComputeAlphaVecs();
  PreComputeBetaAngles();
}

float AngleDiffPrecomputed::DiffAt(const PixelCoord& from,
                                   const PixelCoord& to) const {
  int16_t row = 0;
  int16_t col = 0;
  /// If one of rows is biggest possible - use it. Otherwise use
  /// `std::min(from.row, to.row)`.
  int16_t last_row = static_cast<int16_t>(_params->rows()) - 1;
  bool row_crosses_border = (from.row == last_row && to.row == 0) ||
                            (from.row == 0 && to.row == last_row);
  if (row_crosses_border) {
    row = last_row;
  } else {
    row = std::min(from.row, to.row);
  }
  /// If one of cols is biggest possible - use it. Otherwise use
  /// `std::min(from.col, to.col)`.
  int16_t last_col = static_cast<int16_t>(_params->cols()) - 1;
  bool col_crosses_border = (from.col == last_col && to.col == 0) ||
                            (from.col == 0 && to.col == last_col);
  if (col_crosses_border) {
    col = last_col;
  } else {
    col = std::min(from.col, to.col);
  }
  // If row changed -> pick row-wise beta matrix
  if (from.row != to.row) {
    return _beta_rows.at<float>(row, col);
  }
  // If col changed -> pick col-wise beta matrix
  if (from.col != to.col) {
    return _beta_cols.at<float>(row, col);
  }
  throw std::runtime_error("Asking for difference of same pixels.");
}

cv::Mat AngleDiffPrecomputed::Visualize() const {
  cv::Mat colors = cv::Mat::zeros(_beta_rows.rows, _beta_rows.cols, CV_8UC3);
  float max_angle_deg = 90.0f;
  for (int r = 0; r < _beta_rows.rows; ++r) {
    for (int c = 0; c < _beta_rows.cols; ++c) {
      if (_source_image->at<float>(r, c) < 0.01f) {
        continue;
      }
      auto row_angle = Radians::FromRadians(_beta_rows.at<float>(r, c));
      auto col_angle = Radians::FromRadians(_beta_cols.at<float>(r, c));
      uint8_t row_color = 255 * (row_angle.ToDegrees() / max_angle_deg);
      uint8_t col_color = 255 * (col_angle.ToDegrees() / max_angle_deg);
      cv::Vec3b color(255 - row_color, 255 - col_color, 0);
      colors.at<cv::Vec3b>(r, c) = color;
    }
  }
  return colors;
}

void AngleDiffPrecomputed::PreComputeAlphaVecs() {
  _row_alphas.reserve(_params->rows());
  for (size_t r = 0; r < _params->rows() - 1; ++r) {
    _row_alphas.push_back(
        fabs((_params->AngleFromRow(r + 1) - _params->AngleFromRow(r)).val()));
  }
  // add last row alpha
  _row_alphas.push_back(0.0f);
  // now handle the cols
  _col_alphas.reserve(_params->cols());
  for (size_t c = 0; c < _params->cols() - 1; ++c) {
    _col_alphas.push_back(
        fabs((_params->AngleFromCol(c + 1) - _params->AngleFromCol(c)).val()));
  }
  // handle last angle where we wrap columns
  float last_alpha = fabs((_params->AngleFromCol(0) -
                           _params->AngleFromCol(_params->cols() - 1)).val());
  last_alpha -= _params->h_span().val();
  _col_alphas.push_back(fabs(last_alpha));
}

void AngleDiffPrecomputed::PreComputeBetaAngles() {
  _beta_rows = cv::Mat::zeros(_params->rows(), _params->cols(), CV_32F);
  _beta_cols = cv::Mat::zeros(_params->rows(), _params->cols(), CV_32F);
  for (size_t r = 0; r < _params->rows(); ++r) {
    float angle_rows = _row_alphas[r];
    for (size_t c = 0; c < _params->cols(); ++c) {
      if (_source_image->at<float>(r, c) < 0.001) {
        continue;
      }
      float angle_cols = _col_alphas[c];
      float curr = _source_image->at<float>(r, c);
      // Compute beta in horizontal (col) direction. Note wrapping around.
      size_t next_c = (c + 1) % _params->cols();
      _beta_cols.at<float>(r, c) =
          GetBeta(angle_cols, curr, _source_image->at<float>(r, next_c));
      // Compute beta in vertical (row) direction.
      // Protect the last row. There are 0s stored there.
      size_t next_r = r + 1;
      if (next_r >= _params->rows()) {
        continue;
      }
      _beta_rows.at<float>(r, c) =
          GetBeta(angle_rows, curr, _source_image->at<float>(next_r, c));
    }
  }
}

float AngleDiffPrecomputed::GetBeta(float alpha, float current_depth,
                                    float neighbor_depth) const {
  float d1 = std::max(current_depth, neighbor_depth);
  float d2 = std::min(current_depth, neighbor_depth);
  float beta = std::atan2((d2 * sin(alpha)), (d1 - d2 * cos(alpha)));
  return fabs(beta);
}

}  // namespace depth_clustering
3,323
3,428
{"id":"01226","group":"spam-2","checksum":{"type":"MD5","value":"4aaf4e328bd55191a1c46bc374069048"},"text":"From <EMAIL> Thu Aug 1 17:04:08 2002\nReturn-Path: <<EMAIL>>\nDelivered-To: y<EMAIL>.com\nReceived: from localhost (localhost [127.0.0.1])\n\tby phobos.labs.netnoteinc.com (Postfix) with ESMTP id B92DF440F1\n\tfor <jm@localhost>; Thu, 1 Aug 2002 12:03:59 -0400 (EDT)\nReceived: from phobos [127.0.0.1]\n\tby localhost with IMAP (fetchmail-5.9.0)\n\tfor jm@localhost (single-drop); Thu, 01 Aug 2002 17:03:59 +0100 (IST)\nReceived: from 192.168.127.12 ([218.31.42.88]) by dogma.slashnull.org\n (8.11.6/8.11.6) with SMTP id g71Fwq219842 for <<EMAIL>>;\n Thu, 1 Aug 2002 16:58:53 +0100\nMessage-Id: <<EMAIL>>\nReceived: from [192.168.3.11] by rly-xl04.mx.aol.com with smtp;\n Aug, 01 2002 16:28:53 +0300\nReceived: from unknown (HELO mail.gmx.net) (172.16.58.3)by\n rly-xl04.mx.aol.com with local; Aug, 01 2002 15:56:37 -0100\nReceived: from rly-yk04.mx.aol.com ([30.241.135.107]) by q4.quik.com with\n SMTP; Aug, 01 2002 14:43:36 +0400\nFrom: Bulk Email Lists <<EMAIL>>\nTo: <EMAIL>\nCc: \nSubject: $16.99 per 500,000 verified email addresses\nSender: Bulk Email Lists <<EMAIL>>\nMIME-Version: 1.0\nContent-Type: text/plain; charset=\"iso-8859-1\"\nDate: Thu, 1 Aug 2002 16:57:57 +0100\nX-Mailer: QUALCOMM Windows Eudora Version 5.1\n\nWe are offering you quality marketing lists which have been verified giving you a 90% delivery rate for your marketing campaign.\n\nThe lists are downloadable along with free sending software. We are currently adding 500,000 new addresses for download each week.\n\nPlease Call [USA] 303 889 5732 (24hr Recorded Information)\n\n\n"}
679
331
<filename>src/main/java/org/yx/validate/ComplexParamValidator.java
/**
 * Copyright (C) 2016 - 2030 youtongluan.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.yx.validate;

import java.lang.reflect.Array;
import java.util.List;

import org.yx.annotation.Bean;
import org.yx.exception.InvalidParamException;
import org.yx.exception.SimpleSumkException;
import org.yx.log.Logs;

@Bean
public class ComplexParamValidator implements Validator {

	@Override
	public void valid(final ParameterInfo info, Object arg) throws InvalidParamException {
		if (info == null || arg == null) {
			return;
		}
		Class<?> clz = arg.getClass();
		if (clz.isArray()) {
			int length = Array.getLength(arg);
			if (length == 0) {
				return;
			}
			for (int i = 0; i < length; i++) {
				Validators.check(info, Array.get(arg, i));
			}
			return;
		}
		if (!info.isComplex()) {
			return;
		}
		this.checkFields(FieldParameterHolder.get(clz), arg);
	}

	protected void checkFields(List<FieldParameterInfo> infos, Object obj) throws InvalidParamException {
		if (infos == null) {
			return;
		}
		try {
			for (FieldParameterInfo info : infos) {
				Validators.check(info, info.field.get(obj));
			}
		} catch (Exception e) {
			if (e instanceof InvalidParamException) {
				throw (InvalidParamException) e;
			}
			// message: "an exception occurred during parameter validation: <details>"
			Logs.system().warn("参数校验发生异常," + e.getLocalizedMessage(), e);
			// message: "an exception occurred during validation; the detail is: <details>"
			throw new SimpleSumkException(5346614, "校验时发生异常,异常信息为" + e.getLocalizedMessage());
		}
	}

	@Override
	public int order() {
		return 10000;
	}
}
747
977
<filename>src/main/java/io/leangen/graphql/execution/DerivedTypeRegistry.java
package io.leangen.graphql.execution;

import io.leangen.graphql.generator.mapping.DelegatingOutputConverter;
import io.leangen.graphql.metadata.TypedElement;

import java.lang.reflect.AnnotatedElement;
import java.lang.reflect.AnnotatedType;
import java.util.ArrayList;
import java.util.Collections;
import java.util.IdentityHashMap;
import java.util.List;
import java.util.Map;

@SuppressWarnings("rawtypes")
class DerivedTypeRegistry {

    private final Map<AnnotatedType, List<AnnotatedType>> derivedTypes;

    DerivedTypeRegistry(List<TypedElement> elements, List<DelegatingOutputConverter> derivers) {
        this.derivedTypes = new IdentityHashMap<>();
        elements.forEach(element -> derive(element, element.getJavaType(), derivers));
    }

    private void registerDerivatives(AnnotatedElement element, AnnotatedType type,
                                     List<AnnotatedType> derivedTypes,
                                     List<DelegatingOutputConverter> derivers) {
        derivedTypes.forEach(derived -> {
            this.derivedTypes.computeIfAbsent(type, k -> new ArrayList<>());
            this.derivedTypes.get(type).add(derived);
            derive(element, derived, derivers);
        });
    }

    @SuppressWarnings("unchecked")
    private void derive(AnnotatedElement element, AnnotatedType type, List<DelegatingOutputConverter> derivers) {
        derivers.stream()
                .filter(deriver -> deriver.supports(element, type))
                .findFirst()
                .ifPresent(deriver -> registerDerivatives(element, type, deriver.getDerivedTypes(type), derivers));
    }

    List<AnnotatedType> getDerived(AnnotatedType type) {
        return derivedTypes.getOrDefault(type, Collections.emptyList());
    }
}
658
782
/*
 * Copyright (c) 2020, <NAME>. All Rights Reserved.
 *
 * This file is part of BoofCV (http://boofcv.org).
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package boofcv.alg.feature.detect.edge;

import boofcv.alg.misc.ImageMiscOps;
import boofcv.concurrency.BoofConcurrency;
import boofcv.struct.image.GrayF32;
import boofcv.struct.image.GrayS8;
import org.openjdk.jmh.annotations.*;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;
import org.openjdk.jmh.runner.options.TimeValue;

import java.util.Random;
import java.util.concurrent.TimeUnit;

@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MILLISECONDS)
@Warmup(iterations = 2)
@Measurement(iterations = 5)
@State(Scope.Benchmark)
@Fork(value = 1)
public class BenchmarkGradientToEdge {
	@Param({"true", "false"})
	boolean concurrent;

	final int imageSize = 800;

	GrayF32 derivX_F32 = new GrayF32(imageSize, imageSize);
	GrayF32 derivY_F32 = new GrayF32(imageSize, imageSize);
	GrayF32 intensity_F32 = new GrayF32(imageSize, imageSize);
	GrayF32 orientation_F32 = new GrayF32(imageSize, imageSize);
	GrayS8 direction = new GrayS8(imageSize, imageSize);

	@Setup
	public void setup() {
		BoofConcurrency.USE_CONCURRENT = concurrent;
		var rand = new Random(234234);

		ImageMiscOps.fillUniform(derivX_F32, rand, 0, 255);
		ImageMiscOps.fillUniform(derivY_F32, rand, 0, 255);
		ImageMiscOps.fillUniform(orientation_F32, rand, (float)(-Math.PI/2.0), (float)(Math.PI/2.0));
	}

	// @formatter:off
	@Benchmark public void Euclidian_F32() {GradientToEdgeFeatures.intensityE(derivX_F32,derivY_F32,intensity_F32);}
	@Benchmark public void Abs_F32() {GradientToEdgeFeatures.intensityAbs(derivX_F32,derivY_F32,intensity_F32);}
	@Benchmark public void Orientation_F32() {GradientToEdgeFeatures.direction(derivX_F32,derivY_F32,intensity_F32);}
	@Benchmark public void Orientation2_F32() {GradientToEdgeFeatures.direction2(derivX_F32,derivY_F32,intensity_F32);}
	@Benchmark public void Discretize4() {GradientToEdgeFeatures.discretizeDirection4(intensity_F32,direction);}
	@Benchmark public void Discretize8() {GradientToEdgeFeatures.discretizeDirection8(intensity_F32,direction);}
	// @formatter:on

	public static void main( String[] args ) throws RunnerException {
		Options opt = new OptionsBuilder()
				.include(BenchmarkGradientToEdge.class.getSimpleName())
				.warmupTime(TimeValue.seconds(1))
				.measurementTime(TimeValue.seconds(1))
				.build();

		new Runner(opt).run();
	}
}
1,080
10,225
package io.quarkus.qute.deployment;

import io.quarkus.builder.item.MultiBuildItem;
import io.quarkus.qute.TemplateNode.Origin;

public final class IncorrectExpressionBuildItem extends MultiBuildItem {

    public final String expression;
    public final String property;
    public final String clazz;
    public final Origin origin;
    public final String reason;

    public IncorrectExpressionBuildItem(String expression, String property, String clazz, Origin origin) {
        this(expression, property, clazz, origin, null);
    }

    public IncorrectExpressionBuildItem(String expression, String reason, Origin origin) {
        this(expression, null, null, origin, reason);
    }

    public IncorrectExpressionBuildItem(String expression, String property, String clazz, Origin origin,
            String reason) {
        this.expression = expression;
        this.property = property;
        this.clazz = clazz;
        this.origin = origin;
        this.reason = reason;
    }
}
330
1,936
/*
 * Copyright (C) 2014 <NAME>, ASL, ETH Zurich, Switzerland
 * You can contact the author at <slynen at ethz dot ch>
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MAPLAB_COMMON_TESTING_PREDICATES_H_
#define MAPLAB_COMMON_TESTING_PREDICATES_H_

#include <cmath>
#include <string>

#include <glog/logging.h>
#include <gtest/gtest.h>

namespace common {

template <typename LeftMat, typename RightMat>
::testing::AssertionResult MatricesEqual(
    const LeftMat& A, const RightMat& B, double threshold) {
  if (A.rows() != B.rows() || A.cols() != B.cols()) {
    return ::testing::AssertionFailure()
           << "Matrix size mismatch: " << A.rows() << "x" << A.cols()
           << " != " << B.rows() << "x" << B.cols();
  }

  bool success = true;
  std::string message;
  for (int i = 0; i < A.rows(); ++i) {
    for (int j = 0; j < A.cols(); ++j) {
      double Aij = A(i, j);
      double Bij = B(i, j);
      if (std::abs(Aij - Bij) > threshold) {
        success = false;
        message += "\n  Mismatch at [" + std::to_string(i) + "," +
                   std::to_string(j) + "] : " + std::to_string(Aij) +
                   " != " + std::to_string(Bij);
      }
    }
  }

  return success ? ::testing::AssertionSuccess()
                 : ::testing::AssertionFailure() << message << std::endl;
}

}  // namespace common

#define __INTERNAL_GTEST_NEAR_EIGEN(PREDICATE, matrix_A, matrix_B, precision) \
  PREDICATE##_TRUE(                                                           \
      ((matrix_A) - (matrix_B)).cwiseAbs().maxCoeff() <= precision)           \
      << "For matrices '" << #matrix_A << "' and '" << #matrix_B << "'."      \
      << std::endl                                                            \
      << "Where '" << #matrix_A << "' equals: " << std::endl                  \
      << (matrix_A) << std::endl                                              \
      << "and '" << #matrix_B << "' equals: " << std::endl                    \
      << (matrix_B) << std::endl                                              \
      << "and precision equals: " << precision;

#define __INTERNAL_GTEST_ZERO_EIGEN(PREDICATE, matrix_A, precision) \
  PREDICATE##_TRUE((matrix_A).isZero(precision))                    \
      << "For matrix '" << #matrix_A << "'." << std::endl           \
      << "Where '" << #matrix_A << "' equals: " << std::endl        \
      << (matrix_A) << std::endl                                    \
      << "and precision equals: " << precision;

#define __INTERNAL_GTEST_NEAR_KINDR_QUATERNION(                               \
    PREDICATE, quat_A, quat_B, precision)                                     \
  PREDICATE##_TRUE((quat_A).getDisparityAngle(quat_B) <= precision)           \
      << "For quaternions '" << #quat_A << "' and '" << #quat_B << "'."      \
      << std::endl                                                            \
      << "Where '" << #quat_A << "' equals: " << (quat_A).getUnique()         \
      << std::endl                                                            \
      << "and '" << #quat_B << "' equals: " << (quat_B).getUnique()           \
      << std::endl                                                            \
      << "the disparity angle equals: " << (quat_A).getDisparityAngle(quat_B) \
      << " rad" << std::endl                                                  \
      << "and precision equals: " << precision << " rad";

#define __INTERNAL_GTEST_NEAR_EIGEN_QUATERNION(                               \
    PREDICATE, quat_A, quat_B, precision)                                     \
  PREDICATE##_TRUE((quat_A).isApprox((quat_B), precision))                    \
      << "For quaternions '" << #quat_A << "' and '" << #quat_B << "'."      \
      << std::endl                                                            \
      << "Where '" << #quat_A << "' equals: " << (quat_A).coeffs()            \
      << std::endl                                                            \
      << "and '" << #quat_B << "' equals: " << (quat_B).coeffs() << std::endl \
      << "and precision equals: " << precision << " rad";

#define EXPECT_NEAR_EIGEN(matrix_A, matrix_B, precision) \
  __INTERNAL_GTEST_NEAR_EIGEN(EXPECT, matrix_A, matrix_B, precision)

#define ASSERT_NEAR_EIGEN(matrix_A, matrix_B, precision) \
  __INTERNAL_GTEST_NEAR_EIGEN(ASSERT, matrix_A, matrix_B, precision)

#define EXPECT_ZERO_EIGEN(matrix_A, precision) \
  __INTERNAL_GTEST_ZERO_EIGEN(EXPECT, matrix_A, precision)

#define ASSERT_ZERO_EIGEN(matrix_A, precision) \
  __INTERNAL_GTEST_ZERO_EIGEN(ASSERT, matrix_A, precision)

#define EXPECT_NEAR_EIGEN_QUATERNION(quat_A, quat_B, precision) \
  __INTERNAL_GTEST_NEAR_EIGEN_QUATERNION(EXPECT, quat_A, quat_B, precision)

#define ASSERT_NEAR_EIGEN_QUATERNION(quat_A, quat_B, precision) \
  __INTERNAL_GTEST_NEAR_EIGEN_QUATERNION(ASSERT, quat_A, quat_B, precision)

#define EXPECT_NEAR_KINDR_QUATERNION(quat_A, quat_B, precision) \
  __INTERNAL_GTEST_NEAR_KINDR_QUATERNION(EXPECT, quat_A, quat_B, precision)

#define ASSERT_NEAR_KINDR_QUATERNION(quat_A, quat_B, precision) \
  __INTERNAL_GTEST_NEAR_KINDR_QUATERNION(ASSERT, quat_A, quat_B, precision)

#define EXPECT_NEAR_ASLAM_TRANSFORMATION(T_A, T_B, precision) \
  __INTERNAL_GTEST_NEAR_EIGEN(                                \
      EXPECT, (T_A).getTransformationMatrix(),                \
      (T_B).getTransformationMatrix(), precision)

#define ASSERT_NEAR_ASLAM_TRANSFORMATION(T_A, T_B, precision) \
  __INTERNAL_GTEST_NEAR_EIGEN(                                \
      ASSERT, (T_A).getTransformationMatrix(),                \
      (T_B).getTransformationMatrix(), precision)

#endif  // MAPLAB_COMMON_TESTING_PREDICATES_H_
1,056
package test10;

import test9.CCTest9a;

public class CCTest10bii {
	public static void main(String[] args) {
		CCTest9a e; // Check that CCTest9a is in the CC
	}
}
96