Dataset columns (one record per source file):
  max_stars_count : int64  (301 to 224k)
  text            : string (lengths 6 to 1.05M characters)
  token_count     : int64  (3 to 727k)

Each record below is listed as its max_stars_count value, then the file text, then its token_count.
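Read as records, each row pairs a repository star count and a token count with a single file's text. The sketch below is only an illustration of that record shape, not part of the dataset itself: the field names come from the schema above, the two sample values are copied from rows further down, and the filter thresholds are arbitrary.

from dataclasses import dataclass

@dataclass
class CodeSample:
    """One record of the dataset described by the schema above."""
    max_stars_count: int  # stars of the source repository (301 to ~224k in this split)
    text: str             # raw file contents (6 to ~1.05M characters)
    token_count: int      # tokenized length of `text` (3 to ~727k tokens)

# Values copied from two of the rows below; the texts are truncated here.
samples = [
    CodeSample(max_stars_count=624,
               text="package com.pinterest.doctork.errors; ...",
               token_count=185),
    CodeSample(max_stars_count=11_868,
               text="<reponame>JigarJoshi/openapi-generator # coding: utf-8 ...",
               token_count=417),
]

# Example filter: keep short files from well-starred repositories
# (both thresholds are arbitrary).
short_popular = [s for s in samples
                 if s.token_count < 500 and s.max_stars_count >= 1_000]
print(len(short_popular))  # -> 1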
624
package com.pinterest.doctork.errors;

import java.lang.Exception;

public class ClusterInfoError extends Exception {
  String[] errors;

  public ClusterInfoError() {
    this.errors = new String[1];
    this.errors[0] = "Unknown error";
  }

  public ClusterInfoError(String... errors) {
    this.errors = new String[errors.length];
    int i = 0;
    for (String error : errors) {
      this.errors[i++] = error;
    }
  }
}
185
416
// // MLModelMetadataKeys.h // CoreML // // Copyright © 2017 Apple Inc. All rights reserved. // #import <Foundation/Foundation.h> /*! * Keys to a dictionary that holds useful information about a model. * All are optional with the aim of being helpful to a developer or user * for descriptive purposes. */ typedef NSString * MLModelMetadataKey NS_STRING_ENUM; /// A short description of what the model does and/or its purpose FOUNDATION_EXPORT MLModelMetadataKey const MLModelDescriptionKey API_AVAILABLE(macos(10.13), ios(11.0), watchos(4.0), tvos(11.0)); /// A version number encoded as a string FOUNDATION_EXPORT MLModelMetadataKey const MLModelVersionStringKey API_AVAILABLE(macos(10.13), ios(11.0), watchos(4.0), tvos(11.0)); /// The author of this model FOUNDATION_EXPORT MLModelMetadataKey const MLModelAuthorKey API_AVAILABLE(macos(10.13), ios(11.0), watchos(4.0), tvos(11.0)); /// License information for the model FOUNDATION_EXPORT MLModelMetadataKey const MLModelLicenseKey API_AVAILABLE(macos(10.13), ios(11.0), watchos(4.0), tvos(11.0)); /// Any additional pertinent information specified by the model creator FOUNDATION_EXPORT MLModelMetadataKey const MLModelCreatorDefinedKey API_AVAILABLE(macos(10.13), ios(11.0), watchos(4.0), tvos(11.0));
427
577
<filename>bugtests/test333s3.py
import sys

def myfunc():
    print "myfunc"

sys.exitfunc = myfunc

raise "Exc"
46
11,868
<reponame>JigarJoshi/openapi-generator # coding: utf-8 """ OpenAPI Petstore This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501 The version of the OpenAPI document: 1.0.0 Generated by: https://openapi-generator.tech """ import sys import unittest import petstore_api from petstore_api.model.dog import Dog from petstore_api.model.legs import Legs class TestSetAttrForComposedSchema(unittest.TestCase): """TestSetAttrForComposedSchema unit test""" def setUp(self): pass def tearDown(self): pass def testSetAttrForComposedSchema(self): """Test SetAttrForComposedSchema""" try: dog_instance = Dog(class_name="Dog", color="Black") dog_instance.breed = "bulldog" dog_instance.legs = Legs(legs="4") except petstore_api.exceptions.ApiTypeError: self.assertTrue(False) if __name__ == '__main__': unittest.main()
417
435
{ "copyright_text": "Creative Commons Attribution license (reuse allowed)", "description": "<NAME>\nhttps://kiwi.pycon.org/schedule/presentation/116/\nToo often when developing Python web apps we first choose a framework and database, then code to their rules and restrictions. It's only later that we start thinking about our application's business logic. If we reverse the order and work on our logic first it can make code easier to test, and let us make more informed decisions of what frameworks and data-stores best suit our project.", "duration": 1489, "language": "eng", "recorded": "2016-09-11", "related_urls": [ "https://kiwi.pycon.org/schedule/presentation/116/" ], "speakers": [ "<NAME>" ], "tags": [], "thumbnail_url": "https://i.ytimg.com/vi/hXIAA8F8aPE/maxresdefault.jpg", "title": "Middle-Out Python Development", "videos": [ { "type": "youtube", "url": "https://www.youtube.com/watch?v=hXIAA8F8aPE" } ] }
327
435
{ "description": "Ever since a moth was discovered in the Mark II computer we've dealt with bugs in software. This talk will show different methods for determining what the code is doing. We'll start with the lowly print statement and introduce advanced forms of logging. We'll also cover several Python debuggers and talk about best practices for debugging and preventing bugs in Python code.", "language": "eng", "recorded": "2017-07-30", "slug": "introduction-to-debugging-with-python", "speakers": [ "<NAME>" ], "title": "Introduction to Debugging with Python", "videos": [ { "type": "archive.org", "url": "https://archive.org/details/pyohio_2017-Introduction_to_Debugging_with_Python" }, { "type": "youtube", "url": "https://youtu.be/BixeKmlKOJc" } ] }
264
14,668
<gh_stars>1000+ // Copyright 2019 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/signin/cookie_reminter_factory.h" #include "chrome/browser/profiles/profile.h" #include "chrome/browser/signin/identity_manager_factory.h" #include "components/keyed_service/content/browser_context_dependency_manager.h" #include "components/signin/core/browser/cookie_reminter.h" CookieReminterFactory::CookieReminterFactory() : BrowserContextKeyedServiceFactory( "CookieReminter", BrowserContextDependencyManager::GetInstance()) { DependsOn(IdentityManagerFactory::GetInstance()); } CookieReminterFactory::~CookieReminterFactory() {} // static CookieReminter* CookieReminterFactory::GetForProfile(Profile* profile) { return static_cast<CookieReminter*>( GetInstance()->GetServiceForBrowserContext(profile, true)); } // static CookieReminterFactory* CookieReminterFactory::GetInstance() { return base::Singleton<CookieReminterFactory>::get(); } KeyedService* CookieReminterFactory::BuildServiceInstanceFor( content::BrowserContext* context) const { Profile* profile = Profile::FromBrowserContext(context); signin::IdentityManager* identity_manager = IdentityManagerFactory::GetForProfile(profile); return new CookieReminter(identity_manager); }
435
18,697
""" This file has tests to deploy apps in a project created in a cluster. Test requirements: Env variables - Cattle_url, Admin Token, User Token, Cluster Name Test on at least 3 worker nodes App versions are given in 'cataloglib_appversion.json' file """ import json from .common import os from .common import pytest from .common import create_ns from .common import create_catalog_external_id from .common import validate_app_deletion from .common import get_user_client_and_cluster from .common import create_kubeconfig from .common import get_cluster_client_for_token from .common import create_project from .common import random_test_name from .common import get_defaut_question_answers from .common import validate_catalog_app from .common import get_project_client_for_token from .common import USER_TOKEN from .common import get_user_client cluster_info = {"cluster": None, "cluster_client": None, "project": None, "project_client": None, "user_client": None} catalog_filename = os.path.join(os.path.dirname(os.path.realpath(__file__)), "./resource/cataloglib_appversion.json") with open(catalog_filename, "r") as app_v: app_data = json.load(app_v) @pytest.mark.parametrize('app_name, app_version', app_data.items()) def test_catalog_app_deploy(app_name, app_version): """ Runs for app from 'cataloglib_appversion.json', creates relevant namespace and deploy them. Validates status of the app, version and answer. try block is to make sure apps are deleted even after they fail to validate. """ user_client = cluster_info["user_client"] project_client = cluster_info["project_client"] cluster_client = cluster_info["cluster_client"] cluster = cluster_info["cluster"] project = cluster_info["project"] ns = create_ns(cluster_client, cluster, project, app_name) app_ext_id = create_catalog_external_id('library', app_name, app_version) answer = get_defaut_question_answers(user_client, app_ext_id) try: app = project_client.create_app( name=random_test_name(), externalId=app_ext_id, targetNamespace=ns.name, projectId=ns.projectId, answers=answer) validate_catalog_app(project_client, app, app_ext_id, answer) except (AssertionError, RuntimeError): assert False, "App {} deployment/Validation failed.".format(app_name) finally: project_client.delete(app) validate_app_deletion(project_client, app.id) user_client.delete(ns) @pytest.fixture(scope='module', autouse="True") def create_project_client(request): """ Creates project in a cluster and collects details of user, project and cluster """ user_client, cluster = get_user_client_and_cluster() create_kubeconfig(cluster) cluster_client = get_cluster_client_for_token(cluster, USER_TOKEN) project = create_project(user_client, cluster, random_test_name("App-deployment")) project_client = get_project_client_for_token(project, USER_TOKEN) cluster_info["cluster"] = cluster cluster_info["cluster_client"] = cluster_client cluster_info["project"] = project cluster_info["project_client"] = project_client cluster_info["user_client"] = user_client def fin(): client = get_user_client() client.delete(cluster_info["project"]) request.addfinalizer(fin)
1,429
6,270
<gh_stars>1000+ [ { "type": "feature", "category": "CodePipeline", "description": "Add support for viewing details of each action execution belonging to past and latest pipeline executions that have occurred in customer's pipeline. The details include start/updated times, action execution results, input/output artifacts information, etc. Customers also have the option to add pipelineExecutionId in the input to filter the results down to a single pipeline execution." }, { "type": "feature", "category": "CognitoIdentity", "description": "This release adds tags and tag-based access control support to Amazon Cognito Identity Pools (Federated Identities). " }, { "type": "feature", "category": "MarketplaceMetering", "description": "This release increases AWS Marketplace Metering Service maximum usage quantity to 2147483647 and makes parameters usage quantity and dryrun optional." }, { "type": "feature", "category": "cors", "description": "add cors support for IAM and ResouceGroups services" } ]
359
2,753
<filename>src/shogun/distance/KernelDistance.cpp<gh_stars>1000+ /* * This software is distributed under BSD 3-clause license (see LICENSE file). * * Authors: <NAME>, <NAME> */ #include <shogun/lib/config.h> #include <shogun/lib/common.h> #include <shogun/io/SGIO.h> #include <shogun/distance/KernelDistance.h> #include <shogun/features/DenseFeatures.h> #include <utility> using namespace shogun; KernelDistance::KernelDistance() : Distance() { init(); } KernelDistance::KernelDistance(float64_t w, std::shared_ptr<Kernel> k) : Distance() { init(); kernel=std::move(k); width=w; ASSERT(kernel) } KernelDistance::KernelDistance( std::shared_ptr<Features >l, std::shared_ptr<Features >r, float64_t w , std::shared_ptr<Kernel> k) : Distance() { init(); kernel=std::move(k); width=w; ASSERT(kernel) init(std::move(l), std::move(r)); } KernelDistance::~KernelDistance() { // important to have the cleanup of Distance first, it calls get_name which // uses the distance cleanup(); } bool KernelDistance::init(std::shared_ptr<Features> l, std::shared_ptr<Features> r) { ASSERT(kernel) kernel->init(l,r); return Distance::init(l,r); } float64_t KernelDistance::compute(int32_t idx_a, int32_t idx_b) { float64_t result=kernel->kernel(idx_a, idx_b); return exp(-result/width); } void KernelDistance::init() { kernel = NULL; width = 0.0; SG_ADD(&width, "width", "Width of RBF Kernel", ParameterProperties::HYPER); SG_ADD(&kernel, "kernel", "Kernel."); }
572
716
<reponame>rnui2k/vivisect<gh_stars>100-1000 from vivisect.symboliks.common import * from vivisect.symboliks.effects import * class SymbolikTranslator: ''' The SymbolikTranslator is responsible for translating architecture specific sequences of Envi Opcode objects into a sequence of purely symbolik effects. ''' def __init__(self, vw): self.vw = vw self._eff_log = [] self._con_log = [] self._op_methods = {} for name in dir(self): if name.startswith("i_"): self._op_methods[name[2:]] = getattr(self, name) self._cur_va = None def effSetVariable(self, rname, rsym): ''' This may *only* be called with 'natural' register definitions or pure symbols... (any meta register processing is the responsiblity of the translator calling this interface!) ''' self._eff_log.append(SetVariable(self._cur_va, rname, rsym)) def effReadMemory(self, symaddr, symsize): self._eff_log.append(ReadMemory(self._cur_va, symaddr, symsize)) return Mem(symaddr, symsize) def effWriteMemory(self, symaddr, symsize, symobj): self._eff_log.append(WriteMemory(self._cur_va, symaddr, symsize, symobj)) def effFofX(self, funcsym, argsyms=None): self._eff_log.append(CallFunction(self._cur_va, funcsym, argsyms)) def effConstrain(self, addrsym, conssym): self._con_log.append(ConstrainPath(self._cur_va, addrsym, conssym)) def effDebug(self, msg): self._eff_log.append(DebugEffect(self._cur_va, msg)) def translateOpcode(self, op): self._cur_va = op.va meth = self._op_methods.get(op.mnem, None) if meth is None: # print('Symboliks: %s: %s Needs: %s' % (hex(op.va), self.__class__.__name__, repr(op))) self.effDebug("%s Needs %s" % (self.__class__.__name__, repr(op))) return DebugEffect(op.va, "%s Needs %s" % (self.__class__.__name__, repr(op))) # instruction translator methods may return branches / constraints ret = meth(op) if ret is not None: for symaddr, symcons in ret: self.effConstrain(symaddr, symcons) return ret def getEffects(self, copy=False): ''' Return the list of symboliks effects which have been logged by this translator. ''' if copy: return list(self._eff_log) return self._eff_log def getConstraints(self, copy=False): ''' Return the list of constraints which have been logged by this translator. ''' if copy: return list(self._con_log) return self._con_log def clearEffects(self): ''' Clear the translator's list of symbolik effects thus far. ''' self._eff_log = [] self._con_log = []
1,283
2,728
<gh_stars>1000+ # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- try: from .query_body_py3 import QueryBody from .column_py3 import Column from .table_py3 import Table from .query_results_py3 import QueryResults from .error_detail_py3 import ErrorDetail from .error_info_py3 import ErrorInfo from .error_response_py3 import ErrorResponse, ErrorResponseException except (SyntaxError, ImportError): from .query_body import QueryBody from .column import Column from .table import Table from .query_results import QueryResults from .error_detail import ErrorDetail from .error_info import ErrorInfo from .error_response import ErrorResponse, ErrorResponseException __all__ = [ 'QueryBody', 'Column', 'Table', 'QueryResults', 'ErrorDetail', 'ErrorInfo', 'ErrorResponse', 'ErrorResponseException', ]
364
1,086
<gh_stars>1000+ /* Copyright (c) 2013-2020 <NAME> <<EMAIL>> */ #ifndef GCOMBOBOX_H #define GCOMBOBOX_H #include <QApplication> #include "ringqt.h" #include <QComboBox> extern "C" { #include "ring.h" } class GComboBox : public QComboBox { Q_OBJECT public: struct VM *pVM; struct List *pParaList; char cactivatedEvent[RINGQT_EVENT_SIZE]; char ccurrentIndexChangedEvent[RINGQT_EVENT_SIZE]; char ceditTextChangedEvent[RINGQT_EVENT_SIZE]; char chighlightedEvent[RINGQT_EVENT_SIZE]; GComboBox(QWidget *parent,VM *pVM ); ~GComboBox(); void geteventparameters(void) ; void setactivatedEvent(const char *cStr); void setcurrentIndexChangedEvent(const char *cStr); void seteditTextChangedEvent(const char *cStr); void sethighlightedEvent(const char *cStr); const char *getactivatedEvent(void); const char *getcurrentIndexChangedEvent(void); const char *geteditTextChangedEvent(void); const char *gethighlightedEvent(void); public slots: void activatedSlot(); void currentIndexChangedSlot(); void editTextChangedSlot(); void highlightedSlot(); }; #endif
434
1,350
<filename>sdk/security/azure-resourcemanager-security/src/main/java/com/azure/resourcemanager/security/implementation/RegulatoryComplianceControlsImpl.java<gh_stars>1000+ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. // Code generated by Microsoft (R) AutoRest Code Generator. package com.azure.resourcemanager.security.implementation; import com.azure.core.http.rest.PagedIterable; import com.azure.core.http.rest.Response; import com.azure.core.http.rest.SimpleResponse; import com.azure.core.util.Context; import com.azure.core.util.logging.ClientLogger; import com.azure.resourcemanager.security.fluent.RegulatoryComplianceControlsClient; import com.azure.resourcemanager.security.fluent.models.RegulatoryComplianceControlInner; import com.azure.resourcemanager.security.models.RegulatoryComplianceControl; import com.azure.resourcemanager.security.models.RegulatoryComplianceControls; import com.fasterxml.jackson.annotation.JsonIgnore; public final class RegulatoryComplianceControlsImpl implements RegulatoryComplianceControls { @JsonIgnore private final ClientLogger logger = new ClientLogger(RegulatoryComplianceControlsImpl.class); private final RegulatoryComplianceControlsClient innerClient; private final com.azure.resourcemanager.security.SecurityManager serviceManager; public RegulatoryComplianceControlsImpl( RegulatoryComplianceControlsClient innerClient, com.azure.resourcemanager.security.SecurityManager serviceManager) { this.innerClient = innerClient; this.serviceManager = serviceManager; } public PagedIterable<RegulatoryComplianceControl> list(String regulatoryComplianceStandardName) { PagedIterable<RegulatoryComplianceControlInner> inner = this.serviceClient().list(regulatoryComplianceStandardName); return Utils.mapPage(inner, inner1 -> new RegulatoryComplianceControlImpl(inner1, this.manager())); } public PagedIterable<RegulatoryComplianceControl> list( String regulatoryComplianceStandardName, String filter, Context context) { PagedIterable<RegulatoryComplianceControlInner> inner = this.serviceClient().list(regulatoryComplianceStandardName, filter, context); return Utils.mapPage(inner, inner1 -> new RegulatoryComplianceControlImpl(inner1, this.manager())); } public RegulatoryComplianceControl get( String regulatoryComplianceStandardName, String regulatoryComplianceControlName) { RegulatoryComplianceControlInner inner = this.serviceClient().get(regulatoryComplianceStandardName, regulatoryComplianceControlName); if (inner != null) { return new RegulatoryComplianceControlImpl(inner, this.manager()); } else { return null; } } public Response<RegulatoryComplianceControl> getWithResponse( String regulatoryComplianceStandardName, String regulatoryComplianceControlName, Context context) { Response<RegulatoryComplianceControlInner> inner = this .serviceClient() .getWithResponse(regulatoryComplianceStandardName, regulatoryComplianceControlName, context); if (inner != null) { return new SimpleResponse<>( inner.getRequest(), inner.getStatusCode(), inner.getHeaders(), new RegulatoryComplianceControlImpl(inner.getValue(), this.manager())); } else { return null; } } private RegulatoryComplianceControlsClient serviceClient() { return this.innerClient; } private com.azure.resourcemanager.security.SecurityManager manager() { return this.serviceManager; } }
1,256
1,909
package org.knowm.xchange.bitfinex.v2.dto.marketdata; import static org.assertj.core.api.Assertions.assertThat; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.type.CollectionType; import java.io.IOException; import java.io.InputStream; import java.util.List; import org.junit.Test; import org.knowm.xchange.bitfinex.service.BitfinexAdapters; public class BitfinexTickerJSONTest { @Test public void testUnmarshal() throws IOException { // Read in the JSON from the example resources InputStream is = BitfinexTickerJSONTest.class.getResourceAsStream( "/org/knowm/xchange/bitfinex/v2/dto/marketdata/example-ticker-data.json"); // Use Jackson to parse it ObjectMapper mapper = new ObjectMapper(); CollectionType constructCollectionType = mapper.getTypeFactory().constructCollectionType(List.class, ArrayNode.class); List<ArrayNode> tickers0 = mapper.readValue(is, constructCollectionType); BitfinexTicker[] tickers = BitfinexAdapters.adoptBitfinexTickers(tickers0); // Verify that the example data was unmarshalled correctly // funding currency: BitfinexTickerFundingCurrency bitfinexTicker = (BitfinexTickerFundingCurrency) tickers[0]; assertThat(bitfinexTicker.getSymbol()).isEqualTo("fLEO"); assertThat(bitfinexTicker.getFrr()).isEqualTo("1.0958904109589042e-08"); assertThat(bitfinexTicker.getBid()).isEqualTo("0"); assertThat(bitfinexTicker.getBidPeriod()).isEqualTo("0"); assertThat(bitfinexTicker.getBidSize()).isEqualTo("0"); assertThat(bitfinexTicker.getAsk()).isEqualByComparingTo("1e-08"); assertThat(bitfinexTicker.getAskPeriod()).isEqualTo("2"); assertThat(bitfinexTicker.getAskSize()).isEqualTo("2663861.8810786298"); assertThat(bitfinexTicker.getDailyChange()).isEqualTo("0"); assertThat(bitfinexTicker.getDailyChangePerc()).isEqualTo("0"); assertThat(bitfinexTicker.getLastPrice()).isEqualByComparingTo("1e-08"); assertThat(bitfinexTicker.getVolume()).isEqualTo("664.1085"); assertThat(bitfinexTicker.getHigh()).isEqualByComparingTo("1e-08"); assertThat(bitfinexTicker.getLow()).isEqualByComparingTo("1e-08"); assertThat(bitfinexTicker.getPlaceHolder0()).isNull(); assertThat(bitfinexTicker.getPlaceHolder1()).isNull(); assertThat(bitfinexTicker.getFrrAmountAvailable()).isEqualTo("2594257.74114297"); // traiding pair: BitfinexTickerTraidingPair bitfinexTicker2 = (BitfinexTickerTraidingPair) tickers[1]; assertThat(bitfinexTicker2.getSymbol()).isEqualTo("tBTCUSD"); assertThat(bitfinexTicker2.getBid()).isEqualTo("7381.6"); assertThat(bitfinexTicker2.getBidSize()).isEqualTo("38.644979070000005"); assertThat(bitfinexTicker2.getAsk()).isEqualTo("7381.7"); assertThat(bitfinexTicker2.getAskSize()).isEqualByComparingTo("32.145906579999995"); assertThat(bitfinexTicker2.getDailyChange()).isEqualTo("126.6"); assertThat(bitfinexTicker2.getDailyChangePerc()).isEqualTo("0.0175"); assertThat(bitfinexTicker2.getLastPrice()).isEqualByComparingTo("7381.2"); assertThat(bitfinexTicker2.getVolume()).isEqualTo("1982.88275223"); assertThat(bitfinexTicker2.getHigh()).isEqualByComparingTo("7390"); assertThat(bitfinexTicker2.getLow()).isEqualByComparingTo("7228.1"); } }
1,318
831
/* * Copyright (C) 2018 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.android.tools.idea.ui.resourcechooser.util; import sun.awt.image.IntegerComponentRaster; import java.awt.*; import java.awt.geom.AffineTransform; import java.awt.geom.Rectangle2D; import java.awt.image.ColorModel; import java.awt.image.Raster; import java.awt.image.WritableRaster; import java.util.Arrays; /** * A {@link Paint} that will paint a checkerboard pattern. The current implementation aligns the pattern to the window (device) * coordinates, so the checkerboard remains stationary, even when the panel and the image is scrolled. */ public class CheckerboardPaint implements Paint, PaintContext { private final int myCheckerSize; private final int myDoubleCheckerSize; private static final int LIGHT_COLOR = 0xFFFFFFFF; private static final int DARK_COLOR = 0xFFC0C0C0; public CheckerboardPaint(int size) { myCheckerSize = size; myDoubleCheckerSize = 2 * myCheckerSize; } // Cached raster and pixel values. They are re-allocated whenever a larger size is required. The raster's data is updated each time // a raster is requested in #getRaster(int, int, int, int). // A checkerboard can be broken down into rows of squares of alternating colors. There are two alternating rows: those that start with // a dark color and those that start with the light color. We cache the pixel values of a single raster scan line for both types of // rows, so they don't need to be computed every time. private WritableRaster cachedRaster; private int[] cachedEvenRow = new int[0]; private int[] cachedOddRow = new int[0]; @Override public PaintContext createContext( ColorModel cm, Rectangle deviceBounds, Rectangle2D userBounds, AffineTransform xform, RenderingHints hints) { return this; } @Override public void dispose() { cachedRaster = null; } @Override public ColorModel getColorModel() { return ColorModel.getRGBdefault(); } @Override public Raster getRaster(int x, int y, int w, int h) { WritableRaster raster = cachedRaster; if (raster == null || w > raster.getWidth() || h > raster.getHeight()) { cachedRaster = raster = getColorModel().createCompatibleWritableRaster(w, h); } w = raster.getWidth(); h = raster.getHeight(); // Compute the x & y pixel offsets into a 2x2 checker tile. The checkerboard is aligned to (0, 0). int xOffset = x % myDoubleCheckerSize, yOffset = y % myDoubleCheckerSize; int[] evenRow = cachedEvenRow, oddRow = cachedOddRow; if (evenRow.length < xOffset + w || oddRow.length < xOffset + w) { // The scan line caches are sized in multiples of 2 checker squares. evenRow = new int[myDoubleCheckerSize * ((xOffset + w + myDoubleCheckerSize - 1) / myDoubleCheckerSize)]; oddRow = new int[evenRow.length]; // Fill in the cached scan lines, two squares at a time. for (int i = 0; i < evenRow.length; i += myDoubleCheckerSize) { // The even row is light, dark, light, dark, etc. 
Arrays.fill(evenRow, i, i + myCheckerSize, LIGHT_COLOR); Arrays.fill(evenRow, i + myCheckerSize, i + myDoubleCheckerSize, DARK_COLOR); // The odd row is dark, light, dark, light, etc. Arrays.fill(oddRow, i, i + myCheckerSize, DARK_COLOR); Arrays.fill(oddRow, i + myCheckerSize, i + myDoubleCheckerSize, LIGHT_COLOR); } } // The pixels array is a w * h row major storage backend of the raster data. int[] pixels = ((IntegerComponentRaster)raster).getDataStorage(); int[][] rows = new int[][] { evenRow, oddRow }; // The current checker row being copied. Initialized to align to the requested (x, y) coordinates. int curRowPointer = (yOffset < myCheckerSize) ? 0 : 1; int[] curRow = rows[curRowPointer]; // Copy the cached scan lines into the raster. for (int i = 0, done = 0, tileY = yOffset % myCheckerSize; i < h; i++, tileY++, done += w) { if (tileY >= myCheckerSize) { // We've completed a row of checker squares, switch to the other row type. //noinspection AssignmentToForLoopParameter tileY = 0; curRowPointer = (curRowPointer + 1) & 1; curRow = rows[curRowPointer]; } // The scan lines are aligned to 2x2 checker tiles, so we copy starting at xOffset. System.arraycopy(curRow, xOffset, pixels, done, w); } return raster; } @Override public int getTransparency() { return Transparency.OPAQUE; } }
1,692
2,577
<filename>engine-dmn/engine/src/test/java/org/camunda/bpm/dmn/engine/type/CustomDataTypeTransformerRegistryTest.java /* * Copyright Camunda Services GmbH and/or licensed to Camunda Services GmbH * under one or more contributor license agreements. See the NOTICE file * distributed with this work for additional information regarding copyright * ownership. Camunda licenses this file to you under the Apache License, * Version 2.0; you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.camunda.bpm.dmn.engine.type; import org.camunda.bpm.dmn.engine.DmnEngineConfiguration; import org.camunda.bpm.dmn.engine.impl.DefaultDmnEngineConfiguration; import org.camunda.bpm.dmn.engine.impl.spi.type.DmnDataTypeTransformer; import org.camunda.bpm.dmn.engine.impl.spi.type.DmnDataTypeTransformerRegistry; import org.camunda.bpm.dmn.engine.impl.type.DefaultDataTypeTransformerRegistry; import org.camunda.bpm.dmn.engine.test.DecisionResource; import org.camunda.bpm.dmn.engine.test.DmnEngineTest; import org.camunda.bpm.engine.variable.Variables; import org.camunda.bpm.engine.variable.value.TypedValue; import org.junit.Test; /** * @author <NAME> */ public class CustomDataTypeTransformerRegistryTest extends DmnEngineTest { protected static final String DMN_INPUT_FILE = "org/camunda/bpm/dmn/engine/type/CustomInputDefinition.dmn"; protected static final String DMN_OUTPUT_FILE = "org/camunda/bpm/dmn/engine/type/CustomOutputDefinition.dmn"; @Override public DmnEngineConfiguration getDmnEngineConfiguration() { DefaultDmnEngineConfiguration configuration = new DefaultDmnEngineConfiguration(); configuration.getTransformer().setDataTypeTransformerRegistry(new CustomDataTypeTransformerRegistry()); configuration.enableFeelLegacyBehavior(true); return configuration; } @Test @DecisionResource(resource = DMN_OUTPUT_FILE) public void customOutputTransformer() { variables.put("output", 21); assertThatDecisionTableResult() .hasSingleResult() .hasSingleEntry(CustomDataTypeTransformer.CUSTOM_OBJECT.getValue()); } @Test @DecisionResource(resource = DMN_INPUT_FILE) public void customInputTransformer() { variables.put("input", 21); assertThatDecisionTableResult() .hasSingleResult() .hasSingleEntry("isCustom"); } protected static class CustomDataTypeTransformerRegistry implements DmnDataTypeTransformerRegistry { protected final DmnDataTypeTransformerRegistry defaultRegistry = new DefaultDataTypeTransformerRegistry(); @Override public DmnDataTypeTransformer getTransformer(String typeName) { if (typeName.equals("custom")) { return new CustomDataTypeTransformer(); } else { return defaultRegistry.getTransformer(typeName); } } @Override public void addTransformer(String typeName, DmnDataTypeTransformer transformer) { defaultRegistry.addTransformer(typeName, transformer); } } protected static class CustomDataTypeTransformer implements DmnDataTypeTransformer { protected static final TypedValue CUSTOM_OBJECT = Variables.integerValue(42); @Override public TypedValue transform(Object value) throws IllegalArgumentException { return CUSTOM_OBJECT; } } }
1,140
4,538
<gh_stars>1000+ /* * Copyright (C) 2015-2020 Alibaba Group Holding Limited */ #ifndef AOS_PM_H #define AOS_PM_H typedef enum { AOS_CHARGER_STAT_SHUTDOWN = 0, AOS_CHARGER_STAT_CHECK, AOS_CHARGER_STAT_TRICKLE, AOS_CHARGER_STAT_PRE, AOS_CHARGER_STAT_CC, AOS_CHARGER_STAT_CV, AOS_CHARGER_STAT_TERMINAL, AOS_CHARGER_STAT_FAULT } aos_charger_state_t; /** * System enter sleep * * @return 0 : on success, negative number : if an error occurred with any step */ int aos_system_sleep(void); /** * Enable system autosleep interface * * @param[in] mode 1 - autosleep enable, 0 - autosleep disable * * @return 0 : on success, negative number : if an error occurred with any step */ int aos_system_autosleep(int mode); /** * Accquire wakelock * * @param[in] wakelock wakelock instance * * @return 0 : on success, negative number : if an error occurred with any step */ int aos_wakelock_lock(void *wakelock); /** * Release wakelock * * @param[in] wakelock wakelock instance * * @return 0 : on success, negative number : if an error occurred with any step */ int aos_wakelock_unlock(void *wakelock); /** * Accquire wakelock within given time * * @param[in] wakelock wakelock instance * @param[in] msec wakelock keep time in ms * * @return 0 : on success, negative number : if an error occurred with any step */ int aos_wakelock_timedlock(void *wakelock, unsigned int msec); /** * Create wakelock * * @param[in] name wakelock name * * @return 0 : on success, negative number : if an error occurred with any step */ void *aos_wakelock_create(const char *name); /** * Destroy wakelock * * @param[in] wakelock wakelock instance * * @return 0 : on success, negative number : if an error occurred with any step */ void aos_wakelock_release(void *wakelock); /** * Register power key state notifier * * @param[in] cb power key notifier callback (argment: 1 - key down, 0 - key up) * * @return 0 : on success, negative number : if an error occurred with any step */ int aos_pwrkey_notify_register(void (*cb)(int)); /** * Device power down * * @return 0 : on success, negative number : if an error occurred with any step */ int aos_power_down(void); /** * Device power reset * * @return 0 : on success, negative number : if an error occurred with any step */ int aos_power_reset(void); /** * Get battery connection state * * @param[in] state (1 - connected, 0 - disconnected) * * @return 0 : on success, negative number : if an error occurred with any step */ int aos_battery_connect_state_get(int *state); /** * Get battery connection state * * @param[in] store voltage in mV * * @return 0 : on success, negative number : if an error occurred with any step */ int aos_battery_voltage_get(int *voltage); /** * Get battery level * * @param[in] store battery level (0 - 100) * * @return 0 : on success, negative number : if an error occurred with any step */ int aos_battery_level_get(int *level); /** * Get battery temperature * * @param[in] store temperature * * @return 0 : on success, negative number : if an error occurred with any step */ int aos_battery_temperature_get(int *temperature); /** * Get charger connection state * * @param[in] store connection state (1 - connected, 0 - disconnected) * * @return 0 : on success, negative number : if an error occurred with any step */ int aos_charger_connect_state_get(int *state); /** * Get charger state * * @param[in] store charger state * * @return 0 : on success, negative number : if an error occurred with any step */ int aos_charger_state_get(aos_charger_state_t *state); /** * Get charger current * * @param[in] store 
charger current in mA * * @return 0 : on success, negative number : if an error occurred with any step */ int aos_charger_current_get(int *current); /** * Set charger switch (1 - ON, 0 - OFF) * * @param[in] charger switch onoff * * @return 0 : on success, negative number : if an error occurred with any step */ int aos_charger_switch_set(int enable); /** * Register charger state notify * * @param[in] charger state notify callback (state: 0 - disconnect, 1 - connect) * * @return 0 : on success, negative number : if an error occurred with any step */ int aos_charger_state_notify_register(void (*cb)(int state)); #endif /* AOS_PM_H */
1,438
6,224
/* SPDX-License-Identifier: MIT */ /****************************************************************************** * arch-arm.h * * Guest OS interface to ARM Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright 2011 (C) Citrix Systems */ #ifndef __XEN_PUBLIC_ARCH_ARM_H__ #define __XEN_PUBLIC_ARCH_ARM_H__ #include <kernel.h> /* * `incontents 50 arm_abi Hypercall Calling Convention * * A hypercall is issued using the ARM HVC instruction. * * A hypercall can take up to 5 arguments. These are passed in * registers, the first argument in x0/r0 (for arm64/arm32 guests * respectively irrespective of whether the underlying hypervisor is * 32- or 64-bit), the second argument in x1/r1, the third in x2/r2, * the forth in x3/r3 and the fifth in x4/r4. * * The hypercall number is passed in r12 (arm) or x16 (arm64). In both * cases the relevant ARM procedure calling convention specifies this * is an inter-procedure-call scratch register (e.g. for use in linker * stubs). This use does not conflict with use during a hypercall. * * The HVC ISS must contain a Xen specific TAG: XEN_HYPERCALL_TAG. * * The return value is in x0/r0. * * The hypercall will clobber x16/r12 and the argument registers used * by that hypercall (except r0 which is the return value) i.e. in * addition to x16/r12 a 2 argument hypercall will clobber x1/r1 and a * 4 argument hypercall will clobber x1/r1, x2/r2 and x3/r3. * * Parameter structs passed to hypercalls are laid out according to * the Procedure Call Standard for the ARM Architecture (AAPCS, AKA * EABI) and Procedure Call Standard for the ARM 64-bit Architecture * (AAPCS64). Where there is a conflict the 64-bit standard should be * used regardless of guest type. Structures which are passed as * hypercall arguments are always little endian. * * All memory which is shared with other entities in the system * (including the hypervisor and other guests) must reside in memory * which is mapped as Normal Inner Write-Back Outer Write-Back Inner-Shareable. * This applies to: * - hypercall arguments passed via a pointer to guest memory. * - memory shared via the grant table mechanism (including PV I/O * rings etc). * - memory shared with the hypervisor (struct shared_info, struct * vcpu_info, the grant table, etc). * * Any cache allocation hints are acceptable. */ /* * `incontents 55 arm_hcall Supported Hypercalls * * Xen on ARM makes extensive use of hardware facilities and therefore * only a subset of the potential hypercalls are required. 
* * Since ARM uses second stage paging any machine/physical addresses * passed to hypercalls are Guest Physical Addresses (Intermediate * Physical Addresses) unless otherwise noted. * * The following hypercalls (and sub operations) are supported on the * ARM platform. Other hypercalls should be considered * unavailable/unsupported. * * HYPERVISOR_memory_op * All generic sub-operations * * HYPERVISOR_domctl * All generic sub-operations, with the exception of: * * XEN_DOMCTL_irq_permission (not yet implemented) * * HYPERVISOR_sched_op * All generic sub-operations, with the exception of: * * SCHEDOP_block -- prefer wfi hardware instruction * * HYPERVISOR_console_io * All generic sub-operations * * HYPERVISOR_xen_version * All generic sub-operations * * HYPERVISOR_event_channel_op * All generic sub-operations * * HYPERVISOR_physdev_op * No sub-operations are currenty supported * * HYPERVISOR_sysctl * All generic sub-operations, with the exception of: * * XEN_SYSCTL_page_offline_op * * XEN_SYSCTL_get_pmstat * * XEN_SYSCTL_pm_op * * HYPERVISOR_hvm_op * Exactly these sub-operations are supported: * * HVMOP_set_param * * HVMOP_get_param * * HYPERVISOR_grant_table_op * All generic sub-operations * * HYPERVISOR_vcpu_op * Exactly these sub-operations are supported: * * VCPUOP_register_vcpu_info * * VCPUOP_register_runstate_memory_area * * * Other notes on the ARM ABI: * * - struct start_info is not exported to ARM guests. * * - struct shared_info is mapped by ARM guests using the * HYPERVISOR_memory_op sub-op XENMEM_add_to_physmap, passing * XENMAPSPACE_shared_info as space parameter. * * - All the per-cpu struct vcpu_info are mapped by ARM guests using the * HYPERVISOR_vcpu_op sub-op VCPUOP_register_vcpu_info, including cpu0 * struct vcpu_info. * * - The grant table is mapped using the HYPERVISOR_memory_op sub-op * XENMEM_add_to_physmap, passing XENMAPSPACE_grant_table as space * parameter. The memory range specified under the Xen compatible * hypervisor node on device tree can be used as target gpfn for the * mapping. * * - Xenstore is initialized by using the two hvm_params * HVM_PARAM_STORE_PFN and HVM_PARAM_STORE_EVTCHN. They can be read * with the HYPERVISOR_hvm_op sub-op HVMOP_get_param. * * - The paravirtualized console is initialized by using the two * hvm_params HVM_PARAM_CONSOLE_PFN and HVM_PARAM_CONSOLE_EVTCHN. They * can be read with the HYPERVISOR_hvm_op sub-op HVMOP_get_param. * * - Event channel notifications are delivered using the percpu GIC * interrupt specified under the Xen compatible hypervisor node on * device tree. * * - The device tree Xen compatible node is fully described under Linux * at Documentation/devicetree/bindings/arm/xen.txt. */ #define XEN_HYPERCALL_TAG 0XEA1 #define int64_aligned_t int64_t __aligned(8) #define uint64_aligned_t uint64_t __aligned(8) #ifndef __ASSEMBLY__ #define ___DEFINE_XEN_GUEST_HANDLE(name, type) \ typedef union { type *p; unsigned long q; } \ __guest_handle_ ## name; \ typedef union { type *p; uint64_aligned_t q; } \ __guest_handle_64_ ## name /* * XEN_GUEST_HANDLE represents a guest pointer, when passed as a field * in a struct in memory. On ARM is always 8 bytes sizes and 8 bytes * aligned. * XEN_GUEST_HANDLE_PARAM represents a guest pointer, when passed as an * hypercall argument. It is 4 bytes on aarch32 and 8 bytes on aarch64. 
*/ #define __DEFINE_XEN_GUEST_HANDLE(name, type) \ ___DEFINE_XEN_GUEST_HANDLE(name, type); \ ___DEFINE_XEN_GUEST_HANDLE(const_##name, const type) #define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name) #define __XEN_GUEST_HANDLE(name) __guest_handle_64_ ## name #define XEN_GUEST_HANDLE(name) __XEN_GUEST_HANDLE(name) #define XEN_GUEST_HANDLE_PARAM(name) __guest_handle_ ## name #define set_xen_guest_handle_raw(hnd, val) \ do { \ __typeof__(&(hnd)) _sxghr_tmp = &(hnd); \ _sxghr_tmp->q = 0; \ _sxghr_tmp->p = val; \ } while (0) #define set_xen_guest_handle(hnd, val) set_xen_guest_handle_raw(hnd, val) typedef uint64_t xen_pfn_t; #define PRI_xen_pfn PRIx64 #define PRIu_xen_pfn PRIu64 typedef uint64_t xen_ulong_t; #define PRI_xen_ulong PRIx64 /* * Maximum number of virtual CPUs in legacy multi-processor guests. * Only one. All other VCPUS must use VCPUOP_register_vcpu_info. */ #define XEN_LEGACY_MAX_VCPUS 1 struct arch_vcpu_info { }; typedef struct arch_vcpu_info arch_vcpu_info_t; struct arch_shared_info { }; typedef struct arch_shared_info arch_shared_info_t; typedef uint64_t xen_callback_t; #endif /* __ASSEMBLY__ */ #endif /* __XEN_PUBLIC_ARCH_ARM_H__ */
2,826
4,081
/* * The Alluxio Open Foundation licenses this work under the Apache License, version 2.0 * (the "License"). You may not use this work except in compliance with the License, which is * available at www.apache.org/licenses/LICENSE-2.0 * * This software is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, * either express or implied, as more fully set forth in the License. * * See the NOTICE file distributed with this work for information regarding copyright ownership. */ package alluxio.job.plan.load; import com.fasterxml.jackson.databind.ObjectMapper; import org.junit.Assert; import org.junit.Test; import java.util.Collections; /** * Tests {@link LoadConfig}. */ public final class LoadConfigTest { @Test public void jsonTest() throws Exception { LoadConfig config = new LoadConfig("/path/to/load", 3, Collections.EMPTY_SET, Collections.EMPTY_SET, Collections.EMPTY_SET, Collections.EMPTY_SET); ObjectMapper mapper = new ObjectMapper(); LoadConfig other = mapper.readValue(mapper.writeValueAsString(config), LoadConfig.class); checkEquality(config, other); } @Test public void nullTest() { try { new LoadConfig(null, null, Collections.EMPTY_SET, Collections.EMPTY_SET, Collections.EMPTY_SET, Collections.EMPTY_SET); Assert.fail("Cannot create config with null path"); } catch (NullPointerException exception) { Assert.assertEquals("The file path cannot be null", exception.getMessage()); } } public void checkEquality(LoadConfig a, LoadConfig b) { Assert.assertEquals(a.getFilePath(), b.getFilePath()); Assert.assertEquals(a.getReplication(), b.getReplication()); Assert.assertEquals(a, b); } }
561
4,829
// // Copyright <NAME> (<EMAIL>), <NAME> (<EMAIL>) 2014-2021 // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #pragma once #include "tl_writer_td.h" #include <cstdint> #include <string> #include <vector> namespace td { class TD_TL_writer_h : public TD_TL_writer { protected: const std::vector<std::string> ext_include; static std::string forward_declaration(std::string type); bool need_arg_mask(const tl::arg &a, bool can_be_stored) const; public: TD_TL_writer_h(const std::string &tl_name, const std::string &string_type, const std::string &bytes_type, const std::vector<std::string> &ext_include) : TD_TL_writer(tl_name, string_type, bytes_type), ext_include(ext_include) { } std::string gen_output_begin() const override; std::string gen_output_end() const override; std::string gen_forward_class_declaration(const std::string &class_name, bool is_proxy) const override; std::string gen_class_begin(const std::string &class_name, const std::string &base_class_name, bool is_proxy) const override; std::string gen_class_end() const override; std::string gen_class_alias(const std::string &class_name, const std::string &alias_name) const override; std::string gen_field_definition(const std::string &class_name, const std::string &type_name, const std::string &field_name) const override; std::string gen_flags_definitions(const tl::tl_combinator *t, bool can_be_stored) const override; std::string gen_vars(const tl::tl_combinator *t, const tl::tl_tree_type *result_type, std::vector<tl::var_description> &vars) const override; std::string gen_function_vars(const tl::tl_combinator *t, std::vector<tl::var_description> &vars) const override; std::string gen_uni(const tl::tl_tree_type *result_type, std::vector<tl::var_description> &vars, bool check_negative) const override; std::string gen_constructor_id_store(std::int32_t id, int storer_type) const override; std::string gen_field_fetch(int field_num, const tl::arg &a, std::vector<tl::var_description> &vars, bool flat, int parser_type) const override; std::string gen_field_store(const tl::arg &a, std::vector<tl::var_description> &vars, bool flat, int storer_type) const override; std::string gen_type_fetch(const std::string &field_name, const tl::tl_tree_type *tree_type, const std::vector<tl::var_description> &vars, int parser_type) const override; std::string gen_type_store(const std::string &field_name, const tl::tl_tree_type *tree_type, const std::vector<tl::var_description> &vars, int storer_type) const override; std::string gen_var_type_fetch(const tl::arg &a) const override; std::string gen_get_id(const std::string &class_name, std::int32_t id, bool is_proxy) const override; std::string gen_function_result_type(const tl::tl_tree *result) const override; std::string gen_fetch_function_begin(const std::string &parser_name, const std::string &class_name, const std::string &parent_class_name, int arity, int field_count, std::vector<tl::var_description> &vars, int parser_type) const override; std::string gen_fetch_function_end(bool has_parent, int field_count, const std::vector<tl::var_description> &vars, int parser_type) const override; std::string gen_fetch_function_result_begin(const std::string &parser_name, const std::string &class_name, const tl::tl_tree *result) const override; std::string gen_fetch_function_result_end() const override; std::string gen_fetch_function_result_any_begin(const std::string &parser_name, const std::string &class_name, 
bool is_proxy) const override; std::string gen_fetch_function_result_any_end(bool is_proxy) const override; std::string gen_store_function_begin(const std::string &storer_name, const std::string &class_name, int arity, std::vector<tl::var_description> &vars, int storer_type) const override; std::string gen_store_function_end(const std::vector<tl::var_description> &vars, int storer_type) const override; std::string gen_fetch_switch_begin() const override; std::string gen_fetch_switch_case(const tl::tl_combinator *t, int arity) const override; std::string gen_fetch_switch_end() const override; std::string gen_constructor_begin(int field_count, const std::string &class_name, bool is_default) const override; std::string gen_constructor_field_init(int field_num, const std::string &class_name, const tl::arg &a, bool is_default) const override; std::string gen_constructor_end(const tl::tl_combinator *t, int field_count, bool is_default) const override; }; } // namespace td
2,062
14,668
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // MediaFileSystemRegistry registers pictures directories and media devices as // File API filesystems and keeps track of the path to filesystem ID mappings. #ifndef CHROME_BROWSER_MEDIA_GALLERIES_MEDIA_FILE_SYSTEM_REGISTRY_H_ #define CHROME_BROWSER_MEDIA_GALLERIES_MEDIA_FILE_SYSTEM_REGISTRY_H_ #include <map> #include <memory> #include <string> #include <utility> #include <vector> #include "base/files/file.h" #include "base/files/file_path.h" #include "base/memory/ref_counted.h" #include "chrome/browser/media_galleries/media_galleries_preferences.h" #include "components/keyed_service/core/keyed_service_shutdown_notifier.h" #include "components/storage_monitor/removable_storage_observer.h" class ExtensionGalleriesHost; class GalleryWatchManager; class MediaFileSystemContext; class MediaGalleriesPreferences; class Profile; namespace content { class WebContents; } namespace extensions { class Extension; } // Contains information about a particular filesystem being provided to a // client, including metadata like the name and ID, and API handles like the // fsid (filesystem ID) used to hook up the API objects. struct MediaFileSystemInfo { MediaFileSystemInfo(const std::u16string& fs_name, const base::FilePath& fs_path, const std::string& filesystem_id, MediaGalleryPrefId pref_id, const std::string& transient_device_id, bool removable, bool media_device); MediaFileSystemInfo(); MediaFileSystemInfo(const MediaFileSystemInfo& other); ~MediaFileSystemInfo(); std::u16string name; base::FilePath path; std::string fsid; MediaGalleryPrefId pref_id; std::string transient_device_id; bool removable; bool media_device; }; typedef base::OnceCallback<void(const std::vector<MediaFileSystemInfo>&)> MediaFileSystemsCallback; // Tracks usage of filesystems by extensions. // This object lives on the UI thread. class MediaFileSystemRegistry : public storage_monitor::RemovableStorageObserver, public MediaGalleriesPreferences::GalleryChangeObserver { public: MediaFileSystemRegistry(); MediaFileSystemRegistry(const MediaFileSystemRegistry&) = delete; MediaFileSystemRegistry& operator=(const MediaFileSystemRegistry&) = delete; ~MediaFileSystemRegistry() override; // Passes to |callback| the list of media filesystem IDs and paths for a // given WebContents. void GetMediaFileSystemsForExtension(content::WebContents* contents, const extensions::Extension* extension, MediaFileSystemsCallback callback); // Attempt to register the file system for |pref_id|. If |extension| does not // have permission to |pref_id|, sends |callback| FILE_ERROR_NOT_FOUND. void RegisterMediaFileSystemForExtension( content::WebContents* contents, const extensions::Extension* extension, MediaGalleryPrefId pref_id, base::OnceCallback<void(base::File::Error result)> callback); // Returns the media galleries preferences for the specified |profile|. // Caller is responsible for ensuring that the preferences are initialized // before use. MediaGalleriesPreferences* GetPreferences(Profile* profile); GalleryWatchManager* gallery_watch_manager(); // RemovableStorageObserver implementation. void OnRemovableStorageDetached( const storage_monitor::StorageInfo& info) override; private: class MediaFileSystemContextImpl; friend class MediaFileSystemContextImpl; friend class MediaFileSystemRegistryTest; friend class TestMediaFileSystemContext; // Map an extension to the ExtensionGalleriesHost. 
typedef std::map<std::string /*extension_id*/, scoped_refptr<ExtensionGalleriesHost>> ExtensionHostMap; // Map a profile and extension to the ExtensionGalleriesHost. typedef std::map<Profile*, ExtensionHostMap> ExtensionGalleriesHostMap; // Map a profile to a shutdown notification subscription. typedef std::map<Profile*, base::CallbackListSubscription> ProfileSubscriptionMap; void OnPermissionRemoved(MediaGalleriesPreferences* pref, const std::string& extension_id, MediaGalleryPrefId pref_id) override; void OnGalleryRemoved(MediaGalleriesPreferences* pref, MediaGalleryPrefId pref_id) override; // Look up or create the extension gallery host. ExtensionGalleriesHost* GetExtensionGalleryHost( Profile* profile, MediaGalleriesPreferences* preferences, const std::string& extension_id); void OnExtensionGalleriesHostEmpty(Profile* profile, const std::string& extension_id); void OnProfileShutdown(Profile* profile); // This map owns all the ExtensionGalleriesHost objects created. ExtensionGalleriesHostMap extension_hosts_map_; // The above map uses raw Profile pointers as keys. This map removes those // entries when the Profile is destroyed. ProfileSubscriptionMap profile_subscription_map_; std::unique_ptr<MediaFileSystemContext> file_system_context_; std::unique_ptr<GalleryWatchManager> gallery_watch_manager_; }; #endif // CHROME_BROWSER_MEDIA_GALLERIES_MEDIA_FILE_SYSTEM_REGISTRY_H_
1,816
3,066
/* * Licensed to Crate.io GmbH ("Crate") under one or more contributor * license agreements. See the NOTICE file distributed with this work for * additional information regarding copyright ownership. Crate licenses * this file to you under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. You may * obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * However, if you have executed another commercial license agreement * with Crate these terms will supersede the license and you may use the * software solely pursuant to the terms of the relevant commercial agreement. */ package io.crate.expression.scalar.arithmetic; import io.crate.expression.scalar.ScalarTestCase; import org.junit.Test; public class PowerFunctionTest extends ScalarTestCase { @Test public void testPowerWithIntegers() { assertEvaluate("power(2,4)", 16.0); } @Test public void testPowerWithDecimalTypes() { assertEvaluate("power(2.0,4.0)", 16.0); } @Test public void testNegativeExponent() { assertEvaluate("power(2,-3)", 0.125); } @Test public void testNegativeDecimalTypeExponent() { assertEvaluate("power(2,-3.0)", 0.125); } @Test public void testNegativeBaseWithPositiveExponent() { assertEvaluate("power(-2,3)", -8.0); } @Test public void testNegativeBaseAndExponent() { assertEvaluate("power(-2,-3)", -0.125); } @Test public void testNegativeBaseAndExponentDecimalType() { assertEvaluate("power(-2.0,-3.0)", -0.125); } @Test public void testInvalidNumberOfArguments() { expectedException.expectMessage("Unknown function: power(2)," + " no overload found for matching argument types: (integer)."); assertEvaluate("power(2)", null); } }
774
1,338
/* * Copyright 2005-2012 Haiku, Inc. All Rights Reserved. * Distributed under the terms of the MIT License. */ #ifndef _WCTYPE_H_ #define _WCTYPE_H_ #include <wchar.h> typedef int wctrans_t; #ifdef __cplusplus extern "C" { #endif extern int iswalnum(wint_t wc); extern int iswalpha(wint_t wc); extern int iswcntrl(wint_t wc); extern int iswctype(wint_t wc, wctype_t desc); extern int iswdigit(wint_t wc); extern int iswgraph(wint_t wc); extern int iswlower(wint_t wc); extern int iswprint(wint_t wc); extern int iswpunct(wint_t wc); extern int iswspace(wint_t wc); extern int iswupper(wint_t wc); extern int iswxdigit(wint_t wc); extern int iswblank(wint_t wc); extern wint_t towctrans(wint_t wc, wctrans_t transition); extern wint_t towlower(wint_t wc); extern wint_t towupper(wint_t wc); extern wctrans_t wctrans(const char *charClass); extern wctype_t wctype(const char *property); #ifdef __cplusplus } #endif #endif /* _WCTYPE_H_ */
461
575
// Copyright 2016 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CC_LAYERS_LAYER_LIST_ITERATOR_H_ #define CC_LAYERS_LAYER_LIST_ITERATOR_H_ #include <stdlib.h> #include <vector> #include "cc/cc_export.h" namespace cc { class Layer; // This visits a tree of layers in drawing order. class CC_EXPORT LayerListIterator { public: explicit LayerListIterator(Layer* root_layer); LayerListIterator(const LayerListIterator& other); virtual ~LayerListIterator(); bool operator==(const LayerListIterator& other) const { return current_layer_ == other.current_layer_; } bool operator!=(const LayerListIterator& other) const { return !(*this == other); } // We will only support prefix increment. virtual LayerListIterator& operator++(); Layer* operator->() const { return current_layer_; } Layer* operator*() const { return current_layer_; } protected: // The implementation of this iterator is currently tied tightly to the layer // tree, but it should be straightforward to reimplement in terms of a list // when it's ready. Layer* current_layer_; std::vector<size_t> list_indices_; }; class CC_EXPORT LayerListReverseIterator : public LayerListIterator { public: explicit LayerListReverseIterator(Layer* root_layer); ~LayerListReverseIterator() override; // We will only support prefix increment. LayerListIterator& operator++() override; private: void DescendToRightmostInSubtree(); }; } // namespace cc #endif // CC_LAYERS_LAYER_LIST_ITERATOR_H_
492
372
<filename>lwsecurity/servlets/src/com/likewise/auth/filter/spnego/LikewiseHttpServletRequestWrapper.java /* * Copyright (c) BeyondTrust Software. All rights reserved. * * Module Name: * * LikewiseHttpServletRequestWrapper.java * * Abstract: * * Likewise Authentication * * HTTP Servlet Request Wrapper * * Authors: <NAME> (<EMAIL>) * */ package com.likewise.auth.filter.spnego; import java.security.Principal; import java.util.List; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletRequestWrapper; /** * Wraps the original HTTP servlet request in such a way that the authenticated * remote user principal and associated roles can be provisioned. * <p> * This information is useful to subsequent chained servlet filter requests to * authorize the current authenticated remote user principal. * </p> */ public final class LikewiseHttpServletRequestWrapper extends HttpServletRequestWrapper { private Principal _remoteUser; private List<String> _roles; /** * Builds a Likewise HTTP servlet request wrapper using the original request */ public LikewiseHttpServletRequestWrapper( HttpServletRequest request ) { super(request); } /** * Builds a Likewise HTTP servlet request wrapper using the original request * the remote authenticated user principal and the list of roles the remote * principal is a member of. */ public LikewiseHttpServletRequestWrapper( HttpServletRequest request, Principal remoteUser, List<String> roles ) { super(request); _remoteUser = remoteUser; _roles = roles; } /** * Retrieves the user principal object associated with the authenticated * remote user. * * @return Remote user principal */ @Override public Principal getUserPrincipal() { if (_remoteUser != null) { return _remoteUser; } else { return ((HttpServletRequest)getRequest()).getUserPrincipal(); } } /** * Retrieves the name of the remote user who has been authenticated successfully. * * @return SAM Account name of the authenticated user principal. */ @Override public String getRemoteUser() { if (_remoteUser != null) { return _remoteUser.getName(); } else { return ((HttpServletRequest)getRequest()).getRemoteUser(); } } /** * Checks if the remote user is a member of the specified role. * * @param role Role the remote user might be a member of. * @return true if the remote user is a member of the specified role. */ @Override public boolean isUserInRole( String role ) { if (_roles != null) { return _roles.contains(role); } else { return ((HttpServletRequest)getRequest()).isUserInRole(role); } } }
1,077
1,585
/* * Copyright (C) by Argonne National Laboratory. * See COPYRIGHT in top-level directory. */ #include "mpl.h" #include <dlfcn.h> #include <assert.h> #define CUDA_ERR_CHECK(ret) if (unlikely((ret) != cudaSuccess)) goto fn_fail #define CU_ERR_CHECK(ret) if (unlikely((ret) != CUDA_SUCCESS)) goto fn_fail typedef struct gpu_free_hook { void (*free_hook) (void *dptr); struct gpu_free_hook *next; } gpu_free_hook_s; static gpu_free_hook_s *free_hook_chain = NULL; static CUresult CUDAAPI(*sys_cuMemFree) (CUdeviceptr dptr); static cudaError_t CUDARTAPI(*sys_cudaFree) (void *dptr); static int gpu_mem_hook_init(); int MPL_gpu_query_pointer_attr(const void *ptr, MPL_pointer_attr_t * attr) { cudaError_t ret; struct cudaPointerAttributes ptr_attr; ret = cudaPointerGetAttributes(&ptr_attr, ptr); if (ret == cudaSuccess) { switch (ptr_attr.type) { case cudaMemoryTypeUnregistered: attr->type = MPL_GPU_POINTER_UNREGISTERED_HOST; attr->device = ptr_attr.device; break; case cudaMemoryTypeHost: attr->type = MPL_GPU_POINTER_REGISTERED_HOST; attr->device = ptr_attr.device; break; case cudaMemoryTypeDevice: attr->type = MPL_GPU_POINTER_DEV; attr->device = ptr_attr.device; break; case cudaMemoryTypeManaged: attr->type = MPL_GPU_POINTER_MANAGED; attr->device = ptr_attr.device; break; } } else if (ret == cudaErrorInvalidValue) { attr->type = MPL_GPU_POINTER_UNREGISTERED_HOST; attr->device = -1; } else { goto fn_fail; } fn_exit: return MPL_SUCCESS; fn_fail: return MPL_ERR_GPU_INTERNAL; } int MPL_gpu_ipc_handle_create(const void *ptr, MPL_gpu_ipc_mem_handle_t * ipc_handle) { cudaError_t ret; ret = cudaIpcGetMemHandle(ipc_handle, (void *) ptr); CUDA_ERR_CHECK(ret); fn_exit: return MPL_SUCCESS; fn_fail: return MPL_ERR_GPU_INTERNAL; } int MPL_gpu_ipc_handle_map(MPL_gpu_ipc_mem_handle_t ipc_handle, MPL_gpu_device_handle_t dev_handle, void **ptr) { cudaError_t ret; int prev_devid; cudaGetDevice(&prev_devid); cudaSetDevice(dev_handle); ret = cudaIpcOpenMemHandle(ptr, ipc_handle, cudaIpcMemLazyEnablePeerAccess); CUDA_ERR_CHECK(ret); fn_exit: cudaSetDevice(prev_devid); return MPL_SUCCESS; fn_fail: return MPL_ERR_GPU_INTERNAL; } int MPL_gpu_ipc_handle_unmap(void *ptr) { cudaError_t ret; ret = cudaIpcCloseMemHandle(ptr); CUDA_ERR_CHECK(ret); fn_exit: return MPL_SUCCESS; fn_fail: return MPL_ERR_GPU_INTERNAL; } int MPL_gpu_malloc_host(void **ptr, size_t size) { cudaError_t ret; ret = cudaMallocHost(ptr, size); CUDA_ERR_CHECK(ret); fn_exit: return MPL_SUCCESS; fn_fail: return MPL_ERR_GPU_INTERNAL; } int MPL_gpu_free_host(void *ptr) { cudaError_t ret; ret = cudaFreeHost(ptr); CUDA_ERR_CHECK(ret); fn_exit: return MPL_SUCCESS; fn_fail: return MPL_ERR_GPU_INTERNAL; } int MPL_gpu_register_host(const void *ptr, size_t size) { cudaError_t ret; ret = cudaHostRegister((void *) ptr, size, cudaHostRegisterDefault); CUDA_ERR_CHECK(ret); fn_exit: return MPL_SUCCESS; fn_fail: return MPL_ERR_GPU_INTERNAL; } int MPL_gpu_unregister_host(const void *ptr) { cudaError_t ret; ret = cudaHostUnregister((void *) ptr); CUDA_ERR_CHECK(ret); fn_exit: return MPL_SUCCESS; fn_fail: return MPL_ERR_GPU_INTERNAL; } int MPL_gpu_malloc(void **ptr, size_t size, MPL_gpu_device_handle_t h_device) { int mpl_errno = MPL_SUCCESS; int prev_devid; cudaError_t ret; cudaGetDevice(&prev_devid); cudaSetDevice(h_device); ret = cudaMalloc(ptr, size); CUDA_ERR_CHECK(ret); fn_exit: cudaSetDevice(prev_devid); return mpl_errno; fn_fail: mpl_errno = MPL_ERR_GPU_INTERNAL; goto fn_exit; } int MPL_gpu_free(void *ptr) { cudaError_t ret; ret = cudaFree(ptr); CUDA_ERR_CHECK(ret); fn_exit: 
return MPL_SUCCESS; fn_fail: return MPL_ERR_GPU_INTERNAL; } int MPL_gpu_init(int *device_count, int *max_dev_id_ptr) { int count, max_dev_id = -1; cudaError_t ret = cudaGetDeviceCount(&count); CUDA_ERR_CHECK(ret); char *visible_devices = getenv("CUDA_VISIBLE_DEVICES"); if (visible_devices) { uintptr_t len = strlen(visible_devices); char *devices = MPL_malloc(len + 1, MPL_MEM_OTHER); char *free_ptr = devices; memcpy(devices, visible_devices, len + 1); for (int i = 0; i < count; i++) { int global_dev_id; char *tmp = strtok(devices, ","); assert(tmp); global_dev_id = atoi(tmp); if (global_dev_id > max_dev_id) max_dev_id = global_dev_id; devices = NULL; } MPL_free(free_ptr); } else { max_dev_id = count - 1; } *max_dev_id_ptr = max_dev_id; *device_count = count; gpu_mem_hook_init(); fn_exit: return MPL_SUCCESS; fn_fail: return MPL_ERR_GPU_INTERNAL; } int MPL_gpu_finalize() { gpu_free_hook_s *prev; while (free_hook_chain) { prev = free_hook_chain; free_hook_chain = free_hook_chain->next; MPL_free(prev); } return MPL_SUCCESS; } int MPL_gpu_get_dev_id(MPL_gpu_device_handle_t dev_handle, int *dev_id) { *dev_id = dev_handle; return MPL_SUCCESS; } int MPL_gpu_get_dev_handle(int dev_id, MPL_gpu_device_handle_t * dev_handle) { *dev_handle = dev_id; return MPL_SUCCESS; } int MPL_gpu_get_global_dev_ids(int *global_ids, int count) { char *visible_devices = getenv("CUDA_VISIBLE_DEVICES"); if (visible_devices) { uintptr_t len = strlen(visible_devices); char *devices = MPL_malloc(len + 1, MPL_MEM_OTHER); char *free_ptr = devices; memcpy(devices, visible_devices, len + 1); for (int i = 0; i < count; i++) { char *tmp = strtok(devices, ","); assert(tmp); global_ids[i] = atoi(tmp); devices = NULL; } MPL_free(free_ptr); } else { for (int i = 0; i < count; i++) { global_ids[i] = i; } } fn_exit: return MPL_SUCCESS; fn_fail: return MPL_ERR_GPU_INTERNAL; } int MPL_gpu_get_buffer_bounds(const void *ptr, void **pbase, uintptr_t * len) { CUresult curet; curet = cuMemGetAddressRange((CUdeviceptr *) pbase, (size_t *) len, (CUdeviceptr) ptr); CU_ERR_CHECK(curet); fn_exit: return MPL_SUCCESS; fn_fail: return MPL_ERR_GPU_INTERNAL; } static void gpu_free_hooks_cb(void *dptr) { gpu_free_hook_s *current = free_hook_chain; if (dptr != NULL) { /* we call gpu hook only when dptr != NULL */ while (current) { current->free_hook(dptr); current = current->next; } } return; } static int gpu_mem_hook_init() { void *libcuda_handle; void *libcudart_handle; libcuda_handle = dlopen("libcuda.so", RTLD_LAZY | RTLD_GLOBAL); assert(libcuda_handle); libcudart_handle = dlopen("libcudart.so", RTLD_LAZY | RTLD_GLOBAL); assert(libcudart_handle); sys_cuMemFree = (void *) dlsym(libcuda_handle, "cuMemFree"); assert(sys_cuMemFree); sys_cudaFree = (void *) dlsym(libcudart_handle, "cudaFree"); assert(sys_cudaFree); return MPL_SUCCESS; } int MPL_gpu_free_hook_register(void (*free_hook) (void *dptr)) { gpu_free_hook_s *hook_obj = MPL_malloc(sizeof(gpu_free_hook_s), MPL_MEM_OTHER); assert(hook_obj); hook_obj->free_hook = free_hook; hook_obj->next = NULL; if (!free_hook_chain) free_hook_chain = hook_obj; else { hook_obj->next = free_hook_chain; free_hook_chain = hook_obj; } return MPL_SUCCESS; } CUresult CUDAAPI cuMemFree(CUdeviceptr dptr) { CUresult result; gpu_free_hooks_cb((void *) dptr); result = sys_cuMemFree(dptr); return (result); } cudaError_t CUDARTAPI cudaFree(void *dptr) { cudaError_t result; gpu_free_hooks_cb(dptr); result = sys_cudaFree(dptr); return result; }
4,091
412
/*******************************************************************\ Module: Abstract Interpretation Author: <NAME>, <EMAIL> \*******************************************************************/ /// \file /// Abstract Interpretation Domain /// /// An abstract domain is an over-approximation of a set of possible valuations /// that the variables in a program may take at a given point in the program. /// For example if you have two variables, x and y and at a given point they /// can take the following values: /// /// (x = 1, y = -1), (x = -1, y = -1), (x = 1, y = 0) /// /// then you could represent this with a pair of intervals: /// /// x in [-1,1], y in [-1,0] /// /// this is an over-approximation as it also describes valuations, like, /// (x = 0, y = 0) which are not in the set. It also omits things like the /// link between the variables, such as knowing x >= y. /// However, in return for some imprecision (in a known direction), it gains /// scalability. A pair of intervals can represent sets of valuations that /// might contain millions or billions of pairs. /// /// The abstract interpretation framework is modular in terms of the domain /// used. Inherit from ai_domain_baset, implement the pure virtual functions /// and add a merge method and your domain can be plugged in to the analysis. /// The actual "glue" is a factory so that you can have domains with non-trivial /// constructors. These inherit from ai_domain_factory_baset, but /// ai_domain_factory_default_constructort<your_domain> will be fine if the /// default constructor is fine and inheriting from /// ai_domain_factoryt<your_domain> will be fine if non-trivial constructors /// are needed. #ifndef CPROVER_ANALYSES_AI_DOMAIN_H #define CPROVER_ANALYSES_AI_DOMAIN_H #include <util/json.h> #include <util/make_unique.h> #include <util/xml.h> #include "ai_history.h" // forward reference the abstract interpreter interface class ai_baset; /// The interface offered by a domain, allows code to manipulate domains without /// knowing their exact type. Derive from this to implement domains. 
class ai_domain_baset { protected: /// The constructor is expected to produce 'false' or 'bottom' /// A default constructor is not part of the domain interface ai_domain_baset() { } /// A copy constructor is part of the domain interface ai_domain_baset(const ai_domain_baset &old) { } public: virtual ~ai_domain_baset() { } typedef goto_programt::const_targett locationt; typedef ai_history_baset::trace_ptrt trace_ptrt; /// how function calls are treated: /// a) there is an edge from each call site to the function head /// b) there is an edge from the last instruction (END_FUNCTION) /// of the function to the instruction _following_ the call site /// (this also needs to set the LHS, if applicable) /// /// in some cases, function calls are skipped, in which case: /// c) there is an edge from the call instruction to the instruction after /// /// "this" is the domain before the instruction "from" /// "from" is the instruction to be interpreted /// "to" is the next instruction (for GOTO, FUNCTION_CALL, END_FUNCTION) /// /// PRECONDITION(from.is_dereferenceable(), "Must not be _::end()") /// PRECONDITION(to.is_dereferenceable(), "Must not be _::end()") /// PRECONDITION(are_comparable(from,to) || /// (from->is_function_call() || from->is_end_function()) /// /// The history aware version is used by the abstract interpreter /// for backwards compatibility it calls the older signature virtual void transform( const irep_idt &function_from, trace_ptrt from, const irep_idt &function_to, trace_ptrt to, ai_baset &ai, const namespacet &ns) = 0; virtual void output(std::ostream &, const ai_baset &, const namespacet &) const { } virtual jsont output_json(const ai_baset &ai, const namespacet &ns) const; virtual xmlt output_xml(const ai_baset &ai, const namespacet &ns) const; /// no states virtual void make_bottom() = 0; /// all states -- the analysis doesn't use this, /// and domains may refuse to implement it. virtual void make_top() = 0; /// Make this domain a reasonable entry-point state virtual void make_entry() = 0; virtual bool is_bottom() const = 0; virtual bool is_top() const = 0; /// also add /// /// bool merge(const T &b, locationt from, locationt to); /// or /// bool merge(const T &b, trace_ptrt from, trace_ptrt to); /// /// This computes the join between "this" and "b". /// Return true if "this" has changed. /// In the usual case, "b" is the updated state after "from" /// and "this" is the state before "to". /// /// PRECONDITION(from.is_dereferenceable(), "Must not be _::end()") /// PRECONDITION(to.is_dereferenceable(), "Must not be _::end()") /// This method allows an expression to be simplified / evaluated using the /// current state. It is used to evaluate assertions and in program /// simplification /// return true if unchanged virtual bool ai_simplify(exprt &condition, const namespacet &) const { (void)condition; // unused parameter return true; } /// Simplifies the expression but keeps it as an l-value virtual bool ai_simplify_lhs(exprt &condition, const namespacet &ns) const; /// Gives a Boolean condition that is true for all values represented by the /// domain. This allows domains to be converted into program invariants. virtual exprt to_predicate(void) const { if(is_bottom()) return false_exprt(); else return true_exprt(); } }; // No virtual interface is complete without a factory! 
class ai_domain_factory_baset { public: typedef ai_domain_baset statet; typedef ai_domain_baset::locationt locationt; typedef ai_domain_baset::trace_ptrt trace_ptrt; virtual ~ai_domain_factory_baset() { } virtual std::unique_ptr<statet> make(locationt l) const = 0; virtual std::unique_ptr<statet> copy(const statet &s) const = 0; // Not domain construction but requires knowing the precise type of statet virtual bool merge(statet &dest, const statet &src, trace_ptrt from, trace_ptrt to) const = 0; }; // Converting make to take a trace_ptr instead of a location would // require removing the backwards-compatible // location_sensitive_storaget::get_state(locationt l) // function which is used by some of the older domains // It would be great to have a single (templated) default implementation. // However, a default constructor is not part of the ai_domain_baset interface // and there are some domains which don't have one. So we need to separate the // methods. template <typename domainT> class ai_domain_factoryt : public ai_domain_factory_baset { public: typedef ai_domain_factory_baset::statet statet; typedef ai_domain_factory_baset::locationt locationt; typedef ai_domain_factory_baset::trace_ptrt trace_ptrt; std::unique_ptr<statet> copy(const statet &s) const override { return util_make_unique<domainT>(static_cast<const domainT &>(s)); } bool merge(statet &dest, const statet &src, trace_ptrt from, trace_ptrt to) const override { // For backwards compatibility, use the location version return static_cast<domainT &>(dest).merge( static_cast<const domainT &>(src), from, to); } }; template <typename domainT> class ai_domain_factory_default_constructort : public ai_domain_factoryt<domainT> { public: typedef ai_domain_factory_baset::statet statet; typedef ai_domain_factory_baset::locationt locationt; typedef ai_domain_factory_baset::trace_ptrt trace_ptrt; std::unique_ptr<statet> make(locationt l) const override { auto d = util_make_unique<domainT>(); CHECK_RETURN(d->is_bottom()); return std::unique_ptr<statet>(d.release()); } }; #endif
2,494
409
//![includes] #include <seqan/stream.h> #include <seqan/seq_io.h> //![includes] //![custom_file] namespace seqan { // Your custom file format. struct MyFastaAdaptor_; using MyFastaAdaptor = Tag<MyFastaAdaptor_>; // Specialize sequence input file with custom tag. using MySeqFileIn = FormattedFile<Fastq, Input, MyFastaAdaptor>; //![custom_file] //![custom_format] // Your custom format tag. struct MySeqFormat_; using MySeqFormat = Tag<MySeqFormat_>; // The extended TagList containing our custom format. using MySeqInFormats = TagList<MySeqFormat, SeqInFormats>; // Overloaded file format metafunction. template <> struct FileFormat<FormattedFile<Fastq, Input, MyFastaAdaptor> > { using Type = TagSelector<MySeqInFormats>; }; // Set magic header. template <typename T> struct MagicHeader<MySeqFormat, T> : public MagicHeader<Fasta, T> {}; //![custom_format] //![custom_extension] // Specify the valid ending for your fasta adaptor. template <typename T> struct FileExtensions<MySeqFormat, T> { static char const * VALUE[1]; }; template <typename T> char const * FileExtensions<MySeqFormat, T>::VALUE[1] = { ".fa.dat" // fasta file with dat ending. }; //![custom_extension] //![custom_read_record] // Overload an inner readRecord function to delegate to the actual fasta parser. template <typename TIdString, typename TSeqString, typename TSpec> inline void readRecord(TIdString & meta, TSeqString & seq, FormattedFile<Fastq, Input, TSpec> & file, MySeqFormat) { readRecord(meta, seq, file.iter, Fasta()); // Just delegate to Fasta parser. } } // namespace seqan //![custom_read_record] //![main] int main() { using namespace seqan; std::string path = getAbsolutePath("demos/howto/custom_file_ending.fa.dat"); MySeqFileIn seqFile(path.c_str()); CharString meta; Dna5String seq; readRecord(meta, seq, seqFile); std::cout << "> " << meta << "\n" << seq << std::endl; return 0; } //![main]
713
5,447
"""02. Train Image Classification with Auto Estimator ===================================================== This tutorial goes through the basic steps of using GluonCV auto estimator to train an image classifier with custom hyper-parameters. """ ########################################################## # Train with default configurations # --------------------------------- from gluoncv.auto.estimators import ImageClassificationEstimator ########################################################## # In this tutorial, we use a small sample dataset train, _, test = ImageClassificationEstimator.Dataset.from_folders( 'https://autogluon.s3.amazonaws.com/datasets/shopee-iet.zip') train, val, _ = train.random_split(val_size=0.1, test_size=0) ########################################################## # Create an estimator with default configurations. # We only change the number of GPUs to reflect the hardware constraint. # # Note that you may still launch training if no gpu is available(with {'gpus': []}) # but be prepared that this is painfully slow and not even possible to finish # in case the dataset isn't tiny. # # We recommend that you use at least one nvidia gpu with more than 6GB free GPU memory. classifier = ImageClassificationEstimator( {'gpus': [0], 'train': {'batch_size': 16, 'epochs': 2}}) ########################################################## # run fit on train/validation data classifier.fit(train, val) ########################################################## # Evaluate the final model on test set eval_result = classifier.evaluate(test) print("Top1/Top5 on test dataset: {}".format(eval_result)) ########################################################## # save to/from disk to be used later classifier.save('classifier.pkl') classifier2 = ImageClassificationEstimator.load('classifier.pkl') ########################################################## # run prediction on test images pred = classifier2.predict(test.iloc[0]['image']) print('GroundTruth:', test.iloc[0]) print('prediction:', pred) ########################################################## # Customize your training configurations # -------------------------------------- # You may modify configurations to customize your training, # supported fields: print(ImageClassificationEstimator._default_cfg) ########################################################## # For example, we could change the learning rate and batch size new_classifier = ImageClassificationEstimator({'gpus': [0], 'train': {'batch_size': 16, 'lr': 0.01}}) ########################################################## # A more natural format for modifying individual hyperparameter # is to edit the yaml file saved automatically in `self._logdir`, here we just show an example # of how to copy/edit and load the modified configuration file back to the estimator in python # # You may edit the yaml file directly with a text editor import shutil import os config_name = 'config.yaml' shutil.copyfile(os.path.join(classifier2._logdir, config_name), os.path.join('.', config_name)) cfg = open(config_name).read() print(cfg) ########################################################## # Let's modify the network from resnet50_v1 to resnet18_v1b import fileinput with fileinput.FileInput(config_name, inplace = True, backup ='.bak') as f: for line in f: if 'resnet50_v1' in line: new_line = line.replace('resnet50_v1', 'resnet18_v1b') print(new_line, end='') else: print(line, end='') ########################################################## # The new classifier now should reflect the new 
configs we just edited new_classifier2 = ImageClassificationEstimator(config_name)
1,009
3,384
// // Generated by class-dump 3.5 (64 bit). // // class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2013 by <NAME>. // #pragma mark Blocks typedef void (^CDUnknownBlockType)(void); // return type and parameters are unknown #pragma mark - // // File: /Applications/Xcode.app/Contents/Developer/Platforms/iPhoneSimulator.platform/Developer/SDKs/iPhoneSimulator.sdk/System/Library/PrivateFrameworks/SyncedDefaults.framework/SyncedDefaults // UUID: 62C331F4-774E-366E-863A-4300A4341E81 // // Arch: x86_64 // Current version: 1.0.0 // Compatibility version: 1.0.0 // Source version: 207.1.0.0.0 // Minimum iOS version: 8.0.0 // SDK version: 8.0.0 // // Objective-C Garbage Collection: Unsupported // @interface NSUbiquitousKeyValueStore (SYDDebug) - (void)_printDebugDescription; @end __attribute__((visibility("hidden"))) @interface SYDClient : NSObject { NSObject<OS_dispatch_queue> *_queue; NSObject<OS_xpc_object> *_connection; NSString *_bundleIdentifier; NSString *_storeIdentifier; _Bool _additionalSource; } - (void)sendMessageWithName:(id)arg1 userInfo:(id)arg2 replyHandler:(CDUnknownBlockType)arg3; - (void)sendMessageWithName:(id)arg1 replyHandler:(CDUnknownBlockType)arg2; - (id)sendMessageWithName:(id)arg1 userInfo:(id)arg2; - (void)sendMessageWithName:(id)arg1; - (void)_sendMessage:(id)arg1 replyHandler:(CDUnknownBlockType)arg2; - (void)_sendMessageNoReply:(id)arg1; - (id)_sendMessageWithReplySync:(id)arg1; - (id)_newMessageWithName:(id)arg1 userInfo:(id)arg2; - (void)shutdown; - (void)_resetConnection; - (void)_createConnectionIfNecessary; - (void)finalize; - (void)dealloc; - (id)initWithQueue:(id)arg1 bundleIdentifier:(struct __CFString *)arg2 storeIdentifier:(struct __CFString *)arg3; - (id)initWithQueue:(id)arg1 bundleIdentifier:(struct __CFString *)arg2 storeIdentifier:(struct __CFString *)arg3 additionalSource:(_Bool)arg4; @end __attribute__((visibility("hidden"))) @interface SYDJournal : NSObject { NSMutableDictionary *store; NSMutableDictionary *journal; _Bool readonly; } - (id)description; - (_Bool)removeChangesUntilChangeCount:(long long)arg1; - (void)addChange:(int)arg1 forKey:(id)arg2 changeCount:(long long)arg3; - (long long)maximumChangeCount; - (id)changesSinceChangeCount:(long long)arg1; - (int)changeSinceChangeCount:(long long)arg1 forKey:(id)arg2; - (id)initWithMutableStore:(id)arg1; - (id)initWithStore:(id)arg1; - (void)dealloc; - (id)init; @end @interface SYDRemotePreferencesSource : NSObject { long long _generationCount; long long _lastGenerationFromDisk; struct __CFString *preferenceID; struct __CFURL *urlOnDisk; struct __CFDictionary *cache; long long storageChangeCount; long long initialSyncChangeCount; unsigned char isInitialSync; struct __CFSet *dirtyKeys; struct __CFDictionary *configurationDictionary; NSMutableDictionary *externalChanges; SYDClient *client; CDUnknownBlockType registrationBlock; NSObject<OS_dispatch_queue> *registrationQueue; NSObject<OS_dispatch_queue> *_protectionQueue; NSObject<OS_dispatch_source> *_memoryWarningSource; double _lastAccess; _Bool _forceNextSynchronization; } + (id)SYDRemotePreferencesSourceConfigurationDidChangeNotification; + (id)SYDRemotePreferencesSourceDidChangeNotification; + (void)initialize; + (void)migrateSyncedDefaultsForBundleIdentifier:(id)arg1; + (void)noteAccountChanges:(id)arg1; + (void)resetAllApplicationsWithCompletionHandler:(CDUnknownBlockType)arg1; - (id)serverSideDebugDescription; - (void)_didReceiveMemoryWarning; - (void)_createMemoryWarningSource; - (id)_warningSource; - (void)updateConfiguration; - 
(void)unregisterForSynchronizedDefaults; - (void)registerForSynchronizedDefaults; - (long long)generationCount; - (struct __CFDictionary *)copyDictionary; - (struct __CFArray *)copyKeyList; - (void)discardExternalChangesForChangeCount:(long long)arg1; - (id)copyExternalChangesWithChangeCount:(long long *)arg1; - (unsigned char)hasExternalChanges; - (unsigned char)isInitialSync; - (unsigned char)_synchronizeForced:(unsigned char)arg1; - (unsigned char)synchronizeForced:(unsigned char)arg1; - (unsigned char)synchronize; - (void)synchronizationWithCompletionHandler:(CDUnknownBlockType)arg1; - (void)scheduleRemoteSynchronization; - (void)_cachePlistFromDisk; - (void)setValue:(void *)arg1 forKey:(struct __CFString *)arg2; - (void *)getValueForKey:(struct __CFString *)arg1; - (void)_forceRegistrationNow; - (void)finalize; - (void)dealloc; - (id)initWithApplicationID:(struct __CFString *)arg1 storeID:(struct __CFString *)arg2 shared:(_Bool)arg3 additionalSource:(_Bool)arg4; - (id)initWithApplicationID:(struct __CFString *)arg1 storeID:(struct __CFString *)arg2 shared:(_Bool)arg3; - (id)initWithApplicationID:(struct __CFString *)arg1 shared:(_Bool)arg2; - (long long)maximumTotalDataLength; - (long long)maximumDataLengthPerKey; - (long long)maximumKeyLength; - (long long)maximumKeyCount; - (long long)configurationValueForKey:(struct __CFString *)arg1; - (struct __CFDictionary *)copyConfigurationDictionary; - (void)_storeConfiguration:(struct __CFDictionary *)arg1; @end
1,887
879
package org.zstack.test.storage.backup.sftp; import junit.framework.Assert; import org.junit.Before; import org.junit.Test; import org.zstack.core.Platform; import org.zstack.core.cloudbus.CloudBus; import org.zstack.core.componentloader.ComponentLoader; import org.zstack.core.config.GlobalConfigFacade; import org.zstack.core.db.DatabaseFacade; import org.zstack.header.identity.SessionInventory; import org.zstack.header.image.ImageConstant; import org.zstack.header.image.ImageConstant.ImageMediaType; import org.zstack.header.image.ImageInventory; import org.zstack.header.image.ImageVO; import org.zstack.header.volume.VolumeConstant; import org.zstack.simulator.storage.backup.sftp.SftpBackupStorageSimulatorConfig; import org.zstack.storage.backup.sftp.SftpBackupStorageInventory; import org.zstack.test.Api; import org.zstack.test.ApiSenderException; import org.zstack.test.DBUtil; import org.zstack.test.WebBeanConstructor; import org.zstack.test.deployer.Deployer; import org.zstack.utils.Utils; import org.zstack.utils.data.SizeUnit; import org.zstack.utils.logging.CLogger; public class TestSftpBackupStorageDownloadImage { CLogger logger = Utils.getLogger(TestSftpBackupStorageDownloadImage.class); Deployer deployer; Api api; ComponentLoader loader; CloudBus bus; DatabaseFacade dbf; GlobalConfigFacade gcf; SessionInventory session; SftpBackupStorageSimulatorConfig config; @Before public void setUp() throws Exception { DBUtil.reDeployDB(); WebBeanConstructor con = new WebBeanConstructor(); deployer = new Deployer("deployerXml/sftpBackupStorage/TestAddSftpBackupStorage.xml", con); deployer.addSpringConfig("KVMRelated.xml"); deployer.build(); api = deployer.getApi(); loader = deployer.getComponentLoader(); bus = loader.getComponent(CloudBus.class); dbf = loader.getComponent(DatabaseFacade.class); gcf = loader.getComponent(GlobalConfigFacade.class); config = loader.getComponent(SftpBackupStorageSimulatorConfig.class); session = api.loginAsAdmin(); } @Test public void test() throws ApiSenderException { SftpBackupStorageTestHelper helper = new SftpBackupStorageTestHelper(); SftpBackupStorageInventory sinv = helper.addSimpleHttpBackupStorage(api); config.downloadSuccess1 = true; config.downloadSuccess2 = true; config.imageMd5sum = Platform.getUuid(); long size = SizeUnit.GIGABYTE.toByte(8); ImageInventory iinv = new ImageInventory(); iinv.setUuid(Platform.getUuid()); iinv.setMediaType(ImageMediaType.RootVolumeTemplate.toString()); iinv.setFormat(VolumeConstant.VOLUME_FORMAT_QCOW2); iinv.setGuestOsType("CentOS6.3"); iinv.setName("TestImage"); iinv.setType(ImageConstant.ZSTACK_IMAGE_TYPE); iinv.setUrl("http://zstack.org/download/testimage.qcow2"); config.imageSizes.put(iinv.getUuid(), size); long asize = SizeUnit.GIGABYTE.toByte(1); config.imageActualSizes.put(iinv.getUuid(), asize); iinv = api.addImage(iinv, sinv.getUuid()); Assert.assertEquals(size, iinv.getSize()); Assert.assertEquals(asize, iinv.getActualSize().longValue()); Assert.assertEquals(config.imageMd5sum, iinv.getMd5Sum()); Assert.assertNotNull(iinv.getBackupStorageRefs().get(0).getInstallPath()); ImageVO vo = dbf.findByUuid(iinv.getUuid(), ImageVO.class); Assert.assertEquals(size, vo.getSize()); Assert.assertEquals(config.imageMd5sum, vo.getMd5Sum()); Assert.assertNotNull(vo.getBackupStorageRefs().iterator().next().getInstallPath()); } }
1,472
348
{"nom":"Mortagne-sur-Gironde","circ":"4ème circonscription","dpt":"Charente-Maritime","inscrits":772,"abs":436,"votants":336,"blancs":24,"nuls":4,"exp":308,"res":[{"nuance":"LR","nom":"<NAME>","voix":179},{"nuance":"REM","nom":"<NAME>","voix":129}]}
98
1,031
<filename>newton-3.14/sdk/dScene/dAnimationTrack.cpp ///////////////////////////////////////////////////////////////////////////// // Name: dAnimationTrack.h // Purpose: // Author: <NAME> // Modified by: // Created: 22/05/2010 08:02:08 // RCS-ID: // Copyright: Copyright (c) <2010> <Newton Game Dynamics> // License: // This software is provided 'as-is', without any express or implied // warranty. In no event will the authors be held liable for any damages // arising from the use of this software. // // Permission is granted to anyone to use this software for any purpose, // including commercial applications, and to alter it and redistribute it // freely ///////////////////////////////////////////////////////////////////////////// #include "dSceneStdafx.h" #include "dScene.h" #include "dAnimationTrack.h" #include <tinyxml.h> D_IMPLEMENT_CLASS_NODE(dAnimationTrack); dAnimationTrack::dAnimationTrack(dScene* const world) :dNodeInfo () { SetName ("animationLayers"); } dAnimationTrack::dAnimationTrack() :dNodeInfo () { SetName ("animationLayers"); } dAnimationTrack::~dAnimationTrack(void) { } void dAnimationTrack::AddScale(dFloat time, dFloat x, dFloat y, dFloat z) { dCurveValue& value = m_scale.Append()->GetInfo(); value.m_x = x; value.m_y = y; value.m_z = z; value.m_time = time; } void dAnimationTrack::AddPosition(dFloat time, dFloat x, dFloat y, dFloat z) { dCurveValue& value = m_position.Append()->GetInfo(); value.m_x = x; value.m_y = y; value.m_z = z; value.m_time = time; } void dAnimationTrack::AddRotation(dFloat time, dFloat x, dFloat y, dFloat z) { dCurveValue& value = m_rotation.Append()->GetInfo(); value.m_x = x; value.m_y = y; value.m_z = z; value.m_time = time; } void dAnimationTrack::AddKeyframe(dFloat time, const dMatrix& matrix) { dVector scale; dVector euler0; dVector euler1; dMatrix transform; dMatrix eigenScaleAxis; matrix.PolarDecomposition(transform, scale, eigenScaleAxis); transform.GetEulerAngles(euler0, euler1); AddScale(time, scale.m_x, scale.m_y, scale.m_z); AddPosition(time, matrix.m_posit.m_x, matrix.m_posit.m_y, matrix.m_posit.m_z); AddRotation(time, euler0.m_x, euler0.m_y, euler0.m_z); //dTrace(("%d %f %f %f\n", m_rotation.GetCount(), euler0.m_x * dRadToDegree, euler0.m_y * dRadToDegree, euler0.m_z * dRadToDegree)); } void dAnimationTrack::ResampleAnimation() { dFloat period = m_scale.GetLast()->GetInfo().m_time; dFloat t1 = m_position.GetLast()->GetInfo().m_time; period = dMax(period, t1); dFloat t2 = m_rotation.GetLast()->GetInfo().m_time; period = dMax(period, t2); dCurve scaleCurve; dCurve positionCurve; dCurve rotationCurve; dFloat acc = 0.0f; do { dCurveValue scale(m_scale.Evaluate(acc)); dCurveValue position(m_position.Evaluate(acc)); dCurveValue rotation(m_rotation.Evaluate(acc)); scaleCurve.Append(scale); positionCurve.Append(position); rotationCurve.Append(rotation); acc += 1.0f / 60.0f; } while (acc < period); dCurveValue scale(m_scale.Evaluate(period)); dCurveValue position(m_position.Evaluate(period)); dCurveValue rotation(m_rotation.Evaluate(period)); scaleCurve.Append(scale); positionCurve.Append(position); rotationCurve.Append(rotation); m_scale.RemoveAll(); m_position.RemoveAll(); m_rotation.RemoveAll(); for (dCurve::dListNode* ptr = scaleCurve.GetFirst(); ptr; ptr = ptr->GetNext()) { m_scale.Append(ptr->GetInfo()); } for (dCurve::dListNode* ptr = positionCurve.GetFirst(); ptr; ptr = ptr->GetNext()) { m_position.Append(ptr->GetInfo()); } for (dCurve::dListNode* ptr = rotationCurve.GetFirst(); ptr; ptr = ptr->GetNext()) { 
m_rotation.Append(ptr->GetInfo()); } } void dAnimationTrack::FreezeScale(const dMatrix& parentMatrix) { ResampleAnimation(); dCurve::dListNode* scaleNode = m_scale.GetFirst(); dCurve::dListNode* positNode = m_position.GetFirst(); for (dCurve::dListNode* rotationNode = m_rotation.GetFirst(); rotationNode; rotationNode = rotationNode->GetNext()) { if (m_position.GetCount() && m_rotation.GetCount()) { dVector euler0; dVector euler1; dCurveValue& scaleValue = scaleNode->GetInfo(); dCurveValue& positValue = positNode->GetInfo(); dCurveValue& rotationValue = rotationNode->GetInfo(); dMatrix scaleMatrix(dGetIdentityMatrix()); scaleMatrix[0][0] = scaleValue.m_x; scaleMatrix[1][1] = scaleValue.m_y; scaleMatrix[2][2] = scaleValue.m_z; dMatrix matrix(scaleMatrix * dPitchMatrix(rotationValue.m_x) * dYawMatrix(rotationValue.m_y) * dRollMatrix(rotationValue.m_z)); matrix.m_posit = dVector(positValue.m_x, positValue.m_y, positValue.m_z, 1.0f); dMatrix transform(matrix * parentMatrix); dMatrix stretchAxis; dVector scale(0.0f); transform.PolarDecomposition(matrix, scale, stretchAxis); matrix.GetEulerAngles(euler0, euler1); scaleValue.m_x = dFloat(1.0f); scaleValue.m_y = dFloat(1.0f); scaleValue.m_z = dFloat(1.0f); rotationValue.m_x = euler0.m_x; rotationValue.m_y = euler0.m_y; rotationValue.m_z = euler0.m_z; positValue.m_x = matrix.m_posit.m_x; positValue.m_y = matrix.m_posit.m_y; positValue.m_z = matrix.m_posit.m_z; positNode = positNode->GetNext(); scaleNode = scaleNode->GetNext(); } } OptimizeCurves(); } void dAnimationTrack::BakeTransform(const dMatrix& transform) { ResampleAnimation(); dMatrix invert(transform.Inverse4x4()); dCurve::dListNode* scaleNode = m_scale.GetFirst(); dCurve::dListNode* positNode = m_position.GetFirst(); for (dCurve::dListNode* rotationNode = m_rotation.GetFirst(); rotationNode; rotationNode = rotationNode->GetNext()) { dVector euler0; dVector euler1; dCurveValue& scaleValue = scaleNode->GetInfo(); dCurveValue& positValue = positNode->GetInfo(); dCurveValue& rotationValue = rotationNode->GetInfo(); dMatrix scaleMatrix(dGetIdentityMatrix()); scaleMatrix[0][0] = scaleValue.m_x; scaleMatrix[1][1] = scaleValue.m_y; scaleMatrix[2][2] = scaleValue.m_z; dMatrix m(scaleMatrix * dPitchMatrix(rotationValue.m_x) * dYawMatrix(rotationValue.m_y) * dRollMatrix(rotationValue.m_z)); m.m_posit = dVector(positValue.m_x, positValue.m_y, positValue.m_z, 1.0f); dMatrix matrix(invert * m * transform); dVector scale; dMatrix output; dMatrix eigenScaleAxis; matrix.PolarDecomposition(output, scale, eigenScaleAxis); output.GetEulerAngles(euler0, euler1); //dTrace(("%d %f %f %f\n", m_rotation.GetCount(), euler0.m_x * dRadToDegree, euler0.m_y * dRadToDegree, euler0.m_z * dRadToDegree)); scaleValue.m_x = scale.m_x; scaleValue.m_y = scale.m_y; scaleValue.m_z = scale.m_z; rotationValue.m_x = euler0.m_x; rotationValue.m_y = euler0.m_y; rotationValue.m_z = euler0.m_z; positValue.m_x = output.m_posit.m_x; positValue.m_y = output.m_posit.m_y; positValue.m_z = output.m_posit.m_z; positNode = positNode->GetNext(); scaleNode = scaleNode->GetNext(); } OptimizeCurves(); } dFloat dAnimationTrack::Interpolate(dFloat x0, dFloat t0, dFloat x1, dFloat t1, dFloat t) const { return x0 + (x1 - x0) * (t - t0) / (t1 - t0); } void dAnimationTrack::OptimizeCurve(dList<dCurveValue>& curve) { const dFloat tol = 5.0e-5f; const dFloat tol2 = tol * tol; for (dCurve::dListNode* node0 = curve.GetFirst(); node0->GetNext(); node0 = node0->GetNext()) { const dCurveValue& value0 = node0->GetInfo(); for (dCurve::dListNode* node1 = 
node0->GetNext()->GetNext(); node1; node1 = node1->GetNext()) { const dCurveValue& value1 = node1->GetPrev()->GetInfo(); const dCurveValue& value2 = node1->GetInfo(); dVector p1(value1.m_x, value1.m_y, value1.m_z, dFloat(0.0f)); dVector p2(value2.m_x, value2.m_y, value2.m_z, dFloat(0.0f)); dFloat dist_x = value1.m_x - Interpolate(value0.m_x, value0.m_time, value2.m_x, value2.m_time, value1.m_time); dFloat dist_y = value1.m_y - Interpolate(value0.m_y, value0.m_time, value2.m_y, value2.m_time, value1.m_time); dFloat dist_z = value1.m_z - Interpolate(value0.m_z, value0.m_time, value2.m_z, value2.m_time, value1.m_time); dVector err(dist_x, dist_y, dist_z, 0.0f); dFloat mag2 = err.DotProduct3(err); if (mag2 > tol2) { break; } curve.Remove(node1->GetPrev()); } } } dFloat dAnimationTrack::FixAngleAlias(dFloat angleA, dFloat angleB) const { dFloat sinA = dSin(angleA); dFloat cosA = dCos(angleA); dFloat sinB = dSin(angleB); dFloat cosB = dCos(angleB); dFloat num = sinB * cosA - cosB * sinA; dFloat den = cosA * cosB + sinA * sinB; angleB = angleA + dAtan2(num, den); return angleB; } void dAnimationTrack::OptimizeCurves() { if (m_scale.GetCount()) { OptimizeCurve(m_scale); } if (m_position.GetCount()) { OptimizeCurve(m_position); } if (m_rotation.GetCount()) { for (dCurve::dListNode* node = m_rotation.GetFirst(); node->GetNext(); node = node->GetNext()) { const dCurveValue& value0 = node->GetInfo(); dCurveValue& value1 = node->GetNext()->GetInfo(); value1.m_x = FixAngleAlias(value0.m_x, value1.m_x); value1.m_y = FixAngleAlias(value0.m_y, value1.m_y); value1.m_z = FixAngleAlias(value0.m_z, value1.m_z); //dTrace(("%d %f %f %f\n", m_rotation.GetCount(), value0.m_x * dRadToDegree, value0.m_y * dRadToDegree, value0.m_z * dRadToDegree)); } OptimizeCurve(m_rotation); } } void dAnimationTrack::Serialize (TiXmlElement* const rootNode) { SerialiseBase(dNodeInfo, rootNode); dAssert(m_scale.GetCount() >= 2); dAssert(m_rotation.GetCount() >= 2); dAssert(m_position.GetCount() >= 2); if (m_scale.GetCount()) { TiXmlElement* const scaleKeys = new TiXmlElement("scaleKeyframes"); rootNode->LinkEndChild(scaleKeys); int bufferSizeInBytes = 3 * 12 * m_scale.GetCount() * sizeof(dFloat); char* const buffer = dAlloca(char, bufferSizeInBytes); dFloat* const time = dAlloca(dFloat, m_scale.GetCount()); dFloat* const points = dAlloca(dFloat, 3 * m_scale.GetCount()); int count = 0; for (dCurve::dListNode* node = m_scale.GetFirst(); node; node = node->GetNext()) { const dCurveValue& value = node->GetInfo(); time[count] = value.m_time; points[count * 3 + 0] = value.m_x; points[count * 3 + 1] = value.m_y; points[count * 3 + 2] = value.m_z; count++; } TiXmlElement* const timeLine = new TiXmlElement("timeLine"); scaleKeys->LinkEndChild(timeLine); dFloatArrayToString(time, count, buffer, bufferSizeInBytes); timeLine->SetAttribute("float", count); timeLine->SetAttribute("floats", buffer); TiXmlElement* const positions = new TiXmlElement("scale"); scaleKeys->LinkEndChild(positions); dFloatArrayToString(points, 3 * count, buffer, bufferSizeInBytes); positions->SetAttribute("float3", count); positions->SetAttribute("floats", buffer); } if (m_position.GetCount()) { TiXmlElement* const positionKeys = new TiXmlElement("positionKeyframes"); rootNode->LinkEndChild(positionKeys); int bufferSizeInBytes = 3 * 12 * m_position.GetCount() * sizeof(dFloat); char* const buffer = dAlloca(char, bufferSizeInBytes); dFloat* const time = dAlloca(dFloat, m_position.GetCount()); dFloat* const points = dAlloca(dFloat, 3 * m_position.GetCount()); int count = 0; 
for (dCurve::dListNode* node = m_position.GetFirst(); node; node = node->GetNext()) { const dCurveValue& value = node->GetInfo(); time[count] = value.m_time; points[count * 3 + 0] = value.m_x; points[count * 3 + 1] = value.m_y; points[count * 3 + 2] = value.m_z; count++; } TiXmlElement* const timeLine = new TiXmlElement("timeLine"); positionKeys->LinkEndChild(timeLine); dFloatArrayToString(time, count, buffer, bufferSizeInBytes); timeLine->SetAttribute("float", count); timeLine->SetAttribute("floats", buffer); TiXmlElement* const positions = new TiXmlElement("position"); positionKeys->LinkEndChild(positions); dFloatArrayToString(points, 3 * count, buffer, bufferSizeInBytes); positions->SetAttribute("float3", count); positions->SetAttribute("floats", buffer); } if (m_rotation.GetCount()) { TiXmlElement* const rotationKeys = new TiXmlElement("rotationKeyframes"); rootNode->LinkEndChild(rotationKeys); int bufferSizeInBytes = 3 * 12 * m_rotation.GetCount() * sizeof(dFloat); char* const buffer = dAlloca(char, bufferSizeInBytes); dFloat* const time = dAlloca(dFloat, m_rotation.GetCount()); dFloat* const points = dAlloca(dFloat, 3 * m_rotation.GetCount()); int count = 0; for (dCurve::dListNode* node = m_rotation.GetFirst(); node; node = node->GetNext()) { const dCurveValue& value = node->GetInfo(); time[count] = value.m_time; points[count * 3 + 0] = value.m_x; points[count * 3 + 1] = value.m_y; points[count * 3 + 2] = value.m_z; count++; } TiXmlElement* const timeLine = new TiXmlElement("timeLine"); rotationKeys->LinkEndChild(timeLine); dFloatArrayToString(time, count, buffer, bufferSizeInBytes); timeLine->SetAttribute("float", count); timeLine->SetAttribute("floats", buffer); TiXmlElement* const positions = new TiXmlElement("angles"); rotationKeys->LinkEndChild(positions); dFloatArrayToString(points, 3 * count, buffer, bufferSizeInBytes); positions->SetAttribute("float3", count); positions->SetAttribute("floats", buffer); } } bool dAnimationTrack::Deserialize (const dScene* const scene, TiXmlElement* const rootNode) { DeserialiseBase(scene, dNodeInfo, rootNode); TiXmlElement* const scaleKeyframes = (TiXmlElement*)rootNode->FirstChild("scaleKeyframes"); if (scaleKeyframes) { TiXmlElement* const timeLineElement = (TiXmlElement*)scaleKeyframes->FirstChild("timeLine"); TiXmlElement* const positionElement = (TiXmlElement*)scaleKeyframes->FirstChild("scale"); int keyFramesCount; timeLineElement->Attribute("float", &keyFramesCount); dFloat* const timeline = dAlloca(dFloat, keyFramesCount); dFloat* const points = dAlloca(dFloat, 3 * keyFramesCount); dStringToFloatArray(timeLineElement->Attribute("floats"), timeline, keyFramesCount); dStringToFloatArray(positionElement->Attribute("floats"), points, 3 * keyFramesCount); for (int i = 0; i < keyFramesCount; i++) { AddScale(timeline[i], points[i * 3 + 0], points[i * 3 + 1], points[i * 3 + 2]); } } TiXmlElement* const positionKeyframes = (TiXmlElement*)rootNode->FirstChild("positionKeyframes"); if (positionKeyframes) { TiXmlElement* const timeLineElement = (TiXmlElement*)positionKeyframes->FirstChild("timeLine"); TiXmlElement* const positionElement = (TiXmlElement*)positionKeyframes->FirstChild("position"); int keyFramesCount; timeLineElement->Attribute("float", &keyFramesCount); dFloat* const timeline = dAlloca(dFloat, keyFramesCount); dFloat* const points = dAlloca(dFloat, 3 * keyFramesCount); dStringToFloatArray(timeLineElement->Attribute("floats"), timeline, keyFramesCount); dStringToFloatArray(positionElement->Attribute("floats"), points, 3 * 
keyFramesCount); for (int i = 0; i < keyFramesCount; i ++) { AddPosition(timeline[i], points[i * 3 + 0], points[i * 3 + 1], points[i * 3 + 2]); } } TiXmlElement* const rotationKeyframes = (TiXmlElement*)rootNode->FirstChild("rotationKeyframes"); if (rotationKeyframes) { TiXmlElement* const timeLineElement = (TiXmlElement*)rotationKeyframes->FirstChild("timeLine"); TiXmlElement* const positionElement = (TiXmlElement*)rotationKeyframes->FirstChild("angles"); int keyFramesCount; timeLineElement->Attribute("float", &keyFramesCount); dFloat* const timeline = dAlloca(dFloat, keyFramesCount); dFloat* const points = dAlloca(dFloat, 3 * keyFramesCount); dStringToFloatArray(timeLineElement->Attribute("floats"), timeline, keyFramesCount); dStringToFloatArray(positionElement->Attribute("floats"), points, 3 * keyFramesCount); for (int i = 0; i < keyFramesCount; i++) { AddRotation(timeline[i], points[i * 3 + 0], points[i * 3 + 1], points[i * 3 + 2]); } } return true; }
6,190
3,212
<reponame>westdart/nifi /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.nifi.flow; import io.swagger.annotations.ApiModelProperty; import java.util.List; import java.util.Map; public class VersionedControllerService extends VersionedComponent implements VersionedConfigurableComponent, VersionedExtensionComponent { private String type; private Bundle bundle; private List<ControllerServiceAPI> controllerServiceApis; private Map<String, String> properties; private Map<String, VersionedPropertyDescriptor> propertyDescriptors; private String annotationData; private ScheduledState scheduledState; @Override @ApiModelProperty(value = "The type of the controller service.") public String getType() { return type; } @Override public void setType(String type) { this.type = type; } @Override @ApiModelProperty(value = "The details of the artifact that bundled this controller service type.") public Bundle getBundle() { return bundle; } @Override public void setBundle(Bundle bundle) { this.bundle = bundle; } @ApiModelProperty(value = "Lists the APIs this Controller Service implements.") public List<ControllerServiceAPI> getControllerServiceApis() { return controllerServiceApis; } public void setControllerServiceApis(List<ControllerServiceAPI> controllerServiceApis) { this.controllerServiceApis = controllerServiceApis; } @Override @ApiModelProperty(value = "The properties of the controller service.") public Map<String, String> getProperties() { return properties; } @Override public void setProperties(Map<String, String> properties) { this.properties = properties; } @Override @ApiModelProperty("The property descriptors for the controller service.") public Map<String, VersionedPropertyDescriptor> getPropertyDescriptors() { return propertyDescriptors; } @Override public void setPropertyDescriptors(Map<String, VersionedPropertyDescriptor> propertyDescriptors) { this.propertyDescriptors = propertyDescriptors; } @ApiModelProperty(value = "The annotation for the controller service. This is how the custom UI relays configuration to the controller service.") public String getAnnotationData() { return annotationData; } public void setAnnotationData(String annotationData) { this.annotationData = annotationData; } @Override public ComponentType getComponentType() { return ComponentType.CONTROLLER_SERVICE; } @ApiModelProperty("The ScheduledState denoting whether the Controller Service is ENABLED or DISABLED") public ScheduledState getScheduledState() { return scheduledState; } public void setScheduledState(final ScheduledState scheduledState) { this.scheduledState = scheduledState; } }
1,146
823
package org.zalando.problem.spring.webflux.advice.http; import org.apiguardian.api.API; import org.springframework.http.ResponseEntity; import org.springframework.web.bind.annotation.ExceptionHandler; import org.springframework.web.server.NotAcceptableStatusException; import org.springframework.web.server.ServerWebExchange; import org.zalando.problem.Problem; import org.zalando.problem.Status; import org.zalando.problem.spring.webflux.advice.AdviceTrait; import reactor.core.publisher.Mono; import static org.apiguardian.api.API.Status.INTERNAL; import static org.apiguardian.api.API.Status.STABLE; /** * @see NotAcceptableStatusException * @see Status#NOT_ACCEPTABLE */ @API(status = STABLE) public interface NotAcceptableAdviceTrait extends AdviceTrait { @API(status = INTERNAL) @ExceptionHandler default Mono<ResponseEntity<Problem>> handleMediaTypeNotAcceptable( final NotAcceptableStatusException exception, final ServerWebExchange request) { return create(Status.NOT_ACCEPTABLE, exception, request); } }
361
768
package net.engio.mbassy; import net.engio.mbassy.bus.MBassador; import net.engio.mbassy.bus.common.DeadMessage; import net.engio.mbassy.common.ConcurrentExecutor; import net.engio.mbassy.common.ListenerFactory; import net.engio.mbassy.common.MessageBusTest; import net.engio.mbassy.common.TestUtil; import net.engio.mbassy.listener.Handler; import net.engio.mbassy.listeners.IMessageListener; import net.engio.mbassy.listeners.MessagesTypeListener; import net.engio.mbassy.listeners.ObjectListener; import org.junit.Before; import org.junit.Test; import java.util.concurrent.atomic.AtomicInteger; /** * Verify correct behaviour in case of message publications that do not have any matching subscriptions * * @author bennidi * Date: 1/18/13 */ public class DeadMessageTest extends MessageBusTest{ @Before public void beforeTest(){ DeadMessagHandler.deadMessages.set(0); } @Test public void testDeadMessage(){ final MBassador bus = createBus(SyncAsync()); ListenerFactory listeners = new ListenerFactory() .create(InstancesPerListener, IMessageListener.DefaultListener.class) .create(InstancesPerListener, IMessageListener.AsyncListener.class) .create(InstancesPerListener, IMessageListener.DisabledListener.class) .create(InstancesPerListener, MessagesTypeListener.DefaultListener.class) .create(InstancesPerListener, MessagesTypeListener.AsyncListener.class) .create(InstancesPerListener, MessagesTypeListener.DisabledListener.class) .create(InstancesPerListener, DeadMessagHandler.class) .create(InstancesPerListener, Object.class); ConcurrentExecutor.runConcurrent(TestUtil.subscriber(bus, listeners), ConcurrentUnits); Runnable publishUnhandledMessage = new Runnable() { @Override public void run() { for(int i=0; i < IterationsPerThread; i++){ int variation = i % 3; switch (variation){ case 0:bus.publish(new Object());break; case 1:bus.publish(i);break; case 2:bus.publish(String.valueOf(i));break; } } } }; ConcurrentExecutor.runConcurrent(publishUnhandledMessage, ConcurrentUnits); assertEquals(InstancesPerListener * IterationsPerThread * ConcurrentUnits, DeadMessagHandler.deadMessages.get()); } @Test public void testUnsubscribingAllListeners() { final MBassador bus = createBus(SyncAsync()); ListenerFactory deadMessageListener = new ListenerFactory() .create(InstancesPerListener, DeadMessagHandler.class) .create(InstancesPerListener, Object.class); ListenerFactory objectListener = new ListenerFactory() .create(InstancesPerListener, ObjectListener.class); ConcurrentExecutor.runConcurrent(TestUtil.subscriber(bus, deadMessageListener), ConcurrentUnits); // Only dead message handlers available bus.post(new Object()).now(); // The message should be caught as dead message since there are no subscribed listeners assertEquals(InstancesPerListener, DeadMessagHandler.deadMessages.get()); // Clear deadmessage for future tests DeadMessagHandler.deadMessages.set(0); // Add object listeners and publish again ConcurrentExecutor.runConcurrent(TestUtil.subscriber(bus, objectListener), ConcurrentUnits); bus.post(new Object()).now(); // verify that no dead message events were produced assertEquals(0, DeadMessagHandler.deadMessages.get()); // Unsubscribe all object listeners ConcurrentExecutor.runConcurrent(TestUtil.unsubscriber(bus, objectListener), ConcurrentUnits); // Only dead message handlers available bus.post(new Object()).now(); // The message should be caught, as it's the only listener assertEquals(InstancesPerListener, DeadMessagHandler.deadMessages.get()); } public static class DeadMessagHandler { private static final 
AtomicInteger deadMessages = new AtomicInteger(0); @Handler public void handle(DeadMessage message){ deadMessages.incrementAndGet(); } } }
1,708
310
<reponame>dekimir/RamFuzz // Copyright 2016-2018 The RamFuzz contributors. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <Python.h> #include <iostream> #include <unistd.h> using namespace std; /// Reads a T value from RamFuzz log opened under the file descriptor fd. After /// reading the value, reads its id and returns a Python tuple (value, id). template <typename T> PyObject *logread(int fd) { T val; if (size_t(read(fd, &val, sizeof(val))) < sizeof(val)) return Py_BuildValue(""); size_t id; if (size_t(read(fd, &id, sizeof(id))) < sizeof(id)) return Py_BuildValue(""); unsigned long long lid(id); return Py_BuildValue("d K", double(val), lid); } /// Implements Python's ramfuzz.load(), which is documented below in \c methods. static PyObject *ramfuzz_load(PyObject *self, PyObject *args) { int fd; if (!PyArg_ParseTuple(args, "i", &fd) || fd < 0) return NULL; char tag; if (read(fd, &tag, 1) < 1) return Py_BuildValue(""); switch (tag) { // The following must match the specializations of // ramfuzz::runtime::typetag. case 0: return logread<bool>(fd); case 1: return logread<char>(fd); case 2: return logread<unsigned char>(fd); case 3: return logread<short>(fd); case 4: return logread<unsigned short>(fd); case 5: return logread<int>(fd); case 6: return logread<unsigned int>(fd); case 7: return logread<long>(fd); case 8: return logread<unsigned long>(fd); case 9: return logread<long long>(fd); case 10: return logread<unsigned long long>(fd); case 11: return logread<float>(fd); case 12: return logread<double>(fd); default: return NULL; } } /// A list of all methods in this module. static PyMethodDef methods[] = { {"load", ramfuzz_load, METH_VARARGS, "Return the next value from the RamFuzz log whose file descriptor is " "passed as the sole (int) argument."}, {NULL, NULL, 0, NULL} /* Sentinel */ }; /// Module initialization. PyMODINIT_FUNC initramfuzz(void) { (void)Py_InitModule("ramfuzz", methods); }
902
1,192
<reponame>clayne/DirectXShaderCompiler // RUN: %clang_cc1 -triple i686 -fsyntax-only -verify %s // RUN: %clang_cc1 -triple x86_64 -fsyntax-only -verify %s void I(int i, int j) { static const int BelowMin = -1; static const int AboveMax = 32; __asm__("xorl %0,%2" : "=r"(i) : "0"(i), "I"(j)); // expected-error{{constraint 'I' expects an integer constant expression}} __asm__("xorl %0,%2" : "=r"(i) : "0"(i), "I"(BelowMin)); // expected-error{{value '-1' out of range for constraint 'I'}} __asm__("xorl %0,%2" : "=r"(i) : "0"(i), "I"(AboveMax)); // expected-error{{value '32' out of range for constraint 'I'}} __asm__("xorl %0,%2" : "=r"(i) : "0"(i), "I"(16)); // expected-no-error } void J(int i, int j) { static const int BelowMin = -1; static const int AboveMax = 64; __asm__("xorl %0,%2" : "=r"(i) : "0"(i), "J"(j)); // expected-error{{constraint 'J' expects an integer constant expression}} __asm__("xorl %0,%2" : "=r"(i) : "0"(i), "J"(BelowMin)); // expected-error{{value '-1' out of range for constraint 'J'}} __asm__("xorl %0,%2" : "=r"(i) : "0"(i), "J"(AboveMax)); // expected-error{{value '64' out of range for constraint 'J'}} __asm__("xorl %0,%2" : "=r"(i) : "0"(i), "J"(32)); // expected-no-error } void K(int i, int j) { static const int BelowMin = -129; static const int AboveMax = 128; __asm__("xorl %0,%2" : "=r"(i) : "0"(i), "K"(j)); // expected-error{{constraint 'K' expects an integer constant expression}} __asm__("xorl %0,%2" : "=r"(i) : "0"(i), "K"(BelowMin)); // expected-error{{value '-129' out of range for constraint 'K'}} __asm__("xorl %0,%2" : "=r"(i) : "0"(i), "K"(AboveMax)); // expected-error{{value '128' out of range for constraint 'K'}} __asm__("xorl %0,%2" : "=r"(i) : "0"(i), "K"(96)); // expected-no-error } void M(int i, int j) { static const int BelowMin = -1; static const int AboveMax = 4; __asm__("xorl %0,%2" : "=r"(i) : "0"(i), "M"(j)); // expected-error{{constraint 'M' expects an integer constant expression}} __asm__("xorl %0,%2" : "=r"(i) : "0"(i), "M"(BelowMin)); // expected-error{{value '-1' out of range for constraint 'M'}} __asm__("xorl %0,%2" : "=r"(i) : "0"(i), "M"(AboveMax)); // expected-error{{value '4' out of range for constraint 'M'}} __asm__("xorl %0,%2" : "=r"(i) : "0"(i), "M"(2)); // expected-no-error } void N(int i, int j) { static const int BelowMin = -1; static const int AboveMax = 256; __asm__("xorl %0,%2" : "=r"(i) : "0"(i), "N"(j)); // expected-error{{constraint 'N' expects an integer constant expression}} __asm__("xorl %0,%2" : "=r"(i) : "0"(i), "N"(BelowMin)); // expected-error{{value '-1' out of range for constraint 'N'}} __asm__("xorl %0,%2" : "=r"(i) : "0"(i), "N"(AboveMax)); // expected-error{{value '256' out of range for constraint 'N'}} __asm__("xorl %0,%2" : "=r"(i) : "0"(i), "N"(128)); // expected-no-error } void O(int i, int j) { static const int BelowMin = -1; static const int AboveMax = 128; __asm__("xorl %0,%2" : "=r"(i) : "0"(i), "O"(j)); // expected-error{{constraint 'O' expects an integer constant expression}} __asm__("xorl %0,%2" : "=r"(i) : "0"(i), "O"(BelowMin)); // expected-error{{value '-1' out of range for constraint 'O'}} __asm__("xorl %0,%2" : "=r"(i) : "0"(i), "O"(AboveMax)); // expected-error{{value '128' out of range for constraint 'O'}} __asm__("xorl %0,%2" : "=r"(i) : "0"(i), "O"(64)); // expected-no-error }
1,923
850
<gh_stars>100-1000 /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #ifndef THIRD_PARTY_TENSORFLOW_CORE_KERNELS_SQL_DRIVER_MANAGER_H_ #define THIRD_PARTY_TENSORFLOW_CORE_KERNELS_SQL_DRIVER_MANAGER_H_ #include "tensorflow/core/kernels/sql/query_connection.h" namespace tensorflow { namespace sql { // A factory class for creating `QueryConnection` instances. class DriverManager { public: // A factory method for creating `QueryConnection` instances. // // `driver_name` is the database type (e.g. 'sqlite'). `driver_name` // corresponds to a `QueryConnection` subclass. For example, if `driver_name` // == `sqlite`, then `CreateQueryConnection` will create a // `SqliteQueryConnection` instance. static std::unique_ptr<QueryConnection> CreateQueryConnection( const string& driver_name); }; } // namespace sql } // namespace tensorflow #endif // THIRD_PARTY_TENSORFLOW_CORE_KERNELS_SQL_DRIVER_MANAGER_H_
463
3,055
/* u8x8_d_st75320.c Universal 8bit Graphics Library (https://github.com/olikraus/u8g2/) Copyright (c) 2019, <EMAIL> All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ST75320: 320x240 monochrome LCD https://github.com/olikraus/u8g2/issues/921 */ #include "u8x8.h" static const uint8_t u8x8_d_st75320_jlx320240_powersave0_seq[] = { U8X8_START_TRANSFER(), /* enable chip, delay is part of the transfer start */ U8X8_C(0x0af), /* display on */ U8X8_END_TRANSFER(), /* disable chip */ U8X8_END() /* end of sequence */ }; static const uint8_t u8x8_d_st75320_jlx320240_powersave1_seq[] = { U8X8_START_TRANSFER(), /* enable chip, delay is part of the transfer start */ U8X8_C(0x0ae), /* display off */ U8X8_END_TRANSFER(), /* disable chip */ U8X8_END() /* end of sequence */ }; static const uint8_t u8x8_d_st75320_jlx320240_flip0_seq[] = { U8X8_START_TRANSFER(), /* enable chip, delay is part of the transfer start */ U8X8_CA(0xC4, 0x02), /* COM Output Status, Bits 0 & 1 */ U8X8_C(0xA1), /* Column Address Direction: Bit 0 */ //U8X8_C(0x0a1), /* segment remap a0/a1*/ //U8X8_C(0x0c8), /* c0: scan dir normal, c8: reverse */ U8X8_END_TRANSFER(), /* disable chip */ U8X8_END() /* end of sequence */ }; static const uint8_t u8x8_d_st75320_jlx320240_flip1_seq[] = { U8X8_START_TRANSFER(), /* enable chip, delay is part of the transfer start */ //U8X8_C(0x0a0), /* segment remap a0/a1*/ //U8X8_C(0x0c0), /* c0: scan dir normal, c8: reverse */ U8X8_CA(0xC4, 0x03), /* COM Output Status, Bits 0 & 1 */ U8X8_C(0xA0), /* Column Address Direction: Bit 0 */ U8X8_END_TRANSFER(), /* disable chip */ U8X8_END() /* end of sequence */ }; /*===================================================*/ static uint8_t u8x8_d_st75320_generic(u8x8_t *u8x8, uint8_t msg, uint8_t arg_int, void *arg_ptr) { uint16_t x; uint8_t c; uint8_t *ptr; switch(msg) { /* handled by the calling function case U8X8_MSG_DISPLAY_SETUP_MEMORY: u8x8_d_helper_display_setup_memory(u8x8, &u8x8_st75320_jlx320240_display_info); break; */ /* handled by the calling function case U8X8_MSG_DISPLAY_INIT: u8x8_d_helper_display_init(u8x8); u8x8_cad_SendSequence(u8x8, u8x8_d_st75320_jlx320240_init_seq); break; */ case U8X8_MSG_DISPLAY_SET_POWER_SAVE: if ( arg_int == 0 ) u8x8_cad_SendSequence(u8x8, u8x8_d_st75320_jlx320240_powersave0_seq); else u8x8_cad_SendSequence(u8x8, 
u8x8_d_st75320_jlx320240_powersave1_seq); break; case U8X8_MSG_DISPLAY_SET_FLIP_MODE: if ( arg_int == 0 ) { u8x8_cad_SendSequence(u8x8, u8x8_d_st75320_jlx320240_flip0_seq); u8x8->x_offset = u8x8->display_info->default_x_offset; } else { u8x8_cad_SendSequence(u8x8, u8x8_d_st75320_jlx320240_flip1_seq); u8x8->x_offset = u8x8->display_info->flipmode_x_offset; } break; #ifdef U8X8_WITH_SET_CONTRAST case U8X8_MSG_DISPLAY_SET_CONTRAST: u8x8_cad_StartTransfer(u8x8); u8x8_cad_SendCmd(u8x8, 0x081 ); u8x8_cad_SendArg(u8x8, arg_int<<2 ); u8x8_cad_SendArg(u8x8, arg_int>>6 ); u8x8_cad_EndTransfer(u8x8); break; #endif case U8X8_MSG_DISPLAY_DRAW_TILE: x = ((u8x8_tile_t *)arg_ptr)->x_pos; x *= 8; x += u8x8->x_offset; u8x8_cad_StartTransfer(u8x8); u8x8_cad_SendCmd(u8x8, 0x013); u8x8_cad_SendArg(u8x8, (x>>8) ); u8x8_cad_SendArg(u8x8, (x&255) ); u8x8_cad_SendCmd(u8x8, 0x0b1 ); u8x8_cad_SendArg(u8x8, (((u8x8_tile_t *)arg_ptr)->y_pos)); u8x8_cad_SendCmd(u8x8, 0x01d ); // write data do { c = ((u8x8_tile_t *)arg_ptr)->cnt; ptr = ((u8x8_tile_t *)arg_ptr)->tile_ptr; /* SendData can not handle more than 255 bytes */ if ( c > 31 ) { u8x8_cad_SendData(u8x8, 248, ptr); /* 31*8=248 */ ptr+=248; c -= 31; } u8x8_cad_SendData(u8x8, c*8, ptr); arg_int--; } while( arg_int > 0 ); u8x8_cad_EndTransfer(u8x8); break; default: return 0; } return 1; } /*===================================================*/ /* QT-2832TSWUG02/ZJY-2832TSWZG02 */ static const uint8_t u8x8_d_st75320_jlx320240_init_seq[] = { U8X8_START_TRANSFER(), /* enable chip, delay is part of the transfer start */ U8X8_C(0xAE), // Display OFF U8X8_CA(0xEA, 0x00), // Power Discharge Control, Discharge OFF U8X8_C(0xA8), // sleep out U8X8_C(0xAB), // OSC ON U8X8_C(0x69), // Temperature Detection ON U8X8_C(0x4E), // TC Setting U8X8_A8(0xff, 0x44, 0x12, 0x11, 0x11, 0x11, 0x22, 0x23), U8X8_CAA(0x39, 0x00, 0x00), //TC Flag U8X8_CA(0x2B, 0x00), // Frame Rate Level U8X8_CAA(0x5F, 0x66, 0x66), // Set Frame Frequency, fFR=80Hz in all temperature range U8X8_CAAA(0xEC, 0x19, 0x64, 0x6e), // FR Compensation Temp. Range, TA = -15 degree, TB = 60 degree, TC = 70 degree U8X8_CAA(0xED, 0x04, 0x04), // Temp. 
Hysteresis Value (thermal sensitivity) U8X8_C(0xA6), // Display Inverse OFF U8X8_C(0xA4), // Disable Display All Pixel ON U8X8_CA(0xC4, 0x02), // COM Output Status U8X8_C(0xA1), // Column Address Direction: MX=0 U8X8_CAA(0x6D, 0x07, 0x00), // Display Area, Duty = 1/240 duty, Start Group = 1 U8X8_C(0x84), // Display Data Input Direction: Column U8X8_CA(0x36, 0x1e), // Set N-Line U8X8_C(0xE4), // N-Line On U8X8_CA(0xE7, 0x19), // LCD Drive Method //NLFR=1// U8X8_CAA(0x81, 0x4f, 0x01), // OX81: Set EV=64h, 0..255, 0..3 U8X8_CA(0xA2, 0x0a), // BIAS //1/16 BIAS U8X8_CA(0x25, 0x020), // Power Control //AVDD ON U8X8_DLY(10), U8X8_CA(0x25, 0x60), // Power Control//AVDD, MV3 & NAVDD ON U8X8_DLY(10), U8X8_CA(0x25, 0x70), // Power Control //AVDD, MV3, NAVDD & V3 ON U8X8_DLY(10), U8X8_CA(0x25, 0x78), // Power Control//AVDD, MV3, NAVDD, V3 & VPF ON U8X8_DLY(10), U8X8_CA(0x25, 0x7c), // Power Control//AVDD, MV3, NAVDD, V3, VPF & VNF ON U8X8_DLY(10), U8X8_CA(0x25, 0x7e), // Power Control//VOUT, AVDD, MV3, NAVDD, V3, VPF & VNF ON U8X8_DLY(10), U8X8_CA(0x25, 0x7f), // Power Control/VOUT, AVDD, MV3, NAVDD, V3, VPF & VNF ON U8X8_DLY(10), //U8X8_C(0xaf); //Display ON U8X8_END_TRANSFER(), /* disable chip */ U8X8_END() /* end of sequence */ }; static const u8x8_display_info_t u8x8_st75320_jlx320240_display_info = { /* chip_enable_level = */ 0, /* chip_disable_level = */ 1, /* post_chip_enable_wait_ns = */ 20, /* pre_chip_disable_wait_ns = */ 20, /* reset_pulse_width_ms = */ 5, /* post_reset_wait_ms = */ 5, /**/ /* sda_setup_time_ns = */ 20, /* */ /* sck_pulse_width_ns = */ 40, /* */ /* sck_clock_hz = */ 4000000UL, /* since Arduino 1.6.0, the SPI bus speed in Hz. Should be 1000000000/sck_pulse_width_ns */ /* spi_mode = */ 0, /* active high, rising edge */ /* i2c_bus_clock_100kHz = */ 4, /* 400KHz */ /* data_setup_time_ns = */ 15, /* write_pulse_width_ns = */ 70, /* tile_width = */ 40, /* tile_hight = */ 30, /* default_x_offset = */ 0, /* flipmode_x_offset = */ 0, /* pixel_width = */ 320, /* pixel_height = */ 240 }; uint8_t u8x8_d_st75320_jlx320240(u8x8_t *u8x8, uint8_t msg, uint8_t arg_int, void *arg_ptr) { if ( u8x8_d_st75320_generic(u8x8, msg, arg_int, arg_ptr) != 0 ) return 1; switch(msg) { case U8X8_MSG_DISPLAY_INIT: u8x8_d_helper_display_init(u8x8); u8x8_cad_SendSequence(u8x8, u8x8_d_st75320_jlx320240_init_seq); break; case U8X8_MSG_DISPLAY_SETUP_MEMORY: u8x8_d_helper_display_setup_memory(u8x8, &u8x8_st75320_jlx320240_display_info); break; default: return 0; } return 1; }
4,738
384
/* * Copyright 2002-2014 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.baidu.jprotobuf.pbrpc.proto; import java.util.ArrayList; import java.util.List; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; import org.junit.After; import org.junit.Test; import com.baidu.jprotobuf.pbrpc.BasePerformaceTest; import com.baidu.jprotobuf.pbrpc.client.ProtobufRpcProxy; import com.baidu.jprotobuf.pbrpc.data.RpcDataPackage; import com.baidu.jprotobuf.pbrpc.proto.EchoInfoClass.EchoInfo; import com.baidu.jprotobuf.pbrpc.transport.RpcClient; import com.baidu.jprotobuf.pbrpc.transport.RpcClientOptions; import com.baidu.jprotobuf.pbrpc.transport.RpcServer; import junit.framework.Assert; /** * Test class for {@link EchoService} * * @author xiemalin * @since 1.0 */ public class EchoServicePerformanceTest extends BasePerformaceTest { RpcServer rpcServer; RpcClient rpcClient; EchoInfo echoInfo; EchoService echoService; RpcDataPackage in; RpcDataPackage out; int totalRequestSize = 1000; /** * set totalRequestSize value to totalRequestSize * @param totalRequestSize the totalRequestSize to set */ public void setTotalRequestSize(int totalRequestSize) { this.totalRequestSize = totalRequestSize; } Runnable runnable = new Runnable() { public void run() { echoService.echo(echoInfo); } }; public void setUp(int threadSize, String requestData, String responseData) { rpcServer = new RpcServer(); EchoServiceImpl echoServiceImpl = new EchoServiceImpl(); rpcServer.registerService(echoServiceImpl); rpcServer.start(PORT); RpcClientOptions options = new RpcClientOptions(); options.setThreadPoolSize(threadSize); options.setMaxIdleSize(threadSize); options.setMaxWait(1000); rpcClient = new RpcClient(options); ProtobufRpcProxy<EchoService> pbrpcProxy = new ProtobufRpcProxy<EchoService>(rpcClient, EchoService.class); pbrpcProxy.setPort(PORT); echoService = pbrpcProxy.proxy(); echoInfo = EchoInfo.newBuilder().setMessage(requestData).build(); echoService.echo(echoInfo); in = buildPackage(requestData.getBytes(), null, null, "echoService", "echo"); out = buildPackage(responseData.getBytes(), null, null, "echoService", "echo"); } @After public void tearDown() { rpcClient.stop(); rpcServer.shutdown(); } @Test public void performanceOneTreadTest() { oneThreadExecute("world", "hello world"); } @Test public void performanceOneTreadTest2() { oneThreadExecute("world", "hello world"); } @Test public void performanceOneTreadTestWithLongText() { String requestString = ""; String responseString = ""; for (int i = 0; i < 100; i++) { requestString += "world world"; responseString += "hello world"; } oneThreadExecute(requestString, responseString); } /** * */ private void oneThreadExecute(String requestString, String responseString) { setUp(1, requestString, responseString); long time = System.currentTimeMillis(); for (int i = 0; i < totalRequestSize; i++) { echoService.echo(echoInfo); } long timetook = 
System.currentTimeMillis() - time; printResult(in, out, totalRequestSize, timetook, 1); } @Test public void performanceTwoTreadsTest() throws Exception { int thread = 2; long timetook = multiExecute(totalRequestSize, thread, "world", "hello world"); printResult(in, out, totalRequestSize, timetook, thread); } @Test public void performanceFourTreadsTest() throws Exception { int thread = 4; long timetook = multiExecute(totalRequestSize, thread, "world", "hello world"); printResult(in, out, totalRequestSize, timetook, thread); } @Test public void performance20TreadsTest() throws Exception { int thread = 20; long timetook = multiExecute(totalRequestSize, thread, "world", "hello world"); printResult(in, out, totalRequestSize, timetook, thread); } @Test public void performance20TreadsTestWithLongText() throws Exception { String requestString = ""; String responseString = ""; for (int i = 0; i < 100; i++) { requestString += "world world"; responseString += "hello world"; } int thread = 20; long timetook = multiExecute(totalRequestSize, thread, requestString, responseString); printResult(in, out, totalRequestSize, timetook, thread); } @Test public void performance40TreadsTestWithLongText() throws Exception { String requestString = ""; String responseString = ""; for (int i = 0; i < 100; i++) { requestString += "world world"; responseString += "hello world"; } int thread = 40; long timetook = multiExecute(totalRequestSize, thread, requestString, responseString); printResult(in, out, totalRequestSize, timetook, thread); } @Test public void multiExecuteValidTest() throws Exception { setUp(100, "hello", "world"); ExecutorService pool = Executors.newFixedThreadPool(100); List<Future<?>> futures = new ArrayList<Future<?>>(10000); for (int i = 0; i < 10000; i++) { final EchoInfo echoInfo = EchoInfo.newBuilder().setMessage(i + "").build(); final int order = i; Runnable runnable = new Runnable() { public void run() { try { EchoInfo echo = echoService.echo(echoInfo); Assert.assertEquals("hello:" + order, echo.getMessage()); } catch (Exception e) { e.printStackTrace(); } } }; Future<?> submit = pool.submit(runnable); futures.add(submit); } for (Future<?> future : futures) { future.get(); } pool.shutdown(); } /** * @param totalRequestSize * @return * @throws InterruptedException * @throws ExecutionException */ private long multiExecute(int totalRequestSize, int multiSize, String requestData, String responseData) throws InterruptedException, ExecutionException { setUp(multiSize, requestData, responseData); ExecutorService pool = Executors.newFixedThreadPool(multiSize); long time = System.currentTimeMillis(); List<Future<?>> futures = new ArrayList<Future<?>>(multiSize); for (int i = 0; i < totalRequestSize; i++) { Future<?> submit = pool.submit(runnable); futures.add(submit); } for (Future<?> future : futures) { future.get(); } long timetook = System.currentTimeMillis() - time; pool.shutdown(); return timetook; } }
3,087
335
{ "word": "Inconvenience", "definitions": [ "Cause trouble or difficulty to." ], "parts-of-speech": "Verb" }
60
317
<reponame>heralex/OTB<gh_stars>100-1000 /* * Copyright (C) 2005-2020 Centre National d'Etudes Spatiales (CNES) * * This file is part of Orfeo Toolbox * * https://www.orfeo-toolbox.org/ * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef otbSqrtSpectralAngleFunctor_h #define otbSqrtSpectralAngleFunctor_h #include "otbSpectralAngleFunctor.h" #include "itkMacro.h" namespace otb { namespace Functor { /** \class SqrtSpectralAngleFunctor * \brief This functor computes the square root of a spectral angle according to a reference pixel. * * \ingroup OTBImageManipulation */ /** \class SpectralAngleFunctor * \brief This functor computes the spectral angle according to a reference pixel. * * \ingroup OTBImageManipulation */ template <class TInput, class TOutputValue> class SqrtSpectralAngleFunctor { public: SqrtSpectralAngleFunctor() { m_ReferencePixel.SetSize(4); m_ReferencePixel.Fill(1); } virtual ~SqrtSpectralAngleFunctor() = default; // Binary operator inline TOutputValue operator()(TInput const & inPix) const { return std::sqrt(SpectralAngleDetails::ComputeSpectralAngle<TInput, TInput, TOutputValue>(inPix, inPix.GetNorm(), m_ReferencePixel, m_RefNorm)); } void SetReferencePixel(TInput const & ref) { m_ReferencePixel = ref; m_RefNorm = ref.GetNorm(); } TInput GetReferencePixel() const { return m_ReferencePixel; } private : TInput m_ReferencePixel; double m_RefNorm; }; } // end namespace Functor } // end namespace otb #ifndef OTB_MANUAL_INSTANTIATION #endif #endif
710
4,213
<gh_stars>1000+ from abc import abstractmethod from inspect import Parameter from typing import Optional, Tuple from parso.tree import search_ancestor from jedi.parser_utils import find_statement_documentation, clean_scope_docstring from jedi.inference.utils import unite from jedi.inference.base_value import ValueSet, NO_VALUES from jedi.inference.cache import inference_state_method_cache from jedi.inference import docstrings from jedi.cache import memoize_method from jedi.inference.helpers import deep_ast_copy, infer_call_of_leaf from jedi.plugins import plugin_manager def _merge_name_docs(names): doc = '' for name in names: if doc: # In case we have multiple values, just return all of them # separated by a few dashes. doc += '\n' + '-' * 30 + '\n' doc += name.py__doc__() return doc class AbstractNameDefinition: start_pos: Optional[Tuple[int, int]] = None string_name: str parent_context = None tree_name = None is_value_name = True """ Used for the Jedi API to know if it's a keyword or an actual name. """ @abstractmethod def infer(self): raise NotImplementedError @abstractmethod def goto(self): # Typically names are already definitions and therefore a goto on that # name will always result on itself. return {self} def get_qualified_names(self, include_module_names=False): qualified_names = self._get_qualified_names() if qualified_names is None or not include_module_names: return qualified_names module_names = self.get_root_context().string_names if module_names is None: return None return module_names + qualified_names def _get_qualified_names(self): # By default, a name has no qualified names. return None def get_root_context(self): return self.parent_context.get_root_context() def get_public_name(self): return self.string_name def __repr__(self): if self.start_pos is None: return '<%s: string_name=%s>' % (self.__class__.__name__, self.string_name) return '<%s: string_name=%s start_pos=%s>' % (self.__class__.__name__, self.string_name, self.start_pos) def is_import(self): return False def py__doc__(self): return '' @property def api_type(self): return self.parent_context.api_type def get_defining_qualified_value(self): """ Returns either None or the value that is public and qualified. Won't return a function, because a name in a function is never public. """ return None class AbstractArbitraryName(AbstractNameDefinition): """ When you e.g. want to complete dicts keys, you probably want to complete string literals, which is not really a name, but for Jedi we use this concept of Name for completions as well. """ is_value_name = False def __init__(self, inference_state, string): self.inference_state = inference_state self.string_name = string self.parent_context = inference_state.builtins_module def infer(self): return NO_VALUES class AbstractTreeName(AbstractNameDefinition): def __init__(self, parent_context, tree_name): self.parent_context = parent_context self.tree_name = tree_name def get_qualified_names(self, include_module_names=False): import_node = search_ancestor(self.tree_name, 'import_name', 'import_from') # For import nodes we cannot just have names, because it's very unclear # how they would look like. For now we just ignore them in most cases. # In case of level == 1, it works always, because it's like a submodule # lookup. if import_node is not None and not (import_node.level == 1 and self.get_root_context().get_value().is_package()): # TODO improve the situation for when level is present. 
if include_module_names and not import_node.level: return tuple(n.value for n in import_node.get_path_for_name(self.tree_name)) else: return None return super().get_qualified_names(include_module_names) def _get_qualified_names(self): parent_names = self.parent_context.get_qualified_names() if parent_names is None: return None return parent_names + (self.tree_name.value,) def get_defining_qualified_value(self): if self.is_import(): raise NotImplementedError("Shouldn't really happen, please report") elif self.parent_context: return self.parent_context.get_value() # Might be None return None def goto(self): context = self.parent_context name = self.tree_name definition = name.get_definition(import_name_always=True) if definition is not None: type_ = definition.type if type_ == 'expr_stmt': # Only take the parent, because if it's more complicated than just # a name it's something you can "goto" again. is_simple_name = name.parent.type not in ('power', 'trailer') if is_simple_name: return [self] elif type_ in ('import_from', 'import_name'): from jedi.inference.imports import goto_import module_names = goto_import(context, name) return module_names else: return [self] else: from jedi.inference.imports import follow_error_node_imports_if_possible values = follow_error_node_imports_if_possible(context, name) if values is not None: return [value.name for value in values] par = name.parent node_type = par.type if node_type == 'argument' and par.children[1] == '=' and par.children[0] == name: # Named param goto. trailer = par.parent if trailer.type == 'arglist': trailer = trailer.parent if trailer.type != 'classdef': if trailer.type == 'decorator': value_set = context.infer_node(trailer.children[1]) else: i = trailer.parent.children.index(trailer) to_infer = trailer.parent.children[:i] if to_infer[0] == 'await': to_infer.pop(0) value_set = context.infer_node(to_infer[0]) from jedi.inference.syntax_tree import infer_trailer for trailer in to_infer[1:]: value_set = infer_trailer(context, value_set, trailer) param_names = [] for value in value_set: for signature in value.get_signatures(): for param_name in signature.get_param_names(): if param_name.string_name == name.value: param_names.append(param_name) return param_names elif node_type == 'dotted_name': # Is a decorator. 
index = par.children.index(name) if index > 0: new_dotted = deep_ast_copy(par) new_dotted.children[index - 1:] = [] values = context.infer_node(new_dotted) return unite( value.goto(name, name_context=context) for value in values ) if node_type == 'trailer' and par.children[0] == '.': values = infer_call_of_leaf(context, name, cut_own_trailer=True) return values.goto(name, name_context=context) else: stmt = search_ancestor( name, 'expr_stmt', 'lambdef' ) or name if stmt.type == 'lambdef': stmt = name return context.goto(name, position=stmt.start_pos) def is_import(self): imp = search_ancestor(self.tree_name, 'import_from', 'import_name') return imp is not None @property def string_name(self): return self.tree_name.value @property def start_pos(self): return self.tree_name.start_pos class ValueNameMixin: def infer(self): return ValueSet([self._value]) def py__doc__(self): doc = self._value.py__doc__() if not doc and self._value.is_stub(): from jedi.inference.gradual.conversion import convert_names names = convert_names([self], prefer_stub_to_compiled=False) if self not in names: return _merge_name_docs(names) return doc def _get_qualified_names(self): return self._value.get_qualified_names() def get_root_context(self): if self.parent_context is None: # A module return self._value.as_context() return super().get_root_context() def get_defining_qualified_value(self): context = self.parent_context if context.is_module() or context.is_class(): return self.parent_context.get_value() # Might be None return None @property def api_type(self): return self._value.api_type class ValueName(ValueNameMixin, AbstractTreeName): def __init__(self, value, tree_name): super().__init__(value.parent_context, tree_name) self._value = value def goto(self): return ValueSet([self._value.name]) class TreeNameDefinition(AbstractTreeName): _API_TYPES = dict( import_name='module', import_from='module', funcdef='function', param='param', classdef='class', ) def infer(self): # Refactor this, should probably be here. from jedi.inference.syntax_tree import tree_name_to_values return tree_name_to_values( self.parent_context.inference_state, self.parent_context, self.tree_name ) @property def api_type(self): definition = self.tree_name.get_definition(import_name_always=True) if definition is None: return 'statement' return self._API_TYPES.get(definition.type, 'statement') def assignment_indexes(self): """ Returns an array of tuple(int, node) of the indexes that are used in tuple assignments. For example if the name is ``y`` in the following code:: x, (y, z) = 2, '' would result in ``[(1, xyz_node), (0, yz_node)]``. 
When searching for b in the case ``a, *b, c = [...]`` it will return:: [(slice(1, -1), abc_node)] """ indexes = [] is_star_expr = False node = self.tree_name.parent compare = self.tree_name while node is not None: if node.type in ('testlist', 'testlist_comp', 'testlist_star_expr', 'exprlist'): for i, child in enumerate(node.children): if child == compare: index = int(i / 2) if is_star_expr: from_end = int((len(node.children) - i) / 2) index = slice(index, -from_end) indexes.insert(0, (index, node)) break else: raise LookupError("Couldn't find the assignment.") is_star_expr = False elif node.type == 'star_expr': is_star_expr = True elif node.type in ('expr_stmt', 'sync_comp_for'): break compare = node node = node.parent return indexes @property def inference_state(self): # Used by the cache function below return self.parent_context.inference_state @inference_state_method_cache(default='') def py__doc__(self): api_type = self.api_type if api_type in ('function', 'class', 'property'): if self.parent_context.get_root_context().is_stub(): from jedi.inference.gradual.conversion import convert_names names = convert_names([self], prefer_stub_to_compiled=False) if self not in names: return _merge_name_docs(names) # Make sure the names are not TreeNameDefinitions anymore. return clean_scope_docstring(self.tree_name.get_definition()) if api_type == 'module': names = self.goto() if self not in names: return _merge_name_docs(names) if api_type == 'statement' and self.tree_name.is_definition(): return find_statement_documentation(self.tree_name.get_definition()) return '' class _ParamMixin: def maybe_positional_argument(self, include_star=True): options = [Parameter.POSITIONAL_ONLY, Parameter.POSITIONAL_OR_KEYWORD] if include_star: options.append(Parameter.VAR_POSITIONAL) return self.get_kind() in options def maybe_keyword_argument(self, include_stars=True): options = [Parameter.KEYWORD_ONLY, Parameter.POSITIONAL_OR_KEYWORD] if include_stars: options.append(Parameter.VAR_KEYWORD) return self.get_kind() in options def _kind_string(self): kind = self.get_kind() if kind == Parameter.VAR_POSITIONAL: # *args return '*' if kind == Parameter.VAR_KEYWORD: # **kwargs return '**' return '' def get_qualified_names(self, include_module_names=False): return None class ParamNameInterface(_ParamMixin): api_type = 'param' def get_kind(self): raise NotImplementedError def to_string(self): raise NotImplementedError def get_executed_param_name(self): """ For dealing with type inference and working around the graph, we sometimes want to have the param name of the execution. This feels a bit strange and we might have to refactor at some point. For now however it exists to avoid infering params when we don't really need them (e.g. when we can just instead use annotations. 
""" return None @property def star_count(self): kind = self.get_kind() if kind == Parameter.VAR_POSITIONAL: return 1 if kind == Parameter.VAR_KEYWORD: return 2 return 0 def infer_default(self): return NO_VALUES class BaseTreeParamName(ParamNameInterface, AbstractTreeName): annotation_node = None default_node = None def to_string(self): output = self._kind_string() + self.get_public_name() annotation = self.annotation_node default = self.default_node if annotation is not None: output += ': ' + annotation.get_code(include_prefix=False) if default is not None: output += '=' + default.get_code(include_prefix=False) return output def get_public_name(self): name = self.string_name if name.startswith('__'): # Params starting with __ are an equivalent to positional only # variables in typeshed. name = name[2:] return name def goto(self, **kwargs): return [self] class _ActualTreeParamName(BaseTreeParamName): def __init__(self, function_value, tree_name): super().__init__( function_value.get_default_param_context(), tree_name) self.function_value = function_value def _get_param_node(self): return search_ancestor(self.tree_name, 'param') @property def annotation_node(self): return self._get_param_node().annotation def infer_annotation(self, execute_annotation=True, ignore_stars=False): from jedi.inference.gradual.annotation import infer_param values = infer_param( self.function_value, self._get_param_node(), ignore_stars=ignore_stars) if execute_annotation: values = values.execute_annotation() return values def infer_default(self): node = self.default_node if node is None: return NO_VALUES return self.parent_context.infer_node(node) @property def default_node(self): return self._get_param_node().default def get_kind(self): tree_param = self._get_param_node() if tree_param.star_count == 1: # *args return Parameter.VAR_POSITIONAL if tree_param.star_count == 2: # **kwargs return Parameter.VAR_KEYWORD # Params starting with __ are an equivalent to positional only # variables in typeshed. 
if tree_param.name.value.startswith('__'): return Parameter.POSITIONAL_ONLY parent = tree_param.parent param_appeared = False for p in parent.children: if param_appeared: if p == '/': return Parameter.POSITIONAL_ONLY else: if p == '*': return Parameter.KEYWORD_ONLY if p.type == 'param': if p.star_count: return Parameter.KEYWORD_ONLY if p == tree_param: param_appeared = True return Parameter.POSITIONAL_OR_KEYWORD def infer(self): values = self.infer_annotation() if values: return values doc_params = docstrings.infer_param(self.function_value, self._get_param_node()) return doc_params class AnonymousParamName(_ActualTreeParamName): @plugin_manager.decorate(name='goto_anonymous_param') def goto(self): return super().goto() @plugin_manager.decorate(name='infer_anonymous_param') def infer(self): values = super().infer() if values: return values from jedi.inference.dynamic_params import dynamic_param_lookup param = self._get_param_node() values = dynamic_param_lookup(self.function_value, param.position_index) if values: return values if param.star_count == 1: from jedi.inference.value.iterable import FakeTuple value = FakeTuple(self.function_value.inference_state, []) elif param.star_count == 2: from jedi.inference.value.iterable import FakeDict value = FakeDict(self.function_value.inference_state, {}) elif param.default is None: return NO_VALUES else: return self.function_value.parent_context.infer_node(param.default) return ValueSet({value}) class ParamName(_ActualTreeParamName): def __init__(self, function_value, tree_name, arguments): super().__init__(function_value, tree_name) self.arguments = arguments def infer(self): values = super().infer() if values: return values return self.get_executed_param_name().infer() def get_executed_param_name(self): from jedi.inference.param import get_executed_param_names params_names = get_executed_param_names(self.function_value, self.arguments) return params_names[self._get_param_node().position_index] class ParamNameWrapper(_ParamMixin): def __init__(self, param_name): self._wrapped_param_name = param_name def __getattr__(self, name): return getattr(self._wrapped_param_name, name) def __repr__(self): return '<%s: %s>' % (self.__class__.__name__, self._wrapped_param_name) class ImportName(AbstractNameDefinition): start_pos = (1, 0) _level = 0 def __init__(self, parent_context, string_name): self._from_module_context = parent_context self.string_name = string_name def get_qualified_names(self, include_module_names=False): if include_module_names: if self._level: assert self._level == 1, "Everything else is not supported for now" module_names = self._from_module_context.string_names if module_names is None: return module_names return module_names + (self.string_name,) return (self.string_name,) return () @property def parent_context(self): m = self._from_module_context import_values = self.infer() if not import_values: return m # It's almost always possible to find the import or to not find it. The # importing returns only one value, pretty much always. 
return next(iter(import_values)).as_context() @memoize_method def infer(self): from jedi.inference.imports import Importer m = self._from_module_context return Importer(m.inference_state, [self.string_name], m, level=self._level).follow() def goto(self): return [m.name for m in self.infer()] @property def api_type(self): return 'module' def py__doc__(self): return _merge_name_docs(self.goto()) class SubModuleName(ImportName): _level = 1 class NameWrapper: def __init__(self, wrapped_name): self._wrapped_name = wrapped_name def __getattr__(self, name): return getattr(self._wrapped_name, name) def __repr__(self): return '%s(%s)' % (self.__class__.__name__, self._wrapped_name) class StubNameMixin: def py__doc__(self): from jedi.inference.gradual.conversion import convert_names # Stubs are not complicated and we can just follow simple statements # that have an equals in them, because they typically make something # else public. See e.g. stubs for `requests`. names = [self] if self.api_type == 'statement' and '=' in self.tree_name.get_definition().children: names = [v.name for v in self.infer()] names = convert_names(names, prefer_stub_to_compiled=False) if self in names: return super().py__doc__() else: # We have signatures ourselves in stubs, so don't use signatures # from the implementation. return _merge_name_docs(names) # From here on down we make looking up the sys.version_info fast. class StubName(StubNameMixin, TreeNameDefinition): def infer(self): inferred = super().infer() if self.string_name == 'version_info' and self.get_root_context().py__name__() == 'sys': from jedi.inference.gradual.stub_value import VersionInfo return ValueSet(VersionInfo(c) for c in inferred) return inferred class ModuleName(ValueNameMixin, AbstractNameDefinition): start_pos = 1, 0 def __init__(self, value, name): self._value = value self._name = name @property def string_name(self): return self._name class StubModuleName(StubNameMixin, ModuleName): pass
10,260
1,350
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. // Code generated by Microsoft (R) AutoRest Code Generator. package com.azure.communication.callingserver.implementation.models; import com.azure.core.annotation.Fluent; import com.fasterxml.jackson.annotation.JsonProperty; /** The add participant result. */ @Fluent public final class AddParticipantResultInternal { /* * The id of the added participant. */ @JsonProperty(value = "participantId") private String participantId; /** * Get the participantId property: The id of the added participant. * * @return the participantId value. */ public String getParticipantId() { return this.participantId; } /** * Set the participantId property: The id of the added participant. * * @param participantId the participantId value to set. * @return the AddParticipantResultInternal object itself. */ public AddParticipantResultInternal setParticipantId(String participantId) { this.participantId = participantId; return this; } }
366
1,609
# # Copyright (C) 2000 <NAME> # """ unit tests for the Neural network trainer implementation this basically works out **all** of the network code """ import unittest from rdkit.ML.Neural.ActFuncs import Sigmoid, TanH from rdkit.ML.Neural.NetNode import NetNode from rdkit.ML.Neural.Network import Network class TestCaseActFuncs(unittest.TestCase): def test_Sigmoid(self): f = Sigmoid() self.assertAlmostEqual(f(0), 0.5) self.assertAlmostEqual(f(0), f.Eval(0)) self.assertAlmostEqual(f.Deriv(0), 0.25) self.assertAlmostEqual(f(1), 1.0 - f(-1)) self.assertAlmostEqual(f(2), 1.0 - f(-2)) self.assertAlmostEqual(f.Deriv(1), f.Deriv(-1)) self.assertAlmostEqual(f.Deriv(2), f.Deriv(-2)) self.assertLess(f(1), f(2)) self.assertLess(f.Deriv(2), f.Deriv(1)) self.assertAlmostEqual(f.Deriv(1), f.DerivFromVal(f(1))) def test_TanH(self): f = TanH() self.assertAlmostEqual(f(0), 0.0) self.assertAlmostEqual(f(0), f.Eval(0)) self.assertAlmostEqual(f.Deriv(0), 1.0) self.assertAlmostEqual(f(1), -f(-1)) self.assertAlmostEqual(f(2), -f(-2)) self.assertAlmostEqual(f.Deriv(1), f.Deriv(-1)) self.assertAlmostEqual(f.Deriv(2), f.Deriv(-2)) self.assertLess(f(1), f(2)) self.assertLess(f.Deriv(2), f.Deriv(1)) self.assertAlmostEqual(f.Deriv(1), f.DerivFromVal(f(1))) class TestCaseNetNode(unittest.TestCase): def test_NetNode(self): # A node without input always returns 1 nodeList = [None] * 2 node = NetNode(0, nodeList) nodeList[0] = node valVect = [None] * 2 self.assertEqual(node.Eval(valVect), 1) self.assertEqual(valVect, [1, None]) node = NetNode(1, nodeList, inputNodes=[0], weights=[0.1]) self.assertRaises(AssertionError, node.SetWeights, [0, 1]) self.assertRaises(AssertionError, node.SetInputs, [0, 1]) class TestCaseNetwork(unittest.TestCase): def test_Network(self): nodeCounts = [2, 2, 1, 2] net = Network(nodeCounts) self.assertEqual(net.GetNumNodes(), 7) self.assertEqual(len(net.GetAllNodes()), 7) self.assertEqual(net.GetInputNodeList(), [0, 1]) self.assertEqual(net.GetHiddenLayerNodeList(0), [2, 3]) self.assertEqual(net.GetHiddenLayerNodeList(1), [4]) self.assertEqual(net.GetOutputNodeList(), [5, 6]) # We get a representation of the network s = str(net) self.assertIn('Network', s) if __name__ == '__main__': # pragma: nocover unittest.main()
1,060
596
<gh_stars>100-1000 #pragma once #include "Common/Common.h" #include "ThreadEvent.h" #include <thread> #include <string> class Runnable; class ThreadManager; class RunnableThread { friend class ThreadManager; public: RunnableThread(); virtual ~RunnableThread(); static RunnableThread* Create(Runnable* runnable, const std::string& threadName); virtual void WaitForCompletion() { if (m_LocalThread->joinable()) { m_LocalThread->join(); } } const uint64 GetThreadID() const { return m_ThreadID; } const std::string& GetThreadName() const { return m_ThreadName; } protected: virtual bool CreateInternal(Runnable* runnable, const std::string& threadName); virtual void PreRun(); virtual int32 Run(); virtual void PostRun(); static void ThreadFunction(void* pThis); protected: std::string m_ThreadName; Runnable* m_Runnable; ThreadEvent* m_InitSyncEvent; uint64 m_ThreadID; std::thread* m_LocalThread; };
518
4,036
<filename>python/ql/test/experimental/dataflow/typetracking/import_as_attr.py<gh_stars>1000+ from module import attr as attr_ref x = attr_ref def fun(): y = attr_ref # The following should _not_ be a reference to the above module, since we don't actually import it. z = module
98
1,844
<filename>beatrix/src/test/java/org/killbill/billing/beatrix/integration/db/TestDBRouter.java /* * Copyright 2014-2018 Groupon, Inc * Copyright 2014-2018 The Billing Project, LLC * * The Billing Project licenses this file to you under the Apache License, version 2.0 * (the "License"); you may not use this file except in compliance with the * License. You may obtain a copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. */ package org.killbill.billing.beatrix.integration.db; import java.util.concurrent.Callable; import java.util.concurrent.atomic.AtomicInteger; import javax.inject.Inject; import org.joda.time.DateTime; import org.killbill.billing.account.api.Account; import org.killbill.billing.api.TestApiListener.NextEvent; import org.killbill.billing.beatrix.integration.TestIntegrationBase; import org.killbill.billing.callcontext.DefaultTenantContext; import org.killbill.billing.catalog.api.BillingPeriod; import org.killbill.billing.catalog.api.ProductCategory; import org.killbill.billing.entitlement.api.DefaultEntitlement; import org.killbill.billing.notification.plugin.api.ExtBusEvent; import org.killbill.billing.osgi.api.ROTenantContext; import org.killbill.billing.util.callcontext.TenantContext; import org.killbill.bus.api.PersistentBus.EventBusException; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; import com.google.common.eventbus.Subscribe; import static java.util.concurrent.TimeUnit.SECONDS; import static org.awaitility.Awaitility.await; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertNotNull; public class TestDBRouter extends TestIntegrationBase { @Inject private TestDBRouterAPI testDBRouterAPI; private PublicListener publicListener; private AtomicInteger externalBusCount; @Override @BeforeMethod(groups = "slow") public void beforeMethod() throws Exception { if (hasFailed()) { return; } super.beforeMethod(); this.externalBusCount = new AtomicInteger(0); testDBRouterAPI.reset(); } @Override protected void registerHandlers() throws EventBusException { super.registerHandlers(); publicListener = new PublicListener(); externalBus.register(publicListener); } @AfterMethod(groups = "slow") public void afterMethod() throws Exception { if (hasFailed()) { return; } externalBus.unregister(publicListener); super.afterMethod(); } @Test(groups = "slow") public void testWithBusEvents() throws Exception { final DateTime initialDate = new DateTime(2012, 2, 1, 0, 3, 42, 0, testTimeZone); clock.setTime(initialDate); final Account account = createAccountWithNonOsgiPaymentMethod(getAccountData(2)); assertNotNull(account); final DefaultEntitlement bpEntitlement = createBaseEntitlementAndCheckForCompletion(account.getId(), "externalKey", "Shotgun", ProductCategory.BASE, BillingPeriod.MONTHLY, NextEvent.CREATE, NextEvent.BLOCK, NextEvent.INVOICE); assertNotNull(bpEntitlement); await().atMost(10, SECONDS) .until(new Callable<Boolean>() { @Override public Boolean call() throws Exception { // Expecting ACCOUNT_CREATE, ACCOUNT_CHANGE, SUBSCRIPTION_CREATION (2), ENTITLEMENT_CREATE INVOICE_CREATION return externalBusCount.get() == 6; } }); } private void 
assertNbCalls(final int expectedNbRWCalls, final int expectedNbROCalls) { assertEquals(testDBRouterAPI.getNbRWCalls(), expectedNbRWCalls); assertEquals(testDBRouterAPI.getNbRoCalls(), expectedNbROCalls); } public class PublicListener { @Subscribe public void handleExternalEvents(final ExtBusEvent event) { testDBRouterAPI.reset(); final TenantContext tenantContext = new DefaultTenantContext(callContext.getAccountId(), callContext.getTenantId()); // Only RO tenant will trigger use of RO DBI (initiated by plugins) final ROTenantContext roTenantContext = new ROTenantContext(tenantContext); // RO calls goes to RW DB by default testDBRouterAPI.doROCall(tenantContext); assertNbCalls(1, 0); testDBRouterAPI.doROCall(callContext); assertNbCalls(2, 0); // Even if the thread is dirty (previous RW calls), the plugin asked for RO DBI testDBRouterAPI.doROCall(roTenantContext); assertNbCalls(2, 1); // Make sure subsequent calls go back to the RW DB testDBRouterAPI.doROCall(tenantContext); assertNbCalls(3, 1); testDBRouterAPI.doRWCall(callContext); assertNbCalls(4, 1); testDBRouterAPI.doROCall(roTenantContext); assertNbCalls(4, 2); testDBRouterAPI.doROCall(callContext); assertNbCalls(5, 2); testDBRouterAPI.doROCall(tenantContext); assertNbCalls(6, 2); testDBRouterAPI.doChainedROCall(tenantContext); assertNbCalls(7, 2); testDBRouterAPI.doChainedRWCall(callContext); assertNbCalls(8, 2); // Increment only if there are no errors externalBusCount.incrementAndGet(); } } }
2,295
307
<gh_stars>100-1000 /* * Copyright 2017 Microsoft, Inc. * * Licensed under the Apache License, Version 2.0 (the "License") * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.spinnaker.halyard.cli.command.v1.config.providers.azure; import com.beust.jcommander.Parameter; import com.beust.jcommander.Parameters; import com.netflix.spinnaker.halyard.cli.command.v1.config.providers.bakery.AbstractEditBaseImageCommand; import com.netflix.spinnaker.halyard.config.model.v1.node.BaseImage; import com.netflix.spinnaker.halyard.config.model.v1.providers.azure.AzureBaseImage; @Parameters(separators = "=") public class AzureEditBaseImageCommand extends AbstractEditBaseImageCommand<AzureBaseImage> { @Override protected String getProviderName() { return "azure"; } @Parameter( names = "--publisher", description = AzureCommandProperties.IMAGE_PUBLISHER_DESCRIPTION) private String publisher; @Parameter(names = "--offer", description = AzureCommandProperties.IMAGE_OFFER_DESCRIPTION) private String offer; @Parameter(names = "--sku", description = AzureCommandProperties.IMAGE_SKU_DESCRIPTION) private String sku; @Parameter( names = "--image-version", // just using '--version' would conflict with the global parameter description = AzureCommandProperties.IMAGE_VERSION_DESCRIPTION) private String version; @Override protected BaseImage editBaseImage(AzureBaseImage baseImage) { AzureBaseImage.AzureOperatingSystemSettings imageSettings = baseImage.getBaseImage(); imageSettings = imageSettings != null ? imageSettings : new AzureBaseImage.AzureOperatingSystemSettings(); imageSettings.setPublisher(isSet(publisher) ? publisher : imageSettings.getPublisher()); imageSettings.setOffer(isSet(offer) ? offer : imageSettings.getOffer()); imageSettings.setSku(isSet(sku) ? sku : imageSettings.getSku()); imageSettings.setVersion(isSet(version) ? version : imageSettings.getVersion()); baseImage.setBaseImage(imageSettings); return baseImage; } }
753
190,993
<filename>tensorflow/core/kernels/control_flow_ops.h /* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #ifndef TENSORFLOW_CORE_KERNELS_CONTROL_FLOW_OPS_H_ #define TENSORFLOW_CORE_KERNELS_CONTROL_FLOW_OPS_H_ #include "tensorflow/core/framework/op_kernel.h" namespace tensorflow { // A ControlTriggerOp is similar to a NoOp. However, it always treats the input // control edges as Live edges. Its primary use so far is in the scheduling of // recvs, where we add ControlTrigger nodes and use them to trigger recvs. We // allow ControlTrigger nodes to be enabled by dead nodes. class ControlTriggerOp : public OpKernel { public: explicit ControlTriggerOp(OpKernelConstruction* context) : OpKernel(context) {} void Compute(OpKernelContext* context) override {} bool IsExpensive() override { return false; } }; // A switch op has two inputs and two outputs. It forwards the value of // Input:0 to the output specified by input:1. Input:1 is a boolean tensor. // Input:0 is forwarded to output:0 if input:1 is false, otherwise to // output:1. class SwitchOp : public OpKernel { public: explicit SwitchOp(OpKernelConstruction* context) : OpKernel(context) {} void Compute(OpKernelContext* context) override; bool IsExpensive() override { return false; } ~SwitchOp() override {} TF_DISALLOW_COPY_AND_ASSIGN(SwitchOp); }; // An n-way switch op has two inputs and N outputs. It forwards the value of // Input:0 to the output specified by Input:1. Input:1 is an integer tensor. // Input:0 is forwarded to output:0 if Input:1 is 0, to output:1 if 1, and so // forth. If Input:1 is <0 or >=num_outputs(), Input:0 is forwarded to // output:num_outputs()-1. class SwitchNOp : public OpKernel { public: explicit SwitchNOp(OpKernelConstruction* context) : OpKernel(context) {} void Compute(OpKernelContext* context) override; bool IsExpensive() override { return false; } ~SwitchNOp() override {} TF_DISALLOW_COPY_AND_ASSIGN(SwitchNOp); }; // A merge op has n inputs and two outputs. It forwards the value of the // first input that becomes available to its first output, and the // index of the first input to its second output. class MergeOp : public OpKernel { public: explicit MergeOp(OpKernelConstruction* context); void Compute(OpKernelContext* context) override; bool IsExpensive() override { return false; } ~MergeOp() override {} TF_DISALLOW_COPY_AND_ASSIGN(MergeOp); }; // An enter op has one input and one output. It creates or finds // the child frame that is uniquely identified by the frame_name, // and makes its input available to the child frame. class EnterOp : public OpKernel { public: explicit EnterOp(OpKernelConstruction* context) : OpKernel(context) {} void Compute(OpKernelContext* context) override; bool IsExpensive() override { return false; } ~EnterOp() override {} TF_DISALLOW_COPY_AND_ASSIGN(EnterOp); }; // An exit op has one input and one output. 
It exits the current // frame to its parent frame, and makes its input available to the // parent frame. class ExitOp : public OpKernel { public: explicit ExitOp(OpKernelConstruction* context) : OpKernel(context) {} void Compute(OpKernelContext* context) override; bool IsExpensive() override { return false; } ~ExitOp() override {} TF_DISALLOW_COPY_AND_ASSIGN(ExitOp); }; // A next_iteration op has one input and one output. It makes its input // available to the next iteration. class NextIterationOp : public OpKernel { public: explicit NextIterationOp(OpKernelConstruction* context) : OpKernel(context) {} void Compute(OpKernelContext* context) override; bool IsExpensive() override { return false; } ~NextIterationOp() override {} TF_DISALLOW_COPY_AND_ASSIGN(NextIterationOp); }; // A LoopCond op has one input and one output. The input is a boolean // scalar representing the taken branches of the "pivot" Switch that // determines loop termination. As a contract, any high-level front-end // should always use port '0' of the "pivot" switches for loop exit. class LoopCondOp : public OpKernel { public: explicit LoopCondOp(OpKernelConstruction* context); ~LoopCondOp() override; void Compute(OpKernelContext* context) override; bool IsExpensive() override; TF_DISALLOW_COPY_AND_ASSIGN(LoopCondOp); }; } // namespace tensorflow #endif // TENSORFLOW_CORE_KERNELS_CONTROL_FLOW_OPS_H_
1,481
1,350
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. // Code generated by Microsoft (R) AutoRest Code Generator. package com.azure.resourcemanager.healthbot.fluent.models; import com.azure.core.annotation.Fluent; import com.azure.core.management.Resource; import com.azure.core.util.logging.ClientLogger; import com.azure.resourcemanager.healthbot.models.HealthBotProperties; import com.azure.resourcemanager.healthbot.models.Sku; import com.fasterxml.jackson.annotation.JsonIgnore; import com.fasterxml.jackson.annotation.JsonProperty; import java.util.Map; /** HealthBot resource definition. */ @Fluent public final class HealthBotInner extends Resource { @JsonIgnore private final ClientLogger logger = new ClientLogger(HealthBotInner.class); /* * SKU of the HealthBot. */ @JsonProperty(value = "sku", required = true) private Sku sku; /* * The set of properties specific to Healthbot resource. */ @JsonProperty(value = "properties") private HealthBotProperties properties; /** * Get the sku property: SKU of the HealthBot. * * @return the sku value. */ public Sku sku() { return this.sku; } /** * Set the sku property: SKU of the HealthBot. * * @param sku the sku value to set. * @return the HealthBotInner object itself. */ public HealthBotInner withSku(Sku sku) { this.sku = sku; return this; } /** * Get the properties property: The set of properties specific to Healthbot resource. * * @return the properties value. */ public HealthBotProperties properties() { return this.properties; } /** * Set the properties property: The set of properties specific to Healthbot resource. * * @param properties the properties value to set. * @return the HealthBotInner object itself. */ public HealthBotInner withProperties(HealthBotProperties properties) { this.properties = properties; return this; } /** {@inheritDoc} */ @Override public HealthBotInner withLocation(String location) { super.withLocation(location); return this; } /** {@inheritDoc} */ @Override public HealthBotInner withTags(Map<String, String> tags) { super.withTags(tags); return this; } /** * Validates the instance. * * @throws IllegalArgumentException thrown if the instance is not valid. */ public void validate() { if (sku() == null) { throw logger .logExceptionAsError( new IllegalArgumentException("Missing required property sku in model HealthBotInner")); } else { sku().validate(); } if (properties() != null) { properties().validate(); } } }
1,113
6,958
<gh_stars>1000+ // // BF16Backend.hpp // MNN // // Created by MNN on 2020/01/26. // Copyright © 2018, Alibaba Group Holding Limited // #ifndef BF16Backend_hpp #define BF16Backend_hpp #include "backend/cpu/CPUBackend.hpp" #include "core/Macro.h" #include "core/TensorUtils.hpp" namespace MNN { class BF16Backend : public CPUBackend { public: virtual ~BF16Backend(); BF16Backend(const CPURuntime* runtime); virtual Execution* onCreate(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs, const MNN::Op* op) override; virtual bool onAcquireBuffer(const Tensor* nativeTensor, StorageType storageType) override; virtual void onCopyBuffer(const Tensor* srcTensor, const Tensor* dstTensor) const override; int numberThread() const { return threadNumber(); } public: class BF16Creator { public: virtual Execution* onCreate(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs, const MNN::Op* op, Backend* backend) const = 0; }; static bool addBF16Creator(OpType t, BF16Creator* ct); }; #define REGISTER_BF16_OP_CREATOR(type, creator) \ void ___##type##__##creator##__() { \ BF16Backend::addBF16Creator(type, new creator); \ } } // namespace MNN #endif /* BF16Backend_hpp */
554
8,315
<filename>epoxy-processortest/src/test/resources/RequireAbstractModelPassesClassWithAttribute.java package com.airbnb.epoxy.configtest; import com.airbnb.epoxy.EpoxyAttribute; import com.airbnb.epoxy.EpoxyModel; public abstract class RequireAbstractModelPassesClassWithAttribute extends EpoxyModel<Object> { @EpoxyAttribute String value; @Override protected int getDefaultLayout() { return 0; } }
125
987
<filename>bounter/tests/hashtable/test_htc_update.py #!/usr/bin/env python # -*- coding: utf-8 -*- # # Author: <NAME> <<EMAIL>> # Copyright (C) 2017 Rare Technologies # # This code is distributed under the terms and conditions # from the MIT License (MIT). import unittest from bounter import HashTable long_long_max = 9223372036854775807 class HashTableUpdateTest(unittest.TestCase): """ Functional tests for HashTable.update method, which adds another counter, dictionary, hashtable, tuple or list """ def setUp(self): self.ht = HashTable(buckets=64) def test_update_numbers(self): """ Negative test: calling update using numeric values as parameter yields TypeError """ with self.assertRaises(TypeError): self.ht.update(1) with self.assertRaises(TypeError): self.ht.update(1.0) def test_update_string(self): self.ht.update("foo") self.assertEqual(self.ht['f'], 1) self.assertEqual(self.ht['o'], 2) def test_update_tuple(self): tuple = ('foo', 'bar', 'foo') self.ht.update(tuple) self.assertEqual(self.ht['foo'], 2) self.assertEqual(self.ht['bar'], 1) def test_update_list(self): self.ht.update([str(i % 3) for i in range(5)]) self.assertEqual(self.ht['0'], 2) self.assertEqual(self.ht['1'], 2) self.assertEqual(self.ht['2'], 1) def test_update_split(self): self.ht.update("This is a sentence".split()) self.assertEqual(self.ht['is'], 1) self.assertEqual(self.ht['this'], 0) # lowercase def test_update_twice(self): tuple = ('foo', 'bar', 'foo') self.ht.update(tuple) self.ht.update(('foo', 'bar', 'foo')) self.assertEqual(self.ht['foo'], 4) self.assertEqual(self.ht['bar'], 2) def test_update_bytes(self): tuple = ('foo', 'bar', b'foo') self.ht.update(tuple) self.assertEqual(self.ht['foo'], 2) self.assertEqual(self.ht[b'foo'], 2) def test_update_unicode(self): tuple = ('foo', 'bar', u'foo') self.ht.update(tuple) self.assertEqual(self.ht['foo'], 2) self.assertEqual(self.ht[u'foo'], 2) def test_update_with_dictionary(self): """ Update with a dictionary and test against it using set representation """ data = {'a': 1, 'b': 3, 'c': 2, 'd': 5} self.ht.update(data) self.assertEqual(self.ht['b'], 3) self.assertEqual(set(self.ht.items()), set(data.items())) def test_update_with_hashtable(self): """ Update with a dictionary and test against it using set representation """ data1 = {'a': 1, 'b': 3, 'c': 2, 'd': 5} data2 = {'a': 18, 'b': 4, 'c': 6, 'e': 13} expected = {'a': 19, 'b': 7, 'c': 8, 'd': 5, 'e': 13} self.ht.update(data1) ht2 = HashTable(64) ht2.update(data2) self.ht.update(ht2) self.assertEqual(set(self.ht.items()), set(expected.items())) if __name__ == '__main__': unittest.main()
1,438
2,077
<filename>Libraries/xcdriver/Sources/LicenseAction.cpp /** Copyright (c) 2016-present, Facebook, Inc. All rights reserved. This source code is licensed under the BSD-style license found in the LICENSE file in the root directory of this source tree. */ #include <xcdriver/LicenseAction.h> #include <string> #include <cstdio> /* * Generated from project license. The check below is extra-conservative * in that it fails if __has_include is not available (even though the * header may be), since the mechanism is more complex than a usual include. */ #if defined(__has_include) #if __has_include("LICENSE.h") #include "LICENSE.h" #else static char const LICENSE[] = "<unavailable>"; #endif #else static char const LICENSE[] = "<unavailable>"; #endif using xcdriver::LicenseAction; LicenseAction:: LicenseAction() { } LicenseAction:: ~LicenseAction() { } int LicenseAction:: Run() { std::string license = std::string(LICENSE, sizeof(LICENSE)); fprintf(stdout, "%s\n", license.c_str()); return 0; }
324
712
<gh_stars>100-1000 /** * Auto-configuration for Spring Cloud GCP Trace module related to Sleuth and Stackdriver integration. */ package org.springframework.cloud.gcp.autoconfigure.trace.sleuth;
56
335
{ "word": "Untested", "definitions": [ "(of an idea, product, or person) not subjected to examination, experiment, or experience; unproven." ], "parts-of-speech": "Adjective" }
75
1,247
// Copyright 2019 <NAME>. See LICENSE file for terms. #include "test.hpp" void func(int n, int k, int j) { path_start(); int x=0; if (n == 0) { x++; } if (k == 0) { x++; } if (j == 0) { x++; } if (x == 3) { path_goal(); } // Not enough increments to reach 5. volatile int t = x; // volatile to prevent optimization of nongoal if (t == 5) { path_nongoal(); } } int main() { func(INT_RAND, INT_RAND, INT_RAND); }
205
432
<reponame>lambdaxymox/DragonFlyBSD<gh_stars>100-1000 /* mpc_pow_d -- Raise a complex number to a double-precision power. Copyright (C) 2009 INRIA This file is part of GNU MPC. GNU MPC is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. GNU MPC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this program. If not, see http://www.gnu.org/licenses/ . */ #include <stdio.h> /* for MPC_ASSERT */ #include <float.h> /* for DBL_MANT_DIG */ #include "mpc-impl.h" int mpc_pow_d (mpc_ptr z, mpc_srcptr x, double y, mpc_rnd_t rnd) { mpc_t yy; int inex; MPC_ASSERT(FLT_RADIX == 2); mpc_init3 (yy, DBL_MANT_DIG, MPFR_PREC_MIN); mpc_set_d (yy, y, MPC_RNDNN); /* exact */ inex = mpc_pow (z, x, yy, rnd); mpc_clear (yy); return inex; }
442
412
/*******************************************************************\ Module: Loop Acceleration Author: <NAME> \*******************************************************************/ /// \file /// Loop Acceleration #ifndef CPROVER_GOTO_INSTRUMENT_ACCELERATE_ENUMERATING_LOOP_ACCELERATION_H #define CPROVER_GOTO_INSTRUMENT_ACCELERATE_ENUMERATING_LOOP_ACCELERATION_H #include <memory> #include <util/make_unique.h> #include <goto-programs/goto_program.h> #include <analyses/natural_loops.h> #include "polynomial_accelerator.h" #include "path_enumerator.h" #include "sat_path_enumerator.h" class enumerating_loop_accelerationt { public: enumerating_loop_accelerationt( message_handlert &message_handler, symbol_tablet &_symbol_table, goto_functionst &_goto_functions, goto_programt &_goto_program, natural_loops_mutablet::natural_loopt &_loop, goto_programt::targett _loop_header, int _path_limit, guard_managert &guard_manager) : symbol_table(_symbol_table), goto_functions(_goto_functions), goto_program(_goto_program), loop(_loop), loop_header(_loop_header), guard_manager(guard_manager), polynomial_accelerator( message_handler, symbol_table, goto_functions, guard_manager), path_limit(_path_limit), path_enumerator(util_make_unique<sat_path_enumeratort>( message_handler, symbol_table, goto_functions, goto_program, loop, loop_header, guard_manager)) { } bool accelerate(path_acceleratort &accelerator); protected: symbol_tablet &symbol_table; goto_functionst &goto_functions; goto_programt &goto_program; natural_loops_mutablet::natural_loopt &loop; goto_programt::targett loop_header; guard_managert &guard_manager; polynomial_acceleratort polynomial_accelerator; int path_limit; std::unique_ptr<path_enumeratort> path_enumerator; }; #endif // CPROVER_GOTO_INSTRUMENT_ACCELERATE_ENUMERATING_LOOP_ACCELERATION_H
815
2,671
<filename>test/run/t392.py<gh_stars>1000+ # Ship class class Ship: def __init__(self, name): self.name = name self.thrust = False def thrust(self): self.thrust = True print "Thrust", self.thrust my_ship = Ship("a_name") my_ship.thrust()
131
881
from snakeeyes.blueprints.page.views import page
13
692
/* * Copyright (C) 2013, Northwestern University and Argonne National Laboratory * See COPYRIGHT notice in top-level directory. */ /* $Id: header_consistency.c 2744 2016-12-28 16:25:22Z wkliao $ */ /* This program tests if PnetCDF can detect file header inconsistency and * overwrite the inconsistent header with root's. * This program is designed to run on more than 2 MPI processes. */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <libgen.h> /* basename() */ #include <mpi.h> #include <pnetcdf.h> #include <testutils.h> #define ERR_EXP(e, exp) {if (e != exp) { printf("Error (line %d): expecting error code %s but got %s\n", __LINE__, nc_err_code_name(exp), nc_err_code_name(e)); nerrs++; }} #define ERR_EXP2(e, exp1, exp2) {if (e != exp1 && e != exp2 && e != NC_EFILE) { printf("Error (line %d): expecting error code %s or %s but got %s\n", __LINE__, nc_err_code_name(exp1), nc_err_code_name(exp2), nc_err_code_name(e)); nerrs++; }} #define CHECK_ERR(expect) { \ if (safe_mode) { \ if (err != NC_EMULTIDEFINE && err != expect) { \ printf("Error (line %d): expecting error code NC_EMULTIDEFINE or %s but got %s\n", __LINE__, nc_err_code_name(expect), nc_err_code_name(err)); \ nerrs++; \ } \ } \ else if (rank > 0) { \ if (err != expect) { \ printf("Error (line %d): expecting error code %s but got %s\n", __LINE__, nc_err_code_name(expect), nc_err_code_name(err)); \ nerrs++; \ } \ } \ } #define ERR {if(err!=NC_NOERR) {printf("Error(%d) at line %d: %s\n",err,__LINE__,ncmpi_strerror(err)); nerrs++; }} /*----< test_open_mode() >----------------------------------------------------*/ static int test_open_mode(char *filename, int safe_mode) { int err, rank, ncid, cmode, omode, nerrs=0; MPI_Info info=MPI_INFO_NULL; MPI_Comm comm=MPI_COMM_WORLD; MPI_Comm_rank(comm, &rank); /* Test inconsistent cmode -----------------------------------------------*/ cmode = NC_CLOBBER|NC_64BIT_OFFSET; if (rank == 0) cmode = NC_CLOBBER; err = ncmpi_create(comm, filename, cmode, info, &ncid); if (safe_mode) ERR_EXP(err, NC_EMULTIDEFINE_CMODE) else { if (rank > 0) ERR_EXP(err, NC_EMULTIDEFINE_CMODE) err = ncmpi_close(ncid); ERR } /* Test inconsistent omode -----------------------------------------------*/ omode = NC_WRITE; if (rank == 0) omode = NC_NOWRITE; err = ncmpi_open(comm, filename, omode, info, &ncid); if (safe_mode) ERR_EXP(err, NC_EMULTIDEFINE_OMODE) else { if (rank > 0) ERR_EXP(err, NC_EMULTIDEFINE_OMODE) err = ncmpi_close(ncid); ERR } return nerrs; } /*----< test_dim() >----------------------------------------------------------*/ static int test_dim(char *filename, int safe_mode) { int err, rank, ncid, cmode, dimid1, dimid2, dimid3, nerrs=0; MPI_Info info=MPI_INFO_NULL; MPI_Comm comm=MPI_COMM_WORLD; MPI_Comm_rank(comm, &rank); cmode = NC_CLOBBER|NC_64BIT_OFFSET; /* Test inconsistency on dimension names ---------------------------------*/ err = ncmpi_create(comm, filename, cmode, info, &ncid); ERR if (rank == 0) err = ncmpi_def_dim(ncid, "y", 100, &dimid1); else err = ncmpi_def_dim(ncid, "xx", 100, &dimid1); if (safe_mode) CHECK_ERR(NC_EMULTIDEFINE_DIM_NAME) else ERR err = ncmpi_enddef(ncid); ERR if (safe_mode) { /* no processes should be able to see dim "y" */ err = ncmpi_inq_dimid(ncid, "y", &dimid2); CHECK_ERR(NC_EBADDIM) /* no process should be able to see dim "x" */ err = ncmpi_inq_dimid(ncid, "xx", &dimid3); CHECK_ERR(NC_EBADDIM) } err = ncmpi_close(ncid); ERR /* Test inconsistency on dimension size ----------------------------------*/ err = ncmpi_create(comm, filename, cmode, info, &ncid); ERR if 
(rank == 0) err = ncmpi_def_dim(ncid, "x", 99, &dimid1); else err = ncmpi_def_dim(ncid, "x", 100, &dimid1); if (safe_mode) CHECK_ERR(NC_EMULTIDEFINE_DIM_SIZE) else ERR err = ncmpi_close(ncid); ERR return nerrs; } /*----< test_attr() >---------------------------------------------------------*/ static int test_attr(char *filename, int safe_mode) { int err, rank, ncid, cmode, nerrs=0; char gattr[128]; int int_attr; float flt_attr; MPI_Info info=MPI_INFO_NULL; MPI_Comm comm=MPI_COMM_WORLD; MPI_Comm_rank(comm, &rank); cmode = NC_CLOBBER|NC_64BIT_OFFSET; /* Test inconsistent global attribute name -------------------------------*/ err = ncmpi_create(comm, filename, cmode, info, &ncid); ERR int_attr = 1; sprintf(gattr, "gattr_name.%d",rank); err = ncmpi_put_att_int(ncid, NC_GLOBAL, gattr, NC_INT, 1, &int_attr); if (safe_mode) CHECK_ERR(NC_EMULTIDEFINE_ATTR_NAME) else ERR err = ncmpi_close(ncid); ERR /* Test inconsistent global attribute type -------------------------------*/ err = ncmpi_create(comm, filename, cmode, info, &ncid); ERR if (rank == 0) err = ncmpi_put_att_int(ncid, NC_GLOBAL, "gatt", NC_INT, 1, &int_attr); else err = ncmpi_put_att_float(ncid, NC_GLOBAL, "gatt", NC_FLOAT, 1, &flt_attr); if (safe_mode) CHECK_ERR(NC_EMULTIDEFINE_ATTR_TYPE) else ERR err = ncmpi_close(ncid); ERR /* Test inconsistent global attribute length -----------------------------*/ err = ncmpi_create(comm, filename, cmode, info, &ncid); ERR int intv[2]={1,2}; if (rank == 0) err = ncmpi_put_att_int(ncid, NC_GLOBAL, "gatt", NC_INT, 2, intv); else err = ncmpi_put_att_int(ncid, NC_GLOBAL, "gatt", NC_INT, 1, intv); if (safe_mode) CHECK_ERR(NC_EMULTIDEFINE_ATTR_LEN) else ERR err = ncmpi_close(ncid); ERR /* Test inconsistent global attribute length -----------------------------*/ err = ncmpi_create(comm, filename, cmode, info, &ncid); ERR if (rank == 0) intv[1]=3; err = ncmpi_put_att_int(ncid, NC_GLOBAL, "gatt", NC_INT, 2, intv); if (safe_mode) CHECK_ERR(NC_EMULTIDEFINE_ATTR_VAL) else ERR err = ncmpi_close(ncid); ERR return nerrs; } /*----< test_var() >----------------------------------------------------------*/ static int test_var(char *filename, int safe_mode) { int err, rank, ncid, cmode, nerrs=0; int dimid[3], varid1, int_attr; float flt_attr; char name[128], var_attr[128]; MPI_Info info=MPI_INFO_NULL; MPI_Comm comm=MPI_COMM_WORLD; MPI_Comm_rank(comm, &rank); cmode = NC_CLOBBER|NC_64BIT_OFFSET; /* Test inconsistent global attribute name -------------------------------*/ err = ncmpi_create(comm, filename, cmode, info, &ncid); ERR err = ncmpi_def_dim(ncid, "dim1", NC_UNLIMITED, &dimid[0]); ERR err = ncmpi_def_var(ncid, "var1", NC_INT, 1, dimid, &varid1); ERR int_attr = 1; sprintf(var_attr, "var_attr_name.%d",rank); err = ncmpi_put_att_int(ncid, varid1, var_attr, NC_INT, 1, &int_attr); if (safe_mode) CHECK_ERR(NC_EMULTIDEFINE_ATTR_NAME) else ERR err = ncmpi_close(ncid); ERR /* Test inconsistent global attribute type -------------------------------*/ err = ncmpi_create(comm, filename, cmode, info, &ncid); ERR err = ncmpi_def_dim(ncid, "dim1", NC_UNLIMITED, &dimid[0]); ERR err = ncmpi_def_var(ncid, "var1", NC_INT, 1, dimid, &varid1); ERR if (rank == 0) err = ncmpi_put_att_int(ncid, varid1, "var_att", NC_INT, 1, &int_attr); else err = ncmpi_put_att_float(ncid, varid1, "var_att", NC_FLOAT, 1, &flt_attr); if (safe_mode) CHECK_ERR(NC_EMULTIDEFINE_ATTR_TYPE) else ERR err = ncmpi_close(ncid); ERR /* Test inconsistent global attribute length -----------------------------*/ err = ncmpi_create(comm, filename, cmode, info, &ncid); ERR 
err = ncmpi_def_dim(ncid, "dim1", NC_UNLIMITED, &dimid[0]); ERR err = ncmpi_def_var(ncid, "var1", NC_INT, 1, dimid, &varid1); ERR int intv[2]={1,2}; if (rank == 0) err = ncmpi_put_att_int(ncid, varid1, "var_att", NC_INT, 2, intv); else err = ncmpi_put_att_int(ncid, varid1, "var_att", NC_INT, 1, intv); if (safe_mode) CHECK_ERR(NC_EMULTIDEFINE_ATTR_LEN) else ERR err = ncmpi_close(ncid); ERR /* Test inconsistent global attribute length -----------------------------*/ err = ncmpi_create(comm, filename, cmode, info, &ncid); ERR err = ncmpi_def_dim(ncid, "dim1", NC_UNLIMITED, &dimid[0]); ERR err = ncmpi_def_var(ncid, "var1", NC_INT, 1, dimid, &varid1); ERR if (rank == 0) intv[1]=3; err = ncmpi_put_att_int(ncid, varid1, "var_att", NC_INT, 2, intv); if (safe_mode) CHECK_ERR(NC_EMULTIDEFINE_ATTR_VAL) else ERR err = ncmpi_close(ncid); ERR /* Test inconsistent variable name ---------------------------------------*/ err = ncmpi_create(comm, filename, cmode, info, &ncid); ERR err = ncmpi_def_dim(ncid, "dim1", NC_UNLIMITED, &dimid[0]); ERR sprintf(name, "var.%d",rank); err = ncmpi_def_var(ncid, name, NC_INT, 1, dimid, &varid1); if (safe_mode) CHECK_ERR(NC_EMULTIDEFINE_VAR_NAME) else ERR err = ncmpi_close(ncid); ERR /* Test inconsistent variable ndims --------------------------------------*/ err = ncmpi_create(comm, filename, cmode, info, &ncid); ERR err = ncmpi_def_dim(ncid, "dim0", 3, &dimid[0]); ERR err = ncmpi_def_dim(ncid, "dim1", 2, &dimid[1]); ERR if (rank == 0) err = ncmpi_def_var(ncid, "var", NC_FLOAT, 2, dimid, &varid1); else err = ncmpi_def_var(ncid, "var", NC_FLOAT, 1, dimid, &varid1); if (safe_mode) CHECK_ERR(NC_EMULTIDEFINE_VAR_NDIMS) else ERR err = ncmpi_close(ncid); ERR /* Test inconsistent variable type ---------------------------------------*/ err = ncmpi_create(comm, filename, cmode, info, &ncid); ERR err = ncmpi_def_dim(ncid, "dim1", NC_UNLIMITED, &dimid[0]); ERR if (rank == 0) err = ncmpi_def_var(ncid, "var", NC_INT, 1, dimid, &varid1); else err = ncmpi_def_var(ncid, "var", NC_FLOAT, 1, dimid, &varid1); if (safe_mode) CHECK_ERR(NC_EMULTIDEFINE_VAR_TYPE) else ERR err = ncmpi_close(ncid); ERR /* Test inconsistent variable length -------------------------------------*/ err = ncmpi_create(comm, filename, cmode, info, &ncid); ERR err = ncmpi_def_dim(ncid, "dim0", 5, &dimid[0]); ERR err = ncmpi_def_dim(ncid, "dim1", 4, &dimid[1]); ERR err = ncmpi_def_dim(ncid, "dim2", 3, &dimid[2]); ERR if (rank == 0) err = ncmpi_def_var(ncid, "var", NC_FLOAT, 2, dimid, &varid1); else err = ncmpi_def_var(ncid, "var", NC_FLOAT, 2, dimid+1, &varid1); if (safe_mode) CHECK_ERR(NC_EMULTIDEFINE_VAR_DIMIDS) else ERR err = ncmpi_close(ncid); ERR /* Test inconsistent variable dimension IDs ------------------------------*/ err = ncmpi_create(comm, filename, cmode, info, &ncid); ERR err = ncmpi_def_dim(ncid, "Z", 3, &dimid[0]); ERR err = ncmpi_def_dim(ncid, "Y", 3, &dimid[1]); ERR err = ncmpi_def_dim(ncid, "X", 3, &dimid[2]); ERR if (rank == 0) err = ncmpi_def_var(ncid, "var", NC_FLOAT, 2, dimid+1, &varid1); else err = ncmpi_def_var(ncid, "var", NC_FLOAT, 2, dimid, &varid1); if (safe_mode) CHECK_ERR(NC_EMULTIDEFINE_VAR_DIMIDS) else ERR err = ncmpi_close(ncid); ERR return nerrs; } /*----< main() >--------------------------------------------------------------*/ int main(int argc, char **argv) { char *filename="testfile.nc", *mode[2] = {"0", "1"}; int i, rank, nprocs, verbose, nerrs=0; MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &nprocs); if (nprocs < 2) { if (!rank) 
printf("This program is for running 2 or more processes. Exiting ...\n"); MPI_Finalize(); return 0; } if (argc > 2) { if (!rank) printf("Usage: %s [filename]\n",argv[0]); MPI_Finalize(); return 0; } if (argc == 2) filename = argv[1]; if (rank == 0) { char *cmd_str = (char*)malloc(strlen(argv[0]) + 256); sprintf(cmd_str, "*** TESTING C %s for header consistency", basename(argv[0])); printf("%-66s ------ ", cmd_str); free(cmd_str); } verbose = 1; for (i=verbose; i>=0; i--) { /* test with safe mode off and on : * Note even if --enable-debug is set at configure time, safe mode * can still be disabled by setting the environment variable * PNETCDF_SAFE_MODE to 0. */ setenv("PNETCDF_SAFE_MODE", mode[i], 1); nerrs += test_open_mode(filename, i); nerrs += test_dim(filename, i); nerrs += test_attr(filename, i); nerrs += test_var(filename, i); } MPI_Offset malloc_size, sum_size; int err = ncmpi_inq_malloc_size(&malloc_size); if (err == NC_NOERR) { MPI_Reduce(&malloc_size, &sum_size, 1, MPI_OFFSET, MPI_SUM, 0, MPI_COMM_WORLD); if (rank == 0 && sum_size > 0) printf("heap memory allocated by PnetCDF internally has %lld bytes yet to be freed\n", sum_size); } MPI_Allreduce(MPI_IN_PLACE, &nerrs, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD); if (rank == 0) { if (nerrs) printf(FAIL_STR,nerrs); else printf(PASS_STR); } MPI_Finalize(); return 0; }
6,288
1,738
<reponame>jeikabu/lumberyard /* * All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or * its licensors. * * For complete copyright and license terms please see the LICENSE at the root of this * distribution (the "License"). All use of this software is governed by the License, * or, if provided, by the license below or the license accompanying this file. Do not * remove or modify any license notices. This file is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * */ // Original file Copyright Crytek GMBH or its affiliates, used under license. #ifndef CRYINCLUDE_EDITOR_AI_NAVDATAGENERATION_AIQUADTREE_H #define CRYINCLUDE_EDITOR_AI_NAVDATAGENERATION_AIQUADTREE_H #pragma once #include <vector> // The element stored in CAIQuadTree must provide the following interface: // // The z component of the AABB should be ignored: // bool DoesIntersectAABB(const AABB& aabb); // // const AABB &GetAABB(); // // There should be a counter that CAIQuadTree can use internally - shouldn't // be used for anything else: // mutable int m_counter // // Returns some debug info about this element // const char *GetDebugName() const template<typename SAIQuadTreeElement> class CAIQuadTree { public: CAIQuadTree(); ~CAIQuadTree(); /// Adds a element to the list of elements, but doesn't add it to the QuadTree void AddElement(const SAIQuadTreeElement& element); /// Clears elements and cells /// if the freeMemory is set to true, the element index array will be freed, /// otherwise it will be reset preserving the allocated memory. void Clear(bool freeMemory = true); /// Builds the QuadTree from scratch (not incrementally) - deleting any previous /// tree. /// Building the QuadTree will involve placing all elements into the root cell. /// Then this cell gets pushed onto a stack of cells to examine. This stack /// will get parsed and every cell containing more than maxElementsPerCell /// will get split into 8 children, and all the original elements in that cell /// will get partitioned between the children. A element can end up in multiple /// cells (possibly a lot!) if it straddles a boundary. Therefore when intersection /// tests are done SAIQuadTreeElement::m_counter can be set/tested using a counter to avoid /// properly testing the triangle multiple times (the counter _might_ wrap around, /// so when it wraps ALL the element flags should be cleared! Could do this /// incrementally...). void BuildQuadTree(int maxElementsPerCell, float minCellSize); /// Returns elements that claim to intersect the point (ignoring z), and the number. unsigned GetElements(std::vector<const SAIQuadTreeElement*>& elements, const Vec3& point) const; /// Returns elements that claim to intersect the aabb (ignoring z), and the number. unsigned GetElements(std::vector<const SAIQuadTreeElement*>& elements, const AABB& aabb) const; /// Dumps our contents void Dump(const char* debugName) const; private: /// Internally we don't store pointers but store indices into a single contiguous /// array of cells and triangles (so that the vectors can get resized). /// /// Each cell will either contain children OR contain triangles. struct CQuadTreeCell { /// constructor clears everything CQuadTreeCell(); /// constructor clears everything CQuadTreeCell(const AABB& aabb); /// Sets all child indices to -1 and clears the triangle indices. 
void Clear(); /// Indicates if we contain triangles (if not then we should/might have children) bool IsLeaf() const {return m_childCellIndices[0] == -1; } /// indices into the children - P means "plus" and M means "minus" and the /// letters are xy. So PM means +ve x, -ve y enum EChild { PP, PM, MP, MM, NUM_CHILDREN }; /// indices of the children (if not leaf). Will be -1 if there is no child int m_childCellIndices[NUM_CHILDREN]; /// indices of the elements (if leaf) std::vector<int> m_elementIndices; /// Bounding box for the space we own AABB m_aabb; }; /// Functor that can be passed to std::sort so that it sorts equal sized cells along a specified /// direction such that cells near the beginning of a line with dirPtr come at the end of the /// sorted container. This means they get processed first when that container is used as a stack. struct CCellSorter { CCellSorter(const Vec3* dirPtr, const std::vector<CQuadTreeCell>* cellsPtr) : m_dirPtr(dirPtr) , m_cellsPtr(cellsPtr) {} bool operator()(int cell1Index, int cell2Index) const { Vec3 delta = (*m_cellsPtr)[cell2Index].m_aabb.min - (*m_cellsPtr)[cell1Index].m_aabb.min; return (delta * *m_dirPtr) < 0.0f; } const Vec3* m_dirPtr; const std::vector<CQuadTreeCell>* m_cellsPtr; }; /// Create a bounding box appropriate for a child, based on a parents AABB AABB CreateAABB(const AABB& aabb, typename CQuadTreeCell::EChild child) const; /// Returns true if the triangle intersects or is contained by a cell bool DoesElementIntersectCell(const SAIQuadTreeElement& element, const CQuadTreeCell& cell) const; /// Increment our test counter, wrapping around if necessary and zapping the /// triangle counters. /// Const because we only modify mutable members. void IncrementTestCounter() const; /// Dumps the cell and all its children, indented void DumpCell(const CQuadTreeCell& cell, int indentLevel) const; /// All our cells. The only thing guaranteed about this is that m_cell[0] (if /// it exists) is the root cell. std::vector<CQuadTreeCell> m_cells; /// All our elements. std::vector<SAIQuadTreeElement> m_elements; AABB m_boundingBox; /// During intersection testing we keep a stack of cells to test (rather than recursing) - /// to avoid excessive memory allocation we don't free the memory between calls unless /// the user calls FreeTemporaryMemory(); mutable std::vector<int> m_cellsToTest; /// Counter used to prevent multiple tests when triangles are contained in more than /// one cell mutable unsigned m_testCounter; }; #include "AIQuadTree.inl" #endif // CRYINCLUDE_EDITOR_AI_NAVDATAGENERATION_AIQUADTREE_H
2,188
746
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # Functional tests running EXPLAIN statements. # import re from tests.common.impala_test_suite import ImpalaTestSuite from tests.common.skip import SkipIfLocal, SkipIfNotHdfsMinicluster, SkipIfEC from tests.util.filesystem_utils import WAREHOUSE # Tests the different explain levels [0-3] on a few queries. # TODO: Clean up this test to use an explain level test dimension and appropriate # result sub-sections for the expected explain plans. @SkipIfEC.fix_later class TestExplain(ImpalaTestSuite): # Value for the num_scanner_threads query option to ensure that the memory estimates of # scan nodes are consistent even when run on machines with different numbers of cores. NUM_SCANNER_THREADS = 1 @classmethod def get_workload(self): return 'functional-query' @classmethod def add_test_dimensions(cls): super(TestExplain, cls).add_test_dimensions() cls.ImpalaTestMatrix.add_constraint(lambda v:\ v.get_value('table_format').file_format == 'text' and\ v.get_value('table_format').compression_codec == 'none' and\ v.get_value('exec_option')['batch_size'] == 0 and\ v.get_value('exec_option')['disable_codegen'] == False and\ v.get_value('exec_option')['num_nodes'] != 1) @SkipIfNotHdfsMinicluster.plans def test_explain_level0(self, vector): vector.get_value('exec_option')['num_scanner_threads'] = self.NUM_SCANNER_THREADS vector.get_value('exec_option')['explain_level'] = 0 self.run_test_case('QueryTest/explain-level0', vector) @SkipIfNotHdfsMinicluster.plans def test_explain_level1(self, vector): vector.get_value('exec_option')['num_scanner_threads'] = self.NUM_SCANNER_THREADS vector.get_value('exec_option')['explain_level'] = 1 self.run_test_case('QueryTest/explain-level1', vector) @SkipIfNotHdfsMinicluster.plans def test_explain_level2(self, vector): vector.get_value('exec_option')['num_scanner_threads'] = self.NUM_SCANNER_THREADS vector.get_value('exec_option')['explain_level'] = 2 self.run_test_case('QueryTest/explain-level2', vector) @SkipIfNotHdfsMinicluster.plans def test_explain_level3(self, vector): vector.get_value('exec_option')['num_scanner_threads'] = self.NUM_SCANNER_THREADS vector.get_value('exec_option')['explain_level'] = 3 self.run_test_case('QueryTest/explain-level3', vector) @staticmethod def check_row_size_and_cardinality(query_result, expected_row_size=None, expected_cardinality=None): regex = re.compile('tuple-ids=.+ row-size=(\d+)B cardinality=(.*)') found_match = False for res in query_result: m = regex.match(res.strip()) if m: found_match = True assert len(m.groups()) == 2 if expected_row_size: assert m.groups()[0] == expected_row_size if expected_cardinality: assert m.groups()[1] == expected_cardinality assert found_match, query_result def test_explain_validate_cardinality_estimates(self, vector, unique_database): # 
Tests that the cardinality estimates are correct for partitioned tables. # TODO Cardinality estimation tests should eventually be part of the planner tests. # TODO Remove this test db_name = 'functional' tbl_name = 'alltypes' def check_cardinality(query_result, expected_cardinality): self.check_row_size_and_cardinality( query_result, expected_cardinality=expected_cardinality) # All partitions are filtered out, cardinality should be 0. result = self.execute_query("explain select * from %s.%s where year = 1900" % ( db_name, tbl_name), query_options={'explain_level':3}) check_cardinality(result.data, '0') # Half of the partitions are filtered out, cardinality should be 3650. result = self.execute_query("explain select * from %s.%s where year = 2010" % ( db_name, tbl_name), query_options={'explain_level':3}) check_cardinality(result.data, '3.65K') # None of the partitions are filtered out, cardinality should be 7300. result = self.execute_query("explain select * from %s.%s" % (db_name, tbl_name), query_options={'explain_level':3}) check_cardinality(result.data, '7.30K') # Create a partitioned table with a mixed set of available stats. mixed_tbl = unique_database + ".t" self.execute_query( "create table %s (c int) partitioned by (p int)" % mixed_tbl) self.execute_query( "insert into table %s partition (p) values(1,1),(2,2),(3,3)" % mixed_tbl) # Set the number of rows at the table level. self.execute_query( "alter table %s set tblproperties('numRows'='100')" % mixed_tbl) # Should fall back to table-level cardinality when partitions lack stats. result = self.execute_query("explain select * from %s" % mixed_tbl, query_options={'explain_level':3}) check_cardinality(result.data, '100') # Should fall back to table-level cardinality, even for a subset of partitions. result = self.execute_query("explain select * from %s where p = 1" % mixed_tbl, query_options={'explain_level':3}) check_cardinality(result.data, '100') # Set the number of rows at the table level to -1. self.execute_query( "alter table %s set tblproperties('numRows'='-1')" % mixed_tbl) # Set the number of rows for a single partition. self.execute_query( "alter table %s partition(p=1) set tblproperties('numRows'='50')" % mixed_tbl) # Use partition stats when available. Row counts for partitions without # stats are estimated. result = self.execute_query("explain select * from %s" % mixed_tbl, query_options={'explain_level':3}) check_cardinality(result.data, '51') # Set the number of rows at the table level back to 100. self.execute_query( "alter table %s set tblproperties('numRows'='100')" % mixed_tbl) # Fall back to table-level stats when no selected partitions have stats. result = self.execute_query("explain select * from %s where p = 2" % mixed_tbl, query_options={'explain_level':3}) check_cardinality(result.data, '100') def test_explain_row_size_estimates(self, vector, unique_database): """ Tests that EXPLAIN returns the expected row sizes with and without stats. Planner tests are probably a more logical place for this, but covering string avg_size handling end-to-end seemed easier here. Note that row sizes do not include the null indicator bytes, so actual tuple sizes are a bit larger. 
""" def check_row_size(query_result, expected_row_size): self.check_row_size_and_cardinality( query_result, expected_row_size=expected_row_size) def execute_explain(query): return self.execute_query("explain " + query, query_options={'explain_level': 3}) FQ_TBL_NAME = unique_database + ".t" self.execute_query("create table %s (i int, s string)" % FQ_TBL_NAME) # Fill the table with data that leads to avg_size of 4 for 's'. self.execute_query("insert into %s values (1, '123'), (2, '12345')" % FQ_TBL_NAME) # Always use slot size for fixed sized types. result = execute_explain("select i from %s" % FQ_TBL_NAME) check_row_size(result.data, '4') # If there are no stats, use slot size for variable length types. result = execute_explain("select s from %s" % FQ_TBL_NAME) check_row_size(result.data, "12") self.execute_query("compute stats %s" % FQ_TBL_NAME) # Always use slot size for fixed sized types. result = execute_explain("select i from %s" % FQ_TBL_NAME) check_row_size(result.data, '4') # If there are no stats, use slot size + avg_size for variable length types. result = execute_explain("select s from %s" % FQ_TBL_NAME) check_row_size(result.data, "16") class TestExplainEmptyPartition(ImpalaTestSuite): TEST_DB_NAME = "imp_1708" def setup_method(self, method): self.cleanup_db(self.TEST_DB_NAME) self.execute_query("create database if not exists {0} location '{1}/{0}.db'" .format(self.TEST_DB_NAME, WAREHOUSE)) def teardown_method(self, method): self.cleanup_db(self.TEST_DB_NAME) @SkipIfLocal.hdfs_client def test_non_empty_partition_0_rows(self): """Regression test for IMPALA-1708: if a partition has 0 rows but > 0 files after COMPUTE STATS, don't warn the user about missing stats. The files are probably corrupted, or used for something else.""" self.client.execute("SET EXPLAIN_LEVEL=3") self.client.execute("CREATE TABLE %s.empty_partition (col int) " "partitioned by (p int)" % self.TEST_DB_NAME) self.client.execute( "ALTER TABLE %s.empty_partition ADD PARTITION (p=NULL)" % self.TEST_DB_NAME) # Put an empty file in the partition so we have > 0 files, but 0 rows self.filesystem_client.create_file( "test-warehouse/%s.db/empty_partition/p=__HIVE_DEFAULT_PARTITION__/empty" % self.TEST_DB_NAME, "") self.client.execute("REFRESH %s.empty_partition" % self.TEST_DB_NAME) self.client.execute("COMPUTE STATS %s.empty_partition" % self.TEST_DB_NAME) assert "NULL\t0\t1" in str( self.client.execute("SHOW PARTITIONS %s.empty_partition" % self.TEST_DB_NAME)) assert "missing relevant table and/or column statistics" not in str( self.client.execute( "EXPLAIN SELECT * FROM %s.empty_partition" % self.TEST_DB_NAME)) # Now add a partition with some data (so it gets selected into the scan), to check # that its lack of stats is correctly identified self.client.execute( "ALTER TABLE %s.empty_partition ADD PARTITION (p=1)" % self.TEST_DB_NAME) self.filesystem_client.create_file("test-warehouse/%s.db/empty_partition/p=1/rows" % self.TEST_DB_NAME, "1") self.client.execute("REFRESH %s.empty_partition" % self.TEST_DB_NAME) explain_result = str( self.client.execute("EXPLAIN SELECT * FROM %s.empty_partition" % self.TEST_DB_NAME)) assert "missing relevant table and/or column statistics" in explain_result # Also test IMPALA-1530 - adding the number of partitions missing stats assert "partitions: 1/2 " in explain_result
4,022
613
<filename>src/test/java/nl/jqno/equalsverifier/testhelpers/FactoryCacheFactory.java package nl.jqno.equalsverifier.testhelpers; import static nl.jqno.equalsverifier.internal.prefabvalues.factories.Factories.values; import nl.jqno.equalsverifier.internal.prefabvalues.FactoryCache; public final class FactoryCacheFactory { private FactoryCacheFactory() {} public static FactoryCache withPrimitiveFactories() { FactoryCache factoryCache = new FactoryCache(); factoryCache.put(boolean.class, values(true, false, true)); factoryCache.put(byte.class, values((byte) 1, (byte) 2, (byte) 1)); factoryCache.put(char.class, values('a', 'b', 'a')); factoryCache.put(double.class, values(0.5D, 1.0D, 0.5D)); factoryCache.put(float.class, values(0.5F, 1.0F, 0.5F)); factoryCache.put(int.class, values(1, 2, 1)); factoryCache.put(long.class, values(1L, 2L, 1L)); factoryCache.put(short.class, values((short) 1, (short) 2, (short) 1)); return factoryCache; } }
413
14,668
<reponame>zealoussnow/chromium // Copyright 2021 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef IOS_CHROME_BROWSER_SESSIONS_SCENE_UTIL_TEST_SUPPORT_H_ #define IOS_CHROME_BROWSER_SESSIONS_SCENE_UTIL_TEST_SUPPORT_H_ #import <UIKit/UIKit.h> // Returns a fake UIScene with |identifier| as session persistent identifier // when running on iOS 13+ or nil otherwise. The fake object implements just // enough API for SessionIdentifierForScene(). id FakeSceneWithIdentifier(NSString* identifier); #endif // IOS_CHROME_BROWSER_SESSIONS_SCENE_UTIL_TEST_SUPPORT_H_
226
15,588
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.rocketmq.logging.inner; import org.apache.rocketmq.logging.BasicLoggerTest; import org.junit.Assert; import org.junit.Test; import java.io.ByteArrayOutputStream; import java.io.File; import java.io.FilenameFilter; import java.io.PrintStream; public class LoggingBuilderTest extends BasicLoggerTest { @Test public void testConsole() { PrintStream out = System.out; ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); System.setOut(new PrintStream(byteArrayOutputStream)); Appender consoleAppender = LoggingBuilder.newAppenderBuilder() .withConsoleAppender(LoggingBuilder.SYSTEM_OUT) .withLayout(LoggingBuilder.newLayoutBuilder().withDefaultLayout().build()).build(); consoleAppender.doAppend(loggingEvent); String result = new String(byteArrayOutputStream.toByteArray()); System.setOut(out); Assert.assertTrue(result.contains(loggingEvent.getMessage().toString())); } @Test public void testFileAppender() throws InterruptedException { String logFile = loggingDir + "/file.log"; Appender rollingFileAppender = LoggingBuilder.newAppenderBuilder().withAsync(false, 102400) .withFileAppender(logFile).withLayout(LoggingBuilder.newLayoutBuilder().withDefaultLayout().build()).build(); for (int i = 0; i < 10; i++) { rollingFileAppender.doAppend(loggingEvent); } rollingFileAppender.close(); File file = new File(logFile); Assert.assertTrue(file.length() > 0); } @Test public void testRollingFileAppender() throws InterruptedException { String rollingFile = loggingDir + "/rolling.log"; Appender rollingFileAppender = LoggingBuilder.newAppenderBuilder().withAsync(false, 1024) .withRollingFileAppender(rollingFile, "1024", 5) .withLayout(LoggingBuilder.newLayoutBuilder().withDefaultLayout().build()).build(); for (int i = 0; i < 100; i++) { rollingFileAppender.doAppend(loggingEvent); } rollingFileAppender.close(); int cc = 0; for (int i = 0; i < 5; i++) { File file; if (i == 0) { file = new File(rollingFile); } else { file = new File(rollingFile + "." 
+ i); } if (file.exists() && file.length() > 0) { cc += 1; } } Assert.assertTrue(cc >= 2); } //@Test public void testDailyRollingFileAppender() throws InterruptedException { String rollingFile = loggingDir + "/daily-rolling--222.log"; Appender rollingFileAppender = LoggingBuilder.newAppenderBuilder().withAsync(false, 1024) .withDailyFileRollingAppender(rollingFile, "'.'yyyy-MM-dd_HH-mm-ss-SSS") .withLayout(LoggingBuilder.newLayoutBuilder().withDefaultLayout().build()).build(); for (int i = 0; i < 100; i++) { rollingFileAppender.doAppend(loggingEvent); } rollingFileAppender.close(); File file = new File(loggingDir); String[] list = file.list(new FilenameFilter() { @Override public boolean accept(File dir, String name) { return name.startsWith("daily-rolling--222.log"); } }); Assert.assertTrue(list.length > 0); } }
1,614
450
//===---------------------------------------------------------------------===// // // The ONNC Project // // Copyright(c) 2018, The ONNC Team // // This file is part of the ONNC Project and is distributed under // 3-clause BSD license (https://opensource.org/licenses/BSD-3-Clause) // // See LICENSE.TXT for details. // //===---------------------------------------------------------------------===// // legacy code #include "TLStore.h" #include "BM188xCodeEmitter.h" #include "PatternMatch.h" #include <onnc/Support/Debug.h> #include <onnc/Target/Sophon/BM188x/bmkernel_api.h> #define DEBUG_TYPE "tl_store" namespace onnc { namespace BM188X { TLStore::TLStore(const xNode &pNode) : BM188xComputeOperator(pNode, std::string("TLStore")) { // ONNC extension attribute assert(pNode.hasAttribute(xSymbol("dst_goffset"))); assert(pNode.hasAttribute(xSymbol("src_laddr"))); assert(pNode.hasAttribute(xSymbol("local_dim"))); assert(pNode.hasAttribute(xSymbol("global_dim"))); assert(pNode.hasAttribute(xSymbol("do_transpose"))); assert(pNode.hasAttribute(xSymbol("is_aligned"))); assert(pNode.hasAttribute(xSymbol("is_neuron"))); assert(pNode.hasAttribute(xSymbol("op_name"))); m_SplitName = pNode.s(xSymbol("op_name")); m_DstGOffset = pNode.i(xSymbol("dst_goffset")); m_SrcLAddr = pNode.i(xSymbol("src_laddr")); m_DoTranspose = pNode.i(xSymbol("do_transpose")); m_IsAligned = pNode.i(xSymbol("is_aligned")); m_IsNeuron = pNode.i(xSymbol("is_neuron")); auto &local_dim = pNode.is(xSymbol("local_dim")); assert(local_dim.size() == 4); m_LocalN = local_dim[0]; m_LocalC = local_dim[1]; m_LocalH = local_dim[2]; m_LocalW = local_dim[3]; auto &global_dim = pNode.is(xSymbol("global_dim")); assert(global_dim.size() == 4); m_GlobalC = global_dim[1]; m_GlobalH = global_dim[2]; m_GlobalW = global_dim[3]; // End extension } void TLStore::emit() const { // Calculate the address after Global Memory Allocation Pass uint64_t gaddr = m_DstGOffset + m_MemOperands[0]->m_Addr; bmnet::bmnet_asm::asm_context::get_context().name = m_SplitName; // TODO(arcbbb): only support 4d tensor for the moment bmnet::bmnet_asm::bmnet_tl_store_stride_bmkernel( gaddr, // Dest global addr m_SrcLAddr, // Src local addr m_LocalN, // Local N m_LocalC, // Local C m_LocalH, // Local H m_LocalW, // Local W m_GlobalC, // Global C m_GlobalH, // Global H m_GlobalW, // Global W m_DoTranspose, // Do Transpose m_IsAligned, // Check alignment m_IsNeuron // MemSpace: Neuron or Weight ); } TLStore *TLStore::addMemOperands(MemOperand *pOutput) { m_MemOperands.push_back(pOutput); return this; } } // namespace BM188X } // namespace onnc
1,153
515
<filename>Libs/Widgets/ctkToolTipTrapper.h /*========================================================================= Library: CTK Copyright (c) Kitware Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0.txt Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =========================================================================*/ /*========================================================================= Program: ParaView Module: pqToolTipTrapper.h Copyright (c) 2005-2008 Sandia Corporation, Kitware Inc. All rights reserved. ParaView is a free software; you can redistribute it and/or modify it under the terms of the ParaView license version 1.2. See http://www.paraview.org/paraview/project/license.html for the full ParaView license. A copy of this license can be obtained by contacting Kitware Inc. 28 Corporate Drive Clifton Park, NY 12065 USA THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. =========================================================================*/ #ifndef __ctkToolTipTrapper_h #define __ctkToolTipTrapper_h // Qt includes #include <QObject> // CTK includes #include "ctkWidgetsExport.h" class ctkToolTipTrapperPrivate; /// \ingroup Widgets /// Filters tooltips, to prevent tooltips from appearing or to word wrap /// tooltips. /// If toolTipsTrapped or toolTipsWordWrapped is true, installs an event filter to /// trap or wrap tooltips. /// If toolTipsTrapped and toolTipsWordWrapped are false, does not install the event /// filter. /// Tooltips are trapped and not word wrapped by default. class CTK_WIDGETS_EXPORT ctkToolTipTrapper : public QObject { Q_OBJECT Q_PROPERTY( bool toolTipsTrapped READ toolTipsTrapped WRITE setToolTipsTrapped) Q_PROPERTY( bool toolTipsWordWrapped READ toolTipsWordWrapped WRITE setToolTipsWordWrapped) public: typedef QObject Superclass; /// Constructs a ToolTip trapper which is a child of objectParent explicit ctkToolTipTrapper(QObject* objectParent = 0); explicit ctkToolTipTrapper(bool toolTipsTrapped, bool toolTipsWordWordWrapped, QObject* objectParent = 0); virtual ~ctkToolTipTrapper(); /// Returns true if the tooltips are trapped to prevent them from appearing. bool toolTipsTrapped()const; /// Returns true if the tooltips are word wrapped. bool toolTipsWordWrapped()const; /// Automatically called when the tooltips are trapped or word wrapped. /// You shouldn't have to call it manually. 
bool eventFilter(QObject* watched, QEvent* event); public Q_SLOTS: /// If true, installs the eventFilter on the application if it isn't already /// installed. Otherwise, removes the eventFilter if tooltips are neither /// trapped nor word wrapped. void setToolTipsTrapped(bool toolTipsTrapped); void setToolTipsWordWrapped(bool toolTipsWordWrapped); protected: QScopedPointer<ctkToolTipTrapperPrivate> d_ptr; private: Q_DECLARE_PRIVATE(ctkToolTipTrapper); Q_DISABLE_COPY(ctkToolTipTrapper); }; #endif
1,244
605
<reponame>kasohong/mqcloud package com.sohu.tv.mq.cloud.util; import java.io.IOException; import java.io.PrintWriter; import javax.servlet.ServletRequest; import javax.servlet.http.Cookie; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import org.apache.commons.lang3.StringUtils; import org.springframework.web.util.WebUtils; /** * Web-related utilities * @Description: * @author yongfeigao * @date June 12, 2018 */ public class WebUtil { public static final String LOGIN_TOKEN = "TOKEN"; /** * Get the client IP from the request * * @param request * @return */ public static String getIp(ServletRequest request) { HttpServletRequest req = (HttpServletRequest) request; String addr = getHeaderValue(req, "X-Forwarded-For"); if (StringUtils.isNotEmpty(addr) && addr.contains(",")) { addr = addr.split(",")[0]; } if (StringUtils.isEmpty(addr)) { addr = getHeaderValue(req, "X-Real-IP"); } if (StringUtils.isEmpty(addr)) { addr = req.getRemoteAddr(); } return addr; } /** * Get the complete URL of the request * @param request * @return */ public static String getUrl(HttpServletRequest request) { String url = request.getRequestURL().toString(); String queryString = request.getQueryString(); if(queryString != null) { url += "?" + request.getQueryString(); } return url; } /** * Get a ServletRequest header value * @param request * @param name * @return */ public static String getHeaderValue(HttpServletRequest request, String name) { String v = request.getHeader(name); if(v == null) { return null; } return v.trim(); } /** * Store the email in the request attributes * @param request * @return */ public static void setEmailAttribute(ServletRequest request, String email) { request.setAttribute("email", email); } /** * Get the email from the request attributes * @param request * @return */ public static String getEmailAttribute(ServletRequest request) { Object email = request.getAttribute("email"); if(email == null) { return null; } return email.toString(); } /** * Set an object into the request attributes * @param request * @return */ public static void setAttribute(ServletRequest request, String name, Object obj) { request.setAttribute(name, obj); } /** * Get an object from the request attributes * @param request * @return */ public static Object getAttribute(ServletRequest request, String name) { return request.getAttribute(name); } /** * Write content to the response * @param response * @param result * @throws IOException */ public static void print(HttpServletResponse response, String result) throws IOException { response.setContentType("text/html;charset=UTF-8"); PrintWriter out = response.getWriter(); out.print(result); out.flush(); out.close(); out = null; } /** * Get the value of the login cookie * * @param request * @return */ public static String getLoginCookieValue(HttpServletRequest request) { Cookie cookie = WebUtils.getCookie(request, LOGIN_TOKEN); if(cookie != null) { return cookie.getValue(); } return null; } /** * Get the login cookie * * @param request * @return */ public static Cookie getLoginCookie(HttpServletRequest request) { return WebUtils.getCookie(request, LOGIN_TOKEN); } /** * Set the login cookie * * @param request */ public static void setLoginCookie(HttpServletResponse response, String value) { Cookie cookie = new Cookie(LOGIN_TOKEN, value); cookie.setPath("/"); response.addCookie(cookie); } /** * Remove the login cookie * * @param request */ public static void deleteLoginCookie(HttpServletResponse response) { Cookie cookie = new Cookie(LOGIN_TOKEN, ""); cookie.setPath("/"); cookie.setMaxAge(0); response.addCookie(cookie); } /** * Redirect to the given path * @param response * @param request * @param path * @throws IOException */ public static void redirect(HttpServletResponse response, 
HttpServletRequest request, String path) throws IOException { response.sendRedirect(request.getContextPath() + path); } /** * Format a count value for display * @param value * @return */ public static String countFormat(long value) { if (value >= 100000000) { return format(value / 100000000F) + "亿"; } if (value >= 10000) { return format(value / 10000F) + "万"; } return format(value); } /** * Format a size value for display * @param value * @return */ public static String sizeFormat(long value) { if (value >= 1073741824) { return format(value / 1073741824F) + "g"; } if (value >= 1048576) { return format(value / 1048576F) + "m"; } if (value >= 1024) { return format(value / 1024F) + "k"; } return format(value) + "b"; } public static String format(float value) { long v = (long) (value * 10); if (v % 10 == 0) { return String.valueOf(v / 10); } return String.valueOf(v / 10.0); } }
2,670
348
{"nom":"Sottevast","circ":"3ème circonscription","dpt":"Manche","inscrits":1025,"abs":501,"votants":524,"blancs":13,"nuls":0,"exp":511,"res":[{"nuance":"REM","nom":"<NAME>","voix":220},{"nuance":"FI","nom":"<NAME>","voix":94},{"nuance":"LR","nom":"<NAME>","voix":73},{"nuance":"FN","nom":"Mme <NAME>","voix":62},{"nuance":"ECO","nom":"<NAME>","voix":29},{"nuance":"COM","nom":"Mme <NAME>","voix":22},{"nuance":"DIV","nom":"<NAME>","voix":4},{"nuance":"EXD","nom":"<NAME>","voix":4},{"nuance":"EXG","nom":"Mme <NAME>","voix":3}]}
211
925
<filename>tests/test_load.py import pytest import re from test_layout import verify_frame_objects_via_dump @pytest.mark.parametrize("invalid_layout,error_pos", [ ('(', 1), ('()', 1), ('foo baar', 0), ('(foo baar', 1), ('((clients max:0 ))', 1), ('(clients)', 8), ('(clients )', 9), ('(split max:0.5:1)', 7), ('(split horizontal:0.05:1)', 7), ('(split horizontal:0.95:1)', 7), ('(split horizontal:x:1)', 7), ('(split horizontal:0.5:x)', 7), ('(split horizontal:0.5:-1)', 7), ('(split horizontal:0.5:2)', 7), ('(split horizontal:0.3)', 7), ('(split horizontal:0.3:0:0)', 7), ('(split horizonta:0.5:0 )', 8), ('(clients max )', 9), ('(clients max:0:0 )', 9), ('(clients ma:0 )', 9), ('(clients max:-1 )', 9), ('(clients grid:0 asdf )', 16), ('(clients grid:0 0xx0)', 16), ('(clients grid:0 09)', 16), ('(clients grid:0 0x)', 16), ('(clients grid:0 x)', 16), ('(split horizontal:0.5:0 x)', 24), ('(split horizontal:0.5:0 (split horizontal:0.5:1', 47), ('(split horizontal:0.5:0 (split horizontal:0.5:1 ', 48), ('(split horizontal:0.5:0 (split horizontal:0.5:1 )', 49), ('(split horizontal:0.5:0 (split horizontal:0.5:1 )))', 50), ('(split horizontal:0.5:0 (clients max:1', 38), ]) def test_syntax_errors_position(hlwm, invalid_layout, error_pos): c = hlwm.call_xfail(['load', invalid_layout]) c.expect_stderr(r'^load: Syntax error at {}: '.format(error_pos)) def is_subseq(x, y): """Checks if x is a subsequence (not substring) of y.""" # from https://stackoverflow.com/a/24017747/4400896 it = iter(y) return all(c in it for c in x) @pytest.mark.parametrize("layout", [ "(clients max:0)", "(clients grid:0)", " ( clients vertical:0 )", "(split horizontal:0.3:0)", "(split vertical:0.3:0 (clients horizontal:0))", "(split vertical:0.3:0 (split vertical:0.4:1))", ]) @pytest.mark.parametrize('num_splits_before', [0, 1, 2]) def test_valid_layout_syntax_partial_layouts(hlwm, layout, num_splits_before): for i in range(0, num_splits_before): hlwm.call('split explode') # load the layout that defines the layout tree only partially hlwm.call(['load', layout]) # The new layout is the old layout with some '(clients …)' (and theoretically # even '(split…)') subtrees inserted. 
assert is_subseq(layout.replace(' ', ''), hlwm.call('dump').stdout) @pytest.mark.parametrize( "layout", [ # with window ID placeholders 'W' "(clients max:0 W)", "(clients max:1 W W)", "(split horizontal:0.9:0 (split vertical:0.5:1 (clients max:0) (clients grid:0)) (clients horizontal:0))", "(split vertical:0.4:1 (clients max:2 W W W) (clients grid:0 W))", ]) def test_full_layouts(hlwm, layout): clients = [hlwm.create_client() for k in range(0, layout.count('W'))] for winid, _ in clients: # replace the next W by the window ID layout = layout.replace('W', winid, 1) p = hlwm.call(['load', layout]) assert p.stdout == '' assert layout == hlwm.call('dump').stdout verify_frame_objects_via_dump(hlwm) @pytest.mark.parametrize("layout", [ "(clients horizontal:0 0234)", "(clients vertical:0 0x2343)", "(clients vertical:0 1713)", ]) def test_load_invalid_winids(hlwm, layout): message = 'load: Warning: Unknown window IDs' p = hlwm.call(['load', layout], allowed_stderr=re.compile(message)) \ assert p.stderr.startswith(message) @pytest.mark.parametrize( "running_clients_num,focus", [(n, f) for n in [1, 3] for f in range(0, n)]) def test_focus_client_via_load(hlwm, running_clients, running_clients_num, focus): layout = '(clients horizontal:{} {})'.format( focus, ' '.join(running_clients)) hlwm.call(['load', layout]) assert hlwm.call('dump').stdout == layout assert hlwm.get_attr('clients.focus.winid') == running_clients[focus] @pytest.mark.parametrize( "running_clients_num,num_bring", [(n, f) for n in [1, 3] for f in range(0, n + 1)]) def test_load_brings_windows(hlwm, running_clients, running_clients_num, num_bring): hlwm.call('add other') layout = '(clients horizontal:0{}{})'.format( (' ' if num_bring > 0 else ''), ' '.join(running_clients[0:num_bring])) assert int(hlwm.get_attr('tags.0.client_count')) \ == len(running_clients) assert int(hlwm.get_attr('tags.1.client_count')) == 0 hlwm.call(['load', 'other', layout]) assert int(hlwm.get_attr('tags.0.client_count')) == \ len(running_clients) - num_bring assert int(hlwm.get_attr('tags.1.client_count')) == num_bring assert hlwm.call('dump other').stdout == layout def test_load_invalid_tag(hlwm): hlwm.call_xfail(['load', 'invalidtagname', '(clients vertical:0)']) \ .expect_stderr(r'Tag.*not found') def test_fraction_precision(hlwm): values = [ '0.4', '0.305', '0.8987', '0.5', '0.4001' ] layout_format = '(split horizontal:{}:0 (clients max:0) (clients max:0))' for v in values: layout = layout_format.format(v) hlwm.call(['load', layout]) assert hlwm.call('dump').stdout == layout def test_fraction_precision_outside_range(hlwm): # here, we test the decimal i/o for values that are outside # of the allowed frame-split-ratio. 
This test only makes sense # because we know that in FrameParser::buildTree(), the already # parsed decimal is used for the error message values = [ '0.098', '-0.098', '-0.5', '12.43', '-110.01', ] layout_format = '(split horizontal:{}:0 (clients max:0) (clients max:0))' for v in values: layout = layout_format.format(v) hlwm.call_xfail(['load', layout]) \ .expect_stderr('but actually is ' + v) def test_load_floating_client(hlwm): winid, _ = hlwm.create_client() hlwm.call(f'set_attr clients.{winid}.floating true') hlwm.call('set_layout max') assert hlwm.call('dump').stdout.rstrip() == '(clients max:0)' # suck the client into the frame tree layout = f'(clients max:0 {winid})' hlwm.call(['load', layout]) assert hlwm.call('dump').stdout.rstrip() == layout assert hlwm.get_attr(f'clients.{winid}.floating') == 'false' @pytest.mark.parametrize("othertag,minimized", [ # all combinations where at least one of the flags is True # such that it is not in the tiling layer of the first tag yet # and such that it is invisible initially (True, True), (True, False), (False, True) ]) @pytest.mark.parametrize("floating", [True, False]) def test_load_minimized_client(hlwm, othertag, minimized, floating): if othertag: hlwm.call('add othertag') hlwm.call('rule tag=othertag') winid, _ = hlwm.create_client() hlwm.call(f'set_attr clients.{winid}.minimized {hlwm.bool(minimized)}') hlwm.call(f'set_attr clients.{winid}.floating {hlwm.bool(floating)}') assert hlwm.get_attr(f'clients.{winid}.visible') == 'false' # ensure the client is not yet in the tiling layer hlwm.call('set_layout max') assert hlwm.call('dump').stdout.rstrip() == '(clients max:0)' layout = f'(clients max:0 {winid})' hlwm.call(['load', layout]) assert hlwm.call('dump').stdout.rstrip() == layout assert hlwm.get_attr(f'clients.{winid}.visible') == 'true' assert hlwm.get_attr(f'clients.{winid}.minimized') == 'false' assert hlwm.get_attr(f'clients.{winid}.floating') == 'false' def test_load_completion(hlwm): tagname = 'newtag' hlwm.call(f'add {tagname}') assert tagname in hlwm.complete(['load']) assert tagname not in hlwm.complete(['load', tagname]) hlwm.command_has_all_args(['load', '(clients ...)']) hlwm.command_has_all_args(['load', tagname, 'bar'])
3,362
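The is_subseq helper in the test file above checks subsequence containment rather than substring containment, which is why test_valid_layout_syntax_partial_layouts strips whitespace from the expected layout before comparing it against the dump output. A minimal standalone sketch of that behaviour (plain Python, no hlwm fixture required; the third assertion uses a layout pair chosen here for illustration, not one taken from the parametrize list):

def is_subseq(x, y):
    """Checks if x is a subsequence (not substring) of y."""
    it = iter(y)
    return all(c in it for c in x)

# 'ace' is a subsequence of 'abcde' even though it is not a substring
assert is_subseq('ace', 'abcde')
assert 'ace' not in 'abcde'

# mirrors how the whitespace-stripped partial layout is matched
# against the (possibly larger) dump output in the test above
assert is_subseq('(clients max:0)', '(split vertical:0.3:0 (clients max:0))')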
2,542
<reponame>gridgentoo/ServiceFabricAzure // ------------------------------------------------------------ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License (MIT). See License.txt in the repo root for license information. // ------------------------------------------------------------ #pragma once #include <string.h> #include <stdio.h> #include <stdlib.h> #include <ktl.h> #include <ktrace.h> #include <ktllogger.h> #include <klogicallog.h> #include <bldver.h> #include "ktlloggertests.h" #include "LWTtests.h" #include "rwtstress.h" #include "CloseSync.h" #include "ControllerHost.h" #include "llworkload.h" #include "workerasync.h" # define VERIFY_IS_TRUE(__condition, ...) if (! (__condition)) { DebugBreak(); } #ifdef KInvariant #undef KInvariant #endif #define KInvariant(x) VERIFY_IS_TRUE( (x) ? TRUE : FALSE ) #include "TestUtil.h" #if KTL_USER_MODE #define _printf(...) printf(__VA_ARGS__) // #define _printf(...) KDbgPrintf(__VA_ARGS__) extern volatile LONGLONG gs_AllocsRemaining; #else #define _printf(...) KDbgPrintf(__VA_ARGS__) #define wprintf(...) KDbgPrintf(__VA_ARGS__) #endif #define ALLOCATION_TAG 'LLKT' typedef struct { ULONG RecordSize; BOOLEAN WriteDedicatedOnly; ULONG NumberStreams; ULONG TimeToRunInSeconds; ULONG WaitTimerInMs; WCHAR LogContainerDirectory[MAX_PATH]; WCHAR LogStreamDirectory[MAX_PATH]; } LWTParameters; VOID CreateGuidPathname( __in PWCHAR Path, __out KString::SPtr& FullPathname ) { BOOL b; KGuid guid; KString::SPtr path; KString::SPtr filename; path = KString::Create(*g_Allocator, MAX_PATH); VERIFY_IS_TRUE((path != nullptr) ? true : false); filename = KString::Create(*g_Allocator, MAX_PATH); VERIFY_IS_TRUE((filename != nullptr) ? true : false); b = path->CopyFrom(Path, TRUE); VERIFY_IS_TRUE(b ? true : false); guid. CreateNew(); b = filename->FromGUID(guid); VERIFY_IS_TRUE(b ? true : false); b = filename->Concat(KStringView(L".log")); VERIFY_IS_TRUE(b ? true : false); b = path->Concat(KStringView(L"\\")); VERIFY_IS_TRUE(b ? true : false); b = path->Concat(*filename, TRUE); VERIFY_IS_TRUE(b ? true : false); FullPathname = Ktl::Move(path); } // // This test will allow any number of streams to perform a long running // workload. The workload will write fixed or random size data and truncate // periodically. // // Future iterations may check the size of the log to ensure it is not // too large, perform record reads while writing, etc. 
// #include "LWTAsync.h" VOID LongRunningWriteTest( LWTParameters* params, KtlLogManager::SPtr logManager ) { ULONG NumberStreams = params->NumberStreams; const ULONG NumberWriteStress = params->NumberStreams; const ULONG MaxWriteRecordSize = params->RecordSize; const ULONGLONG AllowedLogSpace = 25165824; const ULONGLONG MaxWriteAsn = AllowedLogSpace * 64; const LONGLONG StreamSize = MaxWriteAsn / 2; const ULONG WaitTimerInMs = params->WaitTimerInMs; static const NumberWriteStressMax = 512; KInvariant(NumberWriteStress <= NumberWriteStressMax); LongRunningWriteStress::SPtr writeStress[NumberWriteStressMax]; KSynchronizer writeSyncs[NumberWriteStressMax]; KtlLogStreamId logStreamId[NumberWriteStressMax]; KtlLogStream::SPtr logStream[NumberWriteStressMax]; NTSTATUS status; KSynchronizer sync; // // Configure logger with no throttling limits // KtlLogManager::MemoryThrottleLimits* memoryThrottleLimits; KtlLogManager::AsyncConfigureContext::SPtr configureContext; KBuffer::SPtr inBuffer; KBuffer::SPtr outBuffer; ULONG result; status = logManager->CreateAsyncConfigureContext(configureContext); VERIFY_IS_TRUE(NT_SUCCESS(status)); status = KBuffer::Create(sizeof(KtlLogManager::MemoryThrottleLimits), inBuffer, *g_Allocator); VERIFY_IS_TRUE(NT_SUCCESS(status)); // // Configure defaults // memoryThrottleLimits = (KtlLogManager::MemoryThrottleLimits*)inBuffer->GetBuffer(); memoryThrottleLimits->WriteBufferMemoryPoolMax = KtlLogManager::MemoryThrottleLimits::_NoLimit; memoryThrottleLimits->WriteBufferMemoryPoolMin = KtlLogManager::MemoryThrottleLimits::_NoLimit; memoryThrottleLimits->WriteBufferMemoryPoolPerStream = KtlLogManager::MemoryThrottleLimits::_DefaultWriteBufferMemoryPoolPerStream; memoryThrottleLimits->PinnedMemoryLimit = KtlLogManager::MemoryThrottleLimits::_NoLimit; memoryThrottleLimits->PeriodicFlushTimeInSec = KtlLogManager::MemoryThrottleLimits::_DefaultPeriodicFlushTimeInSec; memoryThrottleLimits->PeriodicTimerIntervalInSec = KtlLogManager::MemoryThrottleLimits::_DefaultPeriodicTimerIntervalInSec; memoryThrottleLimits->AllocationTimeoutInMs = KtlLogManager::MemoryThrottleLimits::_DefaultAllocationTimeoutInMs; memoryThrottleLimits->MaximumDestagingWriteOutstanding = KtlLogManager::MemoryThrottleLimits::_NoLimit; configureContext->Reuse(); configureContext->StartConfigure(KtlLogManager::ConfigureMemoryThrottleLimits2, inBuffer.RawPtr(), result, outBuffer, NULL, sync); status = sync.WaitForCompletion(); VERIFY_IS_TRUE(NT_SUCCESS(status)); KGuid logContainerGuid; KtlLogContainerId logContainerId; ContainerCloseSynchronizer closeContainerSync; KtlLogContainer::SPtr logContainer; KGuid logStreamGuid; ULONG metadataLength = 0x10000; StreamCloseSynchronizer closeStreamSync; // // Create container // KString::SPtr containerPath; CreateGuidPathname(params->LogContainerDirectory, containerPath); KtlLogManager::AsyncCreateLogContainer::SPtr createContainerAsync; LONGLONG logSize = 0x10000000; // 0x200000000; // 8GB logContainerGuid.CreateNew(); logContainerId = static_cast<KtlLogContainerId>(logContainerGuid); status = logManager->CreateAsyncCreateLogContainerContext(createContainerAsync); VERIFY_IS_TRUE(NT_SUCCESS(status)); createContainerAsync->StartCreateLogContainer(*containerPath, logContainerId, logSize, 0, // Max Number Streams 0, // Max Record Size logContainer, NULL, // ParentAsync sync); status = sync.WaitForCompletion(); VERIFY_IS_TRUE(NT_SUCCESS(status)); KtlLogContainer::AsyncCreateLogStreamContext::SPtr createStreamAsync; status = 
logContainer->CreateAsyncCreateLogStreamContext(createStreamAsync); VERIFY_IS_TRUE(NT_SUCCESS(status)); KBuffer::SPtr securityDescriptor = nullptr; for (ULONG i = 0; i < NumberStreams; i++) { KString::SPtr streamPath; CreateGuidPathname(params->LogStreamDirectory, streamPath); logStreamGuid.CreateNew(); logStreamId[i] = static_cast<KtlLogStreamId>(logStreamGuid); createStreamAsync->Reuse(); createStreamAsync->StartCreateLogStream(logStreamId[i], KLogicalLogInformation::GetLogicalLogStreamType(), nullptr, // Alias KString::CSPtr(streamPath.RawPtr()), securityDescriptor, metadataLength, StreamSize, 1024*1024, // 1MB KtlLogManager::FlagSparseFile, logStream[i], NULL, // ParentAsync sync); status = sync.WaitForCompletion(); VERIFY_IS_TRUE(NT_SUCCESS(status)); } printf("Starting Asyncs\n"); KDbgPrintf("Starting Asyncs\n"); for (ULONG i = 0; i < NumberWriteStress; i++) { LongRunningWriteStress::StartParameters params1; status = LongRunningWriteStress::Create(*g_Allocator, KTL_TAG_TEST, writeStress[i]); VERIFY_IS_TRUE(NT_SUCCESS(status)); params1.LogStream = logStream[i]; params1.MaxRandomRecordSize = MaxWriteRecordSize; params1.LogSpaceAllowed = AllowedLogSpace; params1.HighestAsn = MaxWriteAsn; params1.UseFixedRecordSize = TRUE; params1.WaitTimerInMs = WaitTimerInMs; writeStress[i]->StartIt(&params1, NULL, writeSyncs[i]); } // // Time bound how long this test runs // KTimer::SPtr timer; status = KTimer::Create(timer, *g_Allocator, KTL_TAG_TEST); VERIFY_IS_TRUE(NT_SUCCESS(status)); timer->StartTimer(params->TimeToRunInSeconds * 1000, NULL, sync); status = sync.WaitForCompletion(); VERIFY_IS_TRUE(NT_SUCCESS(status)); printf("Shutting down Asyncs\n"); KDbgPrintf("Shutting down Asyncs\n"); ULONGLONG bytes = 0; ULONGLONG a; for (ULONG i = 0; i < NumberWriteStress; i++) { a = writeStress[i]->ForceCompletion(); bytes += a; } ULONGLONG bytesPerSec = bytes / params->TimeToRunInSeconds; printf("%I64d bytes or %I64d per second\n", bytes, bytesPerSec); KDbgPrintf("%I64d bytes or %I64d per second\n", bytes, bytesPerSec); ULONG completed = NumberWriteStress; while (completed > 0) { for (ULONG i = 0; i < NumberWriteStress; i++) { status = writeSyncs[i].WaitForCompletion(60 * 1000); if (status == STATUS_IO_TIMEOUT) { printf("writeStress[%d]: %I64x Version, %I64x Asn, %I64x TruncationAsn\n", i, writeStress[i]->GetVersion(), writeStress[i]->GetAsn(), writeStress[i]->GetTruncationAsn()); } else { VERIFY_IS_TRUE(NT_SUCCESS(status)); completed--; } } } for (ULONG i = 0; i < NumberStreams; i++) { logStream[i]->StartClose(NULL, closeStreamSync.CloseCompletionCallback()); status = closeStreamSync.WaitForCompletion(); VERIFY_IS_TRUE(NT_SUCCESS(status)); } logContainer->StartClose(NULL, closeContainerSync.CloseCompletionCallback()); status = closeContainerSync.WaitForCompletion(); VERIFY_IS_TRUE(NT_SUCCESS(status)); KtlLogManager::AsyncDeleteLogContainer::SPtr deleteContainerAsync; status = logManager->CreateAsyncDeleteLogContainerContext(deleteContainerAsync); VERIFY_IS_TRUE(NT_SUCCESS(status)); deleteContainerAsync->StartDeleteLogContainer(*containerPath, logContainerId, NULL, // ParentAsync sync); status = sync.WaitForCompletion(); VERIFY_IS_TRUE(NT_SUCCESS(status)); } VOID SetupTests( ) { NTSTATUS status; KtlSystem* System; EventRegisterMicrosoft_Windows_KTL(); status = KtlSystem::Initialize(FALSE, // Do not enable VNetwork, we don't need it &System); VERIFY_IS_TRUE(NT_SUCCESS(status), "KtlSystemInitialize"); g_Allocator = &KtlSystem::GlobalNonPagedAllocator(); KDbgMirrorToDebugger = TRUE; 
System->SetStrictAllocationChecks(TRUE); #ifdef UDRIVER // // For UDRIVER, need to perform work done in PNP Device Add // status = FileObjectTable::CreateAndRegisterOverlayManager(*g_Allocator, ALLOCATION_TAG); VERIFY_IS_TRUE(NT_SUCCESS(status), "CreateAndRegisterOverlayManager"); #endif #if defined(KDRIVER) || defined(UPASSTHROUGH) || defined(DAEMON) // // For kernel, assume driver already installed by InstallForCITs // #endif } VOID CleanupTests( ) { #ifdef UDRIVER // // For UDRIVER need to perform work done in PNP RemoveDevice // NTSTATUS status; status = FileObjectTable::StopAndUnregisterOverlayManager(*g_Allocator); KInvariant(NT_SUCCESS(status)); #endif #if defined(KDRIVER) || defined(UPASSTHROUGH) || defined(DAEMON) // // For kernel, assume driver already installed by InstallForCITs // #endif EventUnregisterMicrosoft_Windows_KTL(); KtlSystem::Shutdown(); } VOID Usage() { printf("LWTPerf - long write test performance\n"); printf(" -r:<record size in bytes>\n"); printf(" -f:<directory to log container file>\n"); printf(" -s:<directory to log stream files>\n"); printf(" -d:<true to write to dedicated log only>\n"); printf(" -n:<Number streams>\n"); printf(" -t:<TimeToRunInSeconds>\n"); printf(" -w:<WaitTimerInMs between writes>\n"); printf("\n"); printf(" Note that path for log directories must be an absolute path in the form of \\??\\c:\\temp\\\n"); printf("\n"); } NTSTATUS ParseLWTParameters( int argc, wchar_t** args, LWTParameters* params ) { // // Setup defaults // params->RecordSize = 512 * 1024; params->WriteDedicatedOnly = FALSE; params->NumberStreams = 1; params->TimeToRunInSeconds = 120; *params->LogContainerDirectory = 0; *params->LogStreamDirectory = 0; // // Parse Parameters // for (int i = 1; i < argc; i++) { PWCHAR a = args[i]; WCHAR option; PWCHAR value; size_t len; len = wcslen(a); if ((len < 3) || (a[0] != L'-') || (a[2] != L':')) { printf("Invalid Parameter %ws\n\n", a); return(STATUS_INVALID_PARAMETER); } option = towlower(a[1]); value = &a[3]; switch(option) { case L'r': { ULONG x; x = _wtoi(value); if (x == 0) { printf("Invalid parameter value %ws\n\n", a); return(STATUS_INVALID_PARAMETER); } params->RecordSize = x; break; } case L'f': { StringCchCopy(params->LogContainerDirectory, MAX_PATH, value); break; } case L's': { StringCchCopy(params->LogStreamDirectory, MAX_PATH, value); break; } case L'd': { if (_wcsnicmp(value, L"false", (sizeof(L"false") / sizeof(WCHAR))) == 0) { params->WriteDedicatedOnly = FALSE; } else if (_wcsnicmp(value, L"true", (sizeof(L"true") / sizeof(WCHAR))) == 0) { params->WriteDedicatedOnly = TRUE; } else { printf("Invalid parameter value %ws\n\n", a); return(STATUS_INVALID_PARAMETER); } break; } case L'n': { ULONG x; x = _wtoi(value); if (x == 0) { printf("Invalid parameter value %ws\n\n", a); return(STATUS_INVALID_PARAMETER); } params->NumberStreams = x; break; } case L't': { ULONG x; x = _wtoi(value); if (x == 0) { printf("Invalid parameter value %ws\n\n", a); return(STATUS_INVALID_PARAMETER); } params->TimeToRunInSeconds = x; break; } case L'w': { ULONG x; x = _wtoi(value); if (x == 0) { printf("Invalid parameter value %ws\n\n", a); return(STATUS_INVALID_PARAMETER); } params->WaitTimerInMs = x; break; } default: { printf("Invalid Parameter %ws\n\n", a); return(STATUS_INVALID_PARAMETER); } } } if ((*params->LogContainerDirectory == 0) || (*params->LogStreamDirectory == 0)) { printf("Invalid Parameters - must specify fully qualified log container path and log stream path of the form \\??\\x:\\Directory\\\n\n"); 
return(STATUS_INVALID_PARAMETER); } return(STATUS_SUCCESS); } int wmain(int argc, wchar_t** args) { NTSTATUS status = STATUS_SUCCESS; LWTParameters params; printf("LongWriteTest Logger Performance test\n"); status = ParseLWTParameters(argc, args, &params); if (! NT_SUCCESS(status)) { Usage(); return(status); } printf("Log Container Path : %ws\n", params.LogContainerDirectory); printf("Log Stream Path : %ws\n", params.LogStreamDirectory); printf("RecordSize : %d 0x%x\n", params.RecordSize, params.RecordSize); printf("WriteToDedicatedOnly : %s\n", params.WriteDedicatedOnly ? "true" : "false"); printf("Number Streams : %d\n", params.NumberStreams); printf("TimeToRunInSeconds : %d\n", params.TimeToRunInSeconds); printf("WaitTimerInMs : %d\n", params.WaitTimerInMs); printf("\n"); SetupTests(); { KtlLogManager::SPtr logManager; KServiceSynchronizer serviceSync; #ifdef UPASSTHROUGH status = KtlLogManager::CreateInproc(ALLOCATION_TAG, *g_Allocator, logManager); #else status = KtlLogManager::CreateDriver(ALLOCATION_TAG, *g_Allocator, logManager); #endif VERIFY_IS_TRUE(NT_SUCCESS(status)); status = logManager->StartOpenLogManager(NULL, // ParentAsync serviceSync.OpenCompletionCallback()); VERIFY_IS_TRUE(NT_SUCCESS(status)); status = serviceSync.WaitForCompletion(); VERIFY_IS_TRUE(NT_SUCCESS(status)); LongRunningWriteTest(&params, logManager); // // Now close the log manager and verify that it closes promptly. // status = logManager->StartCloseLogManager(NULL, // ParentAsync serviceSync.CloseCompletionCallback()); VERIFY_IS_TRUE(NT_SUCCESS(status)); status = serviceSync.WaitForCompletion(); VERIFY_IS_TRUE(NT_SUCCESS(status)); logManager= nullptr; } CleanupTests(); return(status); }
9,582
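The LWTPerf harness above is driven entirely by its command line (see Usage() and ParseLWTParameters). A hedged sketch of launching it from Python on Windows; the binary name and directories are placeholders, while the flag spellings and the \??\-prefixed path requirement come from the parser and usage text above:

import subprocess

# hypothetical binary name and directories -- adjust for your setup;
# note the parser rejects a value of 0 for -r, -n, -t and -w
cmd = [
    "LWTPerf.exe",
    "-r:524288",                       # 512 KiB records
    "-f:\\??\\C:\\lwt\\containers\\",  # log container directory
    "-s:\\??\\C:\\lwt\\streams\\",     # log stream directory
    "-n:4",                            # four streams
    "-t:300",                          # run for five minutes
    "-w:10",                           # 10 ms between writes
]
subprocess.run(cmd, check=True)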
416
<gh_stars>100-1000 // This file is distributed under the MIT license. // See the LICENSE file for details. #include <visionaray/math/intersect.h> // hit_record #include <visionaray/get_color.h> using namespace visionaray; int main() { #if defined GET_COLOR_FLOAT4 vector<3, float>* colors; hit_record<basic_ray<simd::float4>, primitive<unsigned>> hr; vector<3, simd::float4> c = get_color(colors, hr, primitive<unsigned>{}, colors_per_face_binding{}); #elif defined GET_COLOR_FLOAT8 vector<3, float>* colors; hit_record<basic_ray<simd::float8>, primitive<unsigned>> hr; vector<3, simd::float8> c = get_color(colors, hr, primitive<unsigned>{}, colors_per_face_binding{}); #elif defined GET_COLOR_INT4 vector<3, float>* colors; hit_record<basic_ray<simd::int4>, primitive<unsigned>> hr; auto c = get_color(colors, hr, primitive<unsigned>{}, colors_per_face_binding{}); #elif defined GET_COLOR_MASK4 vector<3, float>* colors; hit_record<basic_ray<simd::mask4>, primitive<unsigned>> hr; auto c = get_color(colors, hr, primitive<unsigned>{}, colors_per_face_binding{}); #elif defined GET_COLOR_TRI_FLOAT4 vector<3, float>* colors; hit_record<basic_ray<simd::float4>, primitive<unsigned>> hr; vector<3, simd::float4> c = get_color(colors, hr, basic_triangle<3, float, unsigned>{}, colors_per_vertex_binding{}); #elif defined GET_COLOR_TRI_FLOAT8 vector<3, float>* colors; hit_record<basic_ray<simd::float8>, primitive<unsigned>> hr; vector<3, simd::float8> c = get_color(colors, hr, basic_triangle<3, float, unsigned>{}, colors_per_vertex_binding{}); #elif defined GET_COLOR_TRI_INT4 vector<3, float>* colors; hit_record<basic_ray<simd::int4>, primitive<unsigned>> hr; auto c = get_color(colors, hr, basic_triangle<3, float, unsigned>{}, colors_per_vertex_binding{}); #elif defined GET_COLOR_TRI_MASK4 vector<3, float>* colors; hit_record<basic_ray<simd::mask4>, primitive<unsigned>> hr; auto c = get_color(colors, hr, basic_triangle<3, float, unsigned>{}, colors_per_vertex_binding{}); #endif return 0; }
821
348
{"nom":"Castelnou","circ":"4ème circonscription","dpt":"Pyrénées-Orientales","inscrits":331,"abs":170,"votants":161,"blancs":0,"nuls":27,"exp":134,"res":[{"nuance":"REM","nom":"<NAME>","voix":82},{"nuance":"FN","nom":"<NAME>","voix":52}]}
95
419
<gh_stars>100-1000 #pragma once #include "EntityComponent.h" #include "System/Core/Math/BoundingVolumes.h" #include "System/Core/Math/Transform.h" //------------------------------------------------------------------------- namespace KRG { class KRG_ENGINE_CORE_API SpatialEntityComponent : public EntityComponent { KRG_REGISTER_ENTITY_COMPONENT( SpatialEntityComponent ); friend class Entity; friend class EntityDebugView; friend class EntityModel::EntityMapEditor; friend class EntityModel::EntityCollection; struct AttachmentSocketTransformResult { AttachmentSocketTransformResult( Matrix transform ) : m_transform( transform ) {} Matrix m_transform; bool m_wasFound = false; }; public: // Spatial data //------------------------------------------------------------------------- inline bool IsRootComponent() const { return m_pSpatialParent == nullptr; } inline bool HasChildren() const { return !m_spatialChildren.empty(); } inline Transform const& GetLocalTransform() const { return m_transform; } inline OBB const& GetLocalBounds() const { return m_bounds; } inline Transform const& GetWorldTransform() const { return m_worldTransform; } inline OBB const& GetWorldBounds() const { return m_worldBounds; } // Get world space position inline Vector const& GetPosition() const { return m_worldTransform.GetTranslation(); } // Get world space orientation inline Quaternion const& GetOrientation() const { return m_worldTransform.GetRotation(); } // Get world space forward vector inline Vector GetForwardVector() const { return m_worldTransform.GetForwardVector(); } // Get world space up vector inline Vector GetUpVector() const { return m_worldTransform.GetUpVector(); } // Get world space right vector inline Vector GetRightVector() const { return m_worldTransform.GetRightVector(); } // Call to update the local transform - this will also update the world transform for this component and all children inline void SetLocalTransform( Transform const& newTransform ) { m_transform = newTransform; CalculateWorldTransform(); } // Call to update the world transform - this will also updated the local transform for this component and all children's world transforms inline void SetWorldTransform( Transform const& newTransform ) { SetWorldTransformDirectly( newTransform ); } // Move the component by the specified delta transform inline void MoveByDelta( Transform const& deltaTransform ) { KRG_ASSERT( !deltaTransform.HasScale() ); Transform const newWorldTransform = deltaTransform * GetWorldTransform(); SetWorldTransform( newWorldTransform ); } inline bool HasSpatialParent() const { return m_pSpatialParent != nullptr; } inline ComponentID GetSpatialParentID() const { KRG_ASSERT( HasSpatialParent() ); return m_pSpatialParent->GetID(); } inline Transform const& GetSpatialParentWorldTransform() const { KRG_ASSERT( HasSpatialParent() ); return m_pSpatialParent->GetWorldTransform(); } int32 GetSpatialHierarchyDepth( bool limitToCurrentEntity = true ) const; // The socket that this component is attached to inline StringID GetAttachmentSocketID() const { return m_parentAttachmentSocketID; } inline void SetAttachmentSocketID( StringID socketID ) { m_parentAttachmentSocketID = socketID; } // Returns the world transform for the specified attachment socket if it exists, if it doesnt this function returns the world transform // The search children parameter controls, whether to only search this component or to also search it's children Transform GetAttachmentSocketTransform( StringID socketID ) const; // Conversion Functions 
//------------------------------------------------------------------------- // Convert a world transform to a component local transform inline Transform ConvertWorldTransformToLocalTransform( Transform const& worldTransform ) const { return worldTransform * m_worldTransform.GetInverse(); } // Convert a world point to a component local point inline Vector ConvertWorldPointToLocalPoint( Vector const& worldPoint ) const { return m_worldTransform.GetInverse().TransformPoint( worldPoint ); } // Convert a world direction to a component local direction inline Vector ConvertWorldVectorToLocalVector( Vector const& worldVector ) const { return m_worldTransform.GetInverse().RotateVector( worldVector ); } protected: using EntityComponent::EntityComponent; // Call to update the local bounds - this will ONLY update the world bounds for this component inline void SetLocalBounds( OBB const& newBounds ) { m_bounds = newBounds; m_worldBounds = m_bounds.GetTransformed( m_worldTransform ); } // Try to find and return the world space transform for the specified socket bool TryGetAttachmentSocketTransform( StringID socketID, Transform& outSocketWorldTransform ) const; // This should be implemented on each derived spatial component to find the transform of the socket if it exists // This function must always return a valid world transform in the outSocketTransform usually that of the component virtual bool TryFindAttachmentSocketTransform( StringID socketID, Transform& outSocketWorldTransform ) const; // This function should be implemented on any component that supports sockets, it will check if the specified socket exists on the component virtual bool HasSocket( StringID socketID ) const { return false; } // This function should be called whenever a socket is created/destroyed or its position is updated void NotifySocketsUpdated(); // Called whenever the world transform is updated, try to avoid doing anything expensive in this function virtual void OnWorldTransformUpdated() {} // This function allows you to directly set the world transform for a component and skip the callback. // This must be used with care and so not be exposed externally. inline void SetWorldTransformDirectly( Transform NewWorldTransform, bool triggerCallback = true ) { // Only update the transform if we have a parent, if we dont have a parent it means we are the root transform if ( m_pSpatialParent != nullptr ) { auto parentWorldTransform = m_pSpatialParent->GetAttachmentSocketTransform( m_parentAttachmentSocketID ); m_worldTransform = NewWorldTransform; m_transform = m_worldTransform * parentWorldTransform.GetInverse(); } else { m_worldTransform = NewWorldTransform; m_transform = NewWorldTransform; } // Calculate world bounds m_worldBounds = m_bounds.GetTransformed( m_worldTransform ); // Propagate the world transforms on the children - children will always have their callbacks fired! for ( auto pChild : m_spatialChildren ) { pChild->CalculateWorldTransform(); } // Should we fire the transform updated callback? 
if ( triggerCallback ) { OnWorldTransformUpdated(); } } private: // Called whenever the local transform is modified inline void CalculateWorldTransform( bool triggerCallback = true ) { // Only update the transform if we have a parent, if we dont have a parent it means we are the root transform if ( m_pSpatialParent != nullptr ) { auto parentWorldTransform = m_pSpatialParent->GetAttachmentSocketTransform( m_parentAttachmentSocketID ); m_worldTransform = m_transform * parentWorldTransform; } else { m_worldTransform = m_transform; } // Calculate world bounds m_worldBounds = m_bounds.GetTransformed( m_worldTransform ); // Propagate the world transforms on the children for ( auto pChild : m_spatialChildren ) { pChild->CalculateWorldTransform( triggerCallback ); } if ( triggerCallback ) { OnWorldTransformUpdated(); } } private: KRG_EXPOSE Transform m_transform; // Local space transform OBB m_bounds; // Local space bounding box Transform m_worldTransform; // World space transform (left uninitialized to catch initialization errors) OBB m_worldBounds; // World space bounding box //------------------------------------------------------------------------- SpatialEntityComponent* m_pSpatialParent = nullptr; // The component we are attached to KRG_EXPOSE StringID m_parentAttachmentSocketID; // The socket we are attached to (can be invalid) TInlineVector<SpatialEntityComponent*, 2> m_spatialChildren; // All components that are attached to us }; }
3,769
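CalculateWorldTransform and SetWorldTransformDirectly in the component above are mirror images of each other: the first computes m_worldTransform = m_transform * parentWorldTransform, the second recovers m_transform = m_worldTransform * parentWorldTransform.GetInverse(). KRG's Transform type and its multiplication convention are engine-specific, so the following is only a plain 4x4-matrix analogy in numpy that keeps the snippet's multiplication order and checks the round trip:

import numpy as np

def rot_z(degrees):
    r = np.radians(degrees)
    m = np.eye(4)
    m[0, 0], m[0, 1] = np.cos(r), -np.sin(r)
    m[1, 0], m[1, 1] = np.sin(r), np.cos(r)
    return m

def translate(x, y, z):
    m = np.eye(4)
    m[:3, 3] = [x, y, z]
    return m

# stand-ins for the parent's attachment-socket transform and m_transform
parent_world = translate(5.0, 0.0, 0.0) @ rot_z(90.0)
local = translate(0.0, 2.0, 0.0)

# CalculateWorldTransform: m_worldTransform = m_transform * parentWorldTransform
world = local @ parent_world

# SetWorldTransformDirectly: m_transform = m_worldTransform * parentWorldTransform.GetInverse()
recovered_local = world @ np.linalg.inv(parent_world)

assert np.allclose(recovered_local, local)

Whether that multiplication order means parent-applied-first or child-applied-first depends on KRG's row- versus column-vector convention; the round-trip identity the two methods rely on holds either way.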
454
<filename>apps/pretrained_compound/pretrain_gnns/pretrain_attrmask.py #!/usr/bin/python #-*-coding:utf-8-*- # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Reproduction of paper Pretrain GNNs """ import os from os.path import join, exists import json import argparse import numpy as np import paddle import paddle.nn as nn import paddle.distributed as dist import pgl from pahelix.model_zoo.pretrain_gnns_model import PretrainGNNModel, AttrmaskModel from pahelix.datasets.zinc_dataset import load_zinc_dataset from pahelix.utils.splitters import RandomSplitter from pahelix.featurizers.pretrain_gnn_featurizer import AttrmaskTransformFn, AttrmaskCollateFn from pahelix.utils import load_json_config def train(args, model, train_dataset, collate_fn, opt): """ Define the training function according to the given settings, calculate the training loss. Args: args,model,train_dataset,collate_fn,opt; Returns: the average of the list loss. """ data_gen = train_dataset.get_data_loader( batch_size=args.batch_size, num_workers=args.num_workers, shuffle=True, collate_fn=collate_fn) list_loss = [] model.train() for graphs, masked_node_indice, masked_node_label in data_gen: graphs = graphs.tensor() masked_node_indice = paddle.to_tensor(masked_node_indice, 'int64') masked_node_label = paddle.to_tensor(masked_node_label, 'int64') loss = model(graphs, masked_node_indice, masked_node_label) loss.backward() opt.step() opt.clear_grad() list_loss.append(loss.numpy()) return np.mean(list_loss) def evaluate(args, model, test_dataset, collate_fn): """ Define the evaluate function In the dataset, a proportion of labels are blank. So we use a `valid` tensor to help eliminate these blank labels in both training and evaluation phase. """ data_gen = test_dataset.get_data_loader( batch_size=args.batch_size, num_workers=args.num_workers, shuffle=False, collate_fn=collate_fn) list_loss = [] model.eval() for graphs, masked_node_indice, masked_node_label in data_gen: graphs = graphs.tensor() masked_node_indice = paddle.to_tensor(masked_node_indice, 'int64') masked_node_label = paddle.to_tensor(masked_node_label, 'int64') loss = model(graphs, masked_node_indice, masked_node_label) list_loss.append(loss.numpy()) return np.mean(list_loss) def main(args): """ Call the configuration function of the compound encoder and model, build the model and load data, then start training. compound_encoder_config: a json file with the compound encoder configurations, such as dropout rate ,learning rate,num tasks and so on; model_config: a json file with the pretrain_gnn model configurations,such as dropout rate , learning rate,num tasks and so on; lr: It means the learning rate of different optimizer; AttrmaskModel: It is an unsupervised pretraining model which randomly masks the atom type of some node and then use the masked atom type as the prediction targets. 
""" if args.dist: dist.init_parallel_env() compound_encoder_config = load_json_config(args.compound_encoder_config) model_config = load_json_config(args.model_config) if not args.dropout_rate is None: compound_encoder_config['dropout_rate'] = args.dropout_rate model_config['dropout_rate'] = args.dropout_rate ### build model compound_encoder = PretrainGNNModel(compound_encoder_config) model = AttrmaskModel(model_config, compound_encoder) if args.dist: model = paddle.DataParallel(model) opt = paddle.optimizer.Adam(args.lr, parameters=model.parameters()) if not args.init_model is None and not args.init_model == "": compound_encoder.set_state_dict(paddle.load(args.init_model)) print('Load state_dict from %s' % args.init_model) ### load data dataset = load_zinc_dataset(args.data_path) splitter = RandomSplitter() train_dataset, _, test_dataset = splitter.split( dataset, frac_train=0.9, frac_valid=0.0, frac_test=0.1, seed=32) if args.dist: train_dataset = train_dataset[dist.get_rank()::dist.get_world_size()] transform_fn = AttrmaskTransformFn() train_dataset.transform(transform_fn, num_workers=args.num_workers) test_dataset.transform(transform_fn, num_workers=args.num_workers) print("Train/Test num: %s/%s" % (len(train_dataset), len(test_dataset))) ### start train collate_fn = AttrmaskCollateFn( atom_names=compound_encoder_config['atom_names'], bond_names=compound_encoder_config['bond_names'], mask_ratio=model_config['mask_ratio']) for epoch_id in range(args.max_epoch): train_loss = train(args, model, train_dataset, collate_fn, opt) test_loss = evaluate(args, model, test_dataset, collate_fn) if not args.dist or dist.get_rank() == 0: print("epoch:%d train/loss:%s" % (epoch_id, train_loss)) print("epoch:%d test/loss:%s" % (epoch_id, test_loss)) paddle.save(compound_encoder.state_dict(), '%s/epoch%d/compound_encoder.pdparams' % (args.model_dir, epoch_id)) paddle.save(model.state_dict(), '%s/epoch%d/model.pdparams' % (args.model_dir, epoch_id)) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument("--dist", action='store_true', default=False) parser.add_argument("--batch_size", type=int, default=256) parser.add_argument("--num_workers", type=int, default=4) parser.add_argument("--max_epoch", type=int, default=100) parser.add_argument("--data_path", type=str, default=None) parser.add_argument("--compound_encoder_config", type=str) parser.add_argument("--model_config", type=str) parser.add_argument("--init_model", type=str) parser.add_argument("--model_dir", type=str) parser.add_argument("--lr", type=float, default=0.001) parser.add_argument("--dropout_rate", type=float) args = parser.parse_args() main(args)
2,758
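Since all of pretrain_attrmask.py's behaviour hangs off its argparse options, one way to exercise main() outside the shell (for example from a notebook or a smoke test) is to hand it a Namespace with the same field names the parser defines. The paths and config file names below are placeholders, and the sketch assumes main is importable from the script:

from argparse import Namespace

args = Namespace(
    dist=False,
    batch_size=256,
    num_workers=4,
    max_epoch=10,
    data_path='./data/zinc_raw',                       # placeholder dataset location
    compound_encoder_config='./configs/encoder.json',  # placeholder config files
    model_config='./configs/attrmask.json',
    init_model=None,
    model_dir='./output/attrmask',
    lr=0.001,
    dropout_rate=None,   # keep the dropout defaults from the config files
)
main(args)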
886
/* * Copyright 2020 flow.ci * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.flowci.core.agent.domain; import com.flowci.exception.ArgumentException; import lombok.Getter; import lombok.Setter; import javax.validation.constraints.Max; import javax.validation.constraints.Min; import javax.validation.constraints.NotEmpty; import javax.validation.constraints.NotNull; import java.util.HashSet; import java.util.Set; @Getter @Setter public class AgentHostOption { private String id; private Set<String> tags = new HashSet<>(); private int exitOnIdle; @NotNull private AgentHost.Type type; @NotEmpty private String name; private String secret; private String user; private String ip; private String namespace; @Min(1) @Max(Integer.MAX_VALUE) private int port = 22; @Min(1) @Max(Integer.MAX_VALUE) private int maxSize = 5; public AgentHost toObj() { if (type == AgentHost.Type.SSH) { SshAgentHost host = new SshAgentHost(); host.setId(id); host.setName(name); host.setTags(tags); host.setSecret(secret); host.setUser(user); host.setIp(ip); host.setMaxSize(maxSize); host.setPort(port); host.setExitOnIdle(exitOnIdle); return host; } if (type == AgentHost.Type.LocalUnixSocket) { LocalUnixAgentHost host = new LocalUnixAgentHost(); host.setId(id); host.setName(name); host.setTags(tags); host.setMaxSize(maxSize); host.setExitOnIdle(exitOnIdle); return host; } if (type == AgentHost.Type.K8s) { K8sAgentHost host = new K8sAgentHost(); host.setId(id); host.setName(name); host.setTags(tags); host.setSecret(secret); host.setNamespace(namespace); host.setMaxSize(maxSize); host.setExitOnIdle(exitOnIdle); return host; } throw new ArgumentException("Unsupported host type"); } }
1,134
456
// SPDX-License-Identifier: BSD-3-Clause // Copyright (c) 2019-2020 <NAME> // All rights reserved. #pragma once #include <djvScene3D/IPrimitive.h> #include <djvGeom/PointList.h> namespace djv { namespace Scene3D { //! Poly-line primitive. class PolyLinePrimitive : public IPrimitive { DJV_NON_COPYABLE(PolyLinePrimitive); protected: PolyLinePrimitive(); public: static std::shared_ptr<PolyLinePrimitive> create(); void setPointLists(const std::vector<std::shared_ptr<Geom::PointList> >&); void addPointList(const std::shared_ptr<Geom::PointList>&); std::string getClassName() const override; bool isShaded() const override; const std::vector<std::shared_ptr<Geom::PointList> >& getPolyLines() const override; size_t getPointCount() const override; private: std::vector<std::shared_ptr<Geom::PointList> > _pointLists; size_t _pointCount = 0; }; } // namespace Scene3D } // namespace djv #include <djvScene3D/PolyLinePrimitiveInline.h>
511
831
/* * Copyright (C) 2016 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.android.tools.idea.uibuilder.api.actions; import com.android.tools.idea.common.model.NlComponent; import com.android.tools.idea.uibuilder.api.ViewEditor; import com.android.tools.idea.uibuilder.api.ViewHandler; import org.intellij.lang.annotations.JdkConstants.InputEventMask; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import javax.swing.*; import java.util.ArrayList; import java.util.List; /** A separator between actions */ public final class ViewActionSeparator extends AbstractViewAction { private List<ViewAction> myFollowingActions = new ArrayList<>(); /** * Creates a separator */ public ViewActionSeparator() { super(null, ""); } /** * Setup {@link ViewActionSeparator} with a list of actions following the separator. * Use this method to omit separators when all the following actions are invisible. * There is no need to call this method if actions are never invisible. * * @param actions the actions for e.g. a toolbar that have separators between actions * that may be invisible. */ public static void setupFollowingActions(@NotNull List<ViewAction> actions) { ViewActionSeparator separator = null; for (ViewAction action : actions) { if (action instanceof ViewActionSeparator) { separator = (ViewActionSeparator)action; } else if (separator != null) { separator.addFollowingAction(action); } if (action instanceof NestedViewActionMenu) { List<ViewAction> nested = new ArrayList<>(); ((NestedViewActionMenu)action).getActions().forEach(list -> nested.addAll(list)); setupFollowingActions(nested); } } } /** * Register action following this separator. */ private void addFollowingAction(@NotNull ViewAction action) { myFollowingActions.add(action); } @Override public void updatePresentation(@NotNull ViewActionPresentation presentation, @NotNull ViewEditor editor, @NotNull ViewHandler handler, @NotNull NlComponent component, @NotNull List<NlComponent> selectedChildren, @InputEventMask int modifiersEx) { } @Override public void perform(@NotNull ViewEditor editor, @NotNull ViewHandler handler, @NotNull NlComponent component, @NotNull List<NlComponent> selectedChildren, int modifiers) { } public boolean isVisible(@NotNull ViewEditor editor, @NotNull ViewHandler handler, @NotNull NlComponent component, @NotNull List<NlComponent> selectedChildren) { // If there are no following actions: assume that setupFollowingActions // was never called, and thus we should not hide separators. if (myFollowingActions.isEmpty()) { return true; } // The separator is visible if at least one of the following actions are visible: for (ViewAction action : myFollowingActions) { SeparatorPresentation following = new SeparatorPresentation(); action.updatePresentation(following, editor, handler, component, selectedChildren, 0); if (following.isVisible()) { return true; } } // All following actions are invisible: Don't show the separator either. 
return false; } private static class SeparatorPresentation implements ViewActionPresentation { private boolean myVisible = true; @Override public void setLabel(@NotNull String label) {} @Override public void setEnabled(boolean enabled) {} @Override public void setIcon(@Nullable Icon icon) {} @Override public void setVisible(boolean visible) { myVisible = visible; } public boolean isVisible() { return myVisible; } } }
1,675
3,485
<filename>simple-rules-spel/src/test/resources/composite-rule-invalid-empty-composing-rules.json<gh_stars>1000+ [ { "name": "invalid rule", "compositeRuleType": "UnitRuleGroup", "priority": 1, "composingRules": [] } ]
98
407
package com.alibaba.tesla.appmanager.server.repository.mapper; import com.alibaba.tesla.appmanager.server.repository.domain.RtTraitInstanceDO; import com.alibaba.tesla.appmanager.server.repository.domain.RtTraitInstanceDOExample; import org.apache.ibatis.annotations.Mapper; import org.apache.ibatis.annotations.Param; import java.util.List; @Mapper public interface RtTraitInstanceDOMapper { long countByExample(RtTraitInstanceDOExample example); int deleteByExample(RtTraitInstanceDOExample example); int deleteByPrimaryKey(Long id); int insertSelective(RtTraitInstanceDO record); List<RtTraitInstanceDO> selectByExample(RtTraitInstanceDOExample example); RtTraitInstanceDO selectByPrimaryKey(Long id); int updateByExampleSelective(@Param("record") RtTraitInstanceDO record, @Param("example") RtTraitInstanceDOExample example); }
283
819
<filename>PaddleCV/adversarial/advbox/models/__init__.py """ Models __init__.py """
33
852
<reponame>togawamanabu/RoboND-DeepRL-Project<filename>c/pyTorch.h /* * http://github.com/dusty-nv/jetson-reinforcement */ #ifndef __PY_TORCH_H_ #define __PY_TORCH_H_ #ifdef USE_PYTHON #include <stdio.h> #include <stdint.h> #include <Python.h> #include <TH/TH.h> #include <THC/THC.h> #include <THP.h> #include <THCP.h> #endif #endif
168
1,194
package ca.uhn.fhir.jpa.cache.config; import ca.uhn.fhir.jpa.cache.IResourceChangeListener; import ca.uhn.fhir.jpa.cache.ResourceChangeListenerCache; import ca.uhn.fhir.jpa.cache.ResourceChangeListenerCacheFactory; import ca.uhn.fhir.jpa.searchparam.SearchParameterMap; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; import org.springframework.context.annotation.Scope; @Configuration public class RegisteredResourceListenerFactoryConfig { @Bean ResourceChangeListenerCacheFactory resourceChangeListenerCacheFactory() { return new ResourceChangeListenerCacheFactory(); } @Bean @Scope("prototype") ResourceChangeListenerCache resourceChangeListenerCache(String theResourceName, IResourceChangeListener theResourceChangeListener, SearchParameterMap theSearchParameterMap, long theRemoteRefreshIntervalMs) { return new ResourceChangeListenerCache(theResourceName, theResourceChangeListener, theSearchParameterMap, theRemoteRefreshIntervalMs); } }
288
447
<gh_stars>100-1000 """ Core auditing APIs. """ import logging from dataclasses import dataclass from typing import Iterator, List, Tuple from pip_audit._dependency_source import DependencySource from pip_audit._service import Dependency, VulnerabilityResult, VulnerabilityService logger = logging.getLogger(__name__) @dataclass(frozen=True) class AuditOptions: """ Settings the control the behavior of an `Auditor` instance. """ dry_run: bool = False class Auditor: """ The core class of the `pip-audit` API. For a given dependency source and vulnerability service, supply a mapping of dependencies to known vulnerabilities. """ def __init__( self, service: VulnerabilityService, options: AuditOptions = AuditOptions(), ): """ Create a new auditor. Auditors start with no dependencies to audit; each `audit` step is fed a `DependencySource`. The behavior of the auditor can be optionally tweaked with the `options` parameter. """ self._service = service self._options = options def audit( self, source: DependencySource ) -> Iterator[Tuple[Dependency, List[VulnerabilityResult]]]: """ Perform the auditing step, collecting dependencies from `source`. """ specs = source.collect() if self._options.dry_run: # Drain the iterator in dry-run mode. logger.info(f"Dry run: would have audited {len(list(specs))} packages") return {} else: yield from self._service.query_all(specs)
604
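Auditor above is deliberately decoupled from any concrete VulnerabilityService or DependencySource, and those implementations are not part of this file, so the sketch below treats service and source as placeholders for whatever implementations you construct; only the constructor and the audit() signature are taken from the code shown:

# `service` and `source` are assumed to be pre-built VulnerabilityService /
# DependencySource instances; pip-audit's concrete classes are not shown here.
auditor = Auditor(service, options=AuditOptions(dry_run=False))

for dep, vulns in auditor.audit(source):
    if vulns:
        print(dep, "has", len(vulns), "known vulnerabilities")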
665
<filename>http/http_named_endpoint.h<gh_stars>100-1000 /* http_named_endpoint.h -*- C++ -*- <NAME>, 9 November 2012 Copyright (c) 2012 mldb.ai inc. All rights reserved. This file is part of MLDB. Copyright 2015 mldb.ai inc. All rights reserved. */ #pragma once #include "mldb/http/http_endpoint.h" #include "named_endpoint.h" #include "http_rest_proxy.h" #include "http_rest_endpoint.h" namespace MLDB { /*****************************************************************************/ /* HTTP NAMED ENDPOINT */ /*****************************************************************************/ /** A message loop-compatible endpoint for http connections. */ struct HttpNamedEndpoint : public NamedEndpoint, public HttpRestEndpoint { HttpNamedEndpoint(); void init(std::shared_ptr<ConfigurationService> config, const std::string & endpointName); /** Bid into a given address. Address is host:port. If no port is given (and no colon), than use any port number. If port is a number and then "+", then scan for any port higher than the given number. If host is empty or "*", then use all interfaces. */ std::string bindTcpAddress(const std::string & address); /** Bind into a specific tcp port. If the port is not available, it will throw an exception. Returns the uri to connect to. */ std::string bindTcpFixed(std::string host, int port); /** Bind into a tcp port. If the preferred port is not available, it will scan until it finds one that is. Returns the uri to connect to. */ std::string bindTcp(PortRange const & portRange, std::string host = ""); }; } // namespace MLDB
675
3,428
<reponame>ghalimi/stdlib {"id":"01362","group":"easy-ham-2","checksum":{"type":"MD5","value":"a63dddffa1f9903a480cd9aaaf6d16f3"},"text":"From <EMAIL> Fri Aug 9 15:34:57 2002\nReturn-Path: <<EMAIL>>\nDelivered-To: yyyy@<EMAIL>.netnoteinc.com\nReceived: from localhost (localhost [127.0.0.1])\n\tby phobos.labs.netnoteinc.com (Postfix) with ESMTP id 39F08440E0\n\tfor <jm@localhost>; Fri, 9 Aug 2002 10:33:42 -0400 (EDT)\nReceived: from phobos [127.0.0.1]\n\tby localhost with IMAP (fetchmail-5.9.0)\n\tfor jm@localhost (single-drop); Fri, 09 Aug 2002 15:33:42 +0100 (IST)\nReceived: from webnote.net (mail.webnote.net [193.120.211.219]) by\n dogma.slashnull.org (8.11.6/8.11.6) with ESMTP id g79EPxb06947 for\n <<EMAIL>>; Fri, 9 Aug 2002 15:25:59 +0100\nReceived: from cpu59.osdn.com (slashdot.org [192.168.127.12] (may be forged))\n by webnote.net (8.9.3/8.9.3) with ESMTP id DAA02784 for\n <<EMAIL>>; Fri, 9 Aug 2002 03:01:25 +0100\nFrom: <EMAIL>\nReceived: from [10.2.181.14] (helo=perl.org) by cpu59.osdn.com with smtp\n (Exim 3.35 #1 (Debian)) id 17cz4L-0008EV-01 for <<EMAIL>>;\n Thu, 08 Aug 2002 22:00:01 -0400\nDate: Fri, 09 Aug 2002 02:00:28 +0000\nSubject: [use Perl] Stories for 2002-08-09\nTo: y<EMAIL>int.<EMAIL>\nPrecedence: list\nX-Bulkmail: 2.051\nMessage-Id: <<EMAIL>>\n\nuse Perl Daily Newsletter\n\nIn this issue:\n * <NAME> to speak in London\n * Meeting in Budapest\n\n+--------------------------------------------------------------------+\n| <NAME> to speak in London |\n| posted by ziggy on Thursday August 08, @15:21 (damian) |\n| http://use.perl.org/article.pl?sid=02/08/08/1923211 |\n+--------------------------------------------------------------------+\n\n[0]blech writes \"<NAME> will be coming to London in August, and\nhe'll be speaking at a couple of special [1]London.pm meetings, at the\napproprately named [2]Conway Hall in [3]Central London.\n\nThe first is [4]Life, the Universe and Everything on Tuesday 27th August,\nand the second is [5]Perl 6 Prospective on Thursday 29th August. Both\nstart at 6.30 pm sharp, so we can fit in the maximum possible\nDamian-ness.\n\nWe hope to see you there!\"\n\nDiscuss this story at:\n http://use.perl.org/comments.pl?sid=02/08/08/1923211\n\nLinks:\n 0. http://london.pm.org/\n 1. http://london.pm.org/meetings/\n 2. http://www.conwayhall.org.uk/\n 3. http://www.streetmap.co.uk/streetmap.dll?G2M?X=530661&Y=181764&A=Y&Z=1\n 4. http://www.yetanother.org/damian/seminars/Life.html\n 5. http://www.yetanother.org/damian/seminars/Perl6.html\n\n\n+--------------------------------------------------------------------+\n| Meeting in Budapest |\n| posted by ziggy on Thursday August 08, @17:24 (groups) |\n| http://use.perl.org/article.pl?sid=02/08/08/1927253 |\n+--------------------------------------------------------------------+\n\n[0]<NAME> writes \"We are organizing a meeting for Perl users in\nBudapest. We'll meet on 26th August at 17:00 at a location we'll announce\nwithin a few days on [1]our mailing list. Planned agenda: How to raise\ncamels on the Hungarian Puszta.\n\nYou can read more details (in Hungarian) [2]here. Non Hungarians who\nhappen to be in the area are also welcome, though the presentation(s)\nwill be given in Hungarian.\"\n\nDiscuss this story at:\n http://use.perl.org/comments.pl?sid=02/08/08/1927253\n\nLinks:\n 0. mailto:<EMAIL>\n 1. http://www.atom.hu/mailman/listinfo/perl\n 2. http://www.atom.hu/pipermail/perl/2002-August/001150.html\n\n\n\nCopyright 1997-2002 pudge. 
All rights reserved.\n\n\n======================================================================\n\nYou have received this message because you subscribed to it\non use Perl. To stop receiving this and other\nmessages from use Perl, or to add more messages\nor change your preferences, please go to your user page.\n\n\thttp://use.perl.org/my/messages/\n\nYou can log in and change your preferences from there.\n\n\n"}
1,608
1,163
#!/usr/bin/env python3 # Copyright 2021 The IREE Authors # # Licensed under the Apache License v2.0 with LLVM Exceptions. # See https://llvm.org/LICENSE.txt for license information. # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception """Diffs two local benchmark result JSON files. Example usage: python3 diff_local_benchmarks.py --base=/path/to/base_benchmarks.json --target=/path/to/target_benchmarks.json """ import argparse import json import os import requests from common.benchmark_presentation import * def get_benchmark_result_markdown(base_benchmark_file: str, target_benchmark_file: str, verbose: bool = False) -> str: """Gets the full markdown summary of all benchmarks in files.""" base_benchmarks = aggregate_all_benchmarks([base_benchmark_file]) target_benchmarks = aggregate_all_benchmarks([target_benchmark_file]) # Update the target benchmarks with their corresponding base numbers. for bench in base_benchmarks: if bench in target_benchmarks: target_benchmarks[bench].base_mean_time = base_benchmarks[bench].mean_time # Compose the full benchmark tables. full_table = [md.header("Full Benchmark Summary", 2)] full_table.append(categorize_benchmarks_into_tables(target_benchmarks)) return "\n\n".join(full_table) def parse_arguments(): """Parses command-line options.""" def check_file_path(path): if os.path.isfile(path): return path else: raise ValueError(path) parser = argparse.ArgumentParser() parser.add_argument("--base", type=check_file_path, required=True, help="Base benchmark results") parser.add_argument("--target", type=check_file_path, required=True, help="Target benchmark results") parser.add_argument("--verbose", action="store_true", help="Print internal information during execution") args = parser.parse_args() return args if __name__ == "__main__": args = parse_arguments() print( get_benchmark_result_markdown(args.base, args.target, verbose=args.verbose))
978
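The module docstring already shows the CLI form; the same comparison can be done programmatically through get_benchmark_result_markdown, which is handy when the diff should end up in a file or a PR comment. The file paths are placeholders, and the sketch assumes the function is imported from the script's module:

# hypothetical paths to two local benchmark JSON dumps
markdown = get_benchmark_result_markdown(
    "/path/to/base_benchmarks.json",
    "/path/to/target_benchmarks.json",
    verbose=True,
)
with open("benchmark-diff.md", "w") as f:
    f.write(markdown)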
1,546
/* * Copyright (C) 2017 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.android.launcher3.views; import static android.view.HapticFeedbackConstants.CLOCK_TICK; import static androidx.recyclerview.widget.RecyclerView.SCROLL_STATE_IDLE; import android.animation.ObjectAnimator; import android.content.Context; import android.content.res.Resources; import android.content.res.TypedArray; import android.graphics.Canvas; import android.graphics.Insets; import android.graphics.Paint; import android.graphics.Point; import android.graphics.Rect; import android.graphics.RectF; import android.os.Build; import android.util.AttributeSet; import android.util.Log; import android.util.Property; import android.view.MotionEvent; import android.view.View; import android.view.ViewConfiguration; import android.view.WindowInsets; import android.widget.TextView; import androidx.annotation.Nullable; import androidx.annotation.RequiresApi; import androidx.recyclerview.widget.RecyclerView; import com.android.launcher3.BaseRecyclerView; import com.android.launcher3.R; import com.android.launcher3.Utilities; import com.android.launcher3.graphics.FastScrollThumbDrawable; import com.android.launcher3.util.Themes; import java.util.Collections; import java.util.List; /** * The track and scrollbar that shows when you scroll the list. */ public class RecyclerViewFastScroller extends View { private static final String TAG = "RecyclerViewFastScroller"; private static final boolean DEBUG = false; private static final int FASTSCROLL_THRESHOLD_MILLIS = 40; private static final int SCROLL_DELTA_THRESHOLD_DP = 4; // Track is very narrow to target and correctly. This is especially the case if a user is // using a hardware case. Even if x is offset by following amount, we consider it to be valid. private static final int SCROLLBAR_LEFT_OFFSET_TOUCH_DELEGATE_DP = 5; private static final Rect sTempRect = new Rect(); private static final Property<RecyclerViewFastScroller, Integer> TRACK_WIDTH = new Property<RecyclerViewFastScroller, Integer>(Integer.class, "width") { @Override public Integer get(RecyclerViewFastScroller scrollBar) { return scrollBar.mWidth; } @Override public void set(RecyclerViewFastScroller scrollBar, Integer value) { scrollBar.setTrackWidth(value); } }; private final static int MAX_TRACK_ALPHA = 30; private final static int SCROLL_BAR_VIS_DURATION = 150; private static final List<Rect> SYSTEM_GESTURE_EXCLUSION_RECT = Collections.singletonList(new Rect()); private final int mMinWidth; private final int mMaxWidth; private final int mThumbPadding; /** Keeps the last known scrolling delta/velocity along y-axis. 
*/ private int mDy = 0; private final float mDeltaThreshold; private final float mScrollbarLeftOffsetTouchDelegate; private final ViewConfiguration mConfig; // Current width of the track private int mWidth; private ObjectAnimator mWidthAnimator; private final Paint mThumbPaint; protected final int mThumbHeight; private final RectF mThumbBounds = new RectF(); private final Point mThumbDrawOffset = new Point(); private final Paint mTrackPaint; private float mLastTouchY; private boolean mIsDragging; private boolean mIsThumbDetached; private final boolean mCanThumbDetach; private boolean mIgnoreDragGesture; private boolean mIsRecyclerViewFirstChildInParent = true; private long mDownTimeStampMillis; // This is the offset from the top of the scrollbar when the user first starts touching. To // prevent jumping, this offset is applied as the user scrolls. protected int mTouchOffsetY; protected int mThumbOffsetY; protected int mRvOffsetY; // Fast scroller popup private TextView mPopupView; private boolean mPopupVisible; private String mPopupSectionName; private Insets mSystemGestureInsets; protected BaseRecyclerView mRv; private RecyclerView.OnScrollListener mOnScrollListener; @Nullable private OnFastScrollChangeListener mOnFastScrollChangeListener; private int mDownX; private int mDownY; private int mLastY; public RecyclerViewFastScroller(Context context) { this(context, null); } public RecyclerViewFastScroller(Context context, AttributeSet attrs) { this(context, attrs, 0); } public RecyclerViewFastScroller(Context context, AttributeSet attrs, int defStyleAttr) { super(context, attrs, defStyleAttr); mTrackPaint = new Paint(); mTrackPaint.setColor(Themes.getAttrColor(context, android.R.attr.textColorPrimary)); mTrackPaint.setAlpha(MAX_TRACK_ALPHA); mThumbPaint = new Paint(); mThumbPaint.setAntiAlias(true); mThumbPaint.setColor(Themes.getColorAccent(context)); mThumbPaint.setStyle(Paint.Style.FILL); Resources res = getResources(); mWidth = mMinWidth = res.getDimensionPixelSize(R.dimen.fastscroll_track_min_width); mMaxWidth = res.getDimensionPixelSize(R.dimen.fastscroll_track_max_width); mThumbPadding = res.getDimensionPixelSize(R.dimen.fastscroll_thumb_padding); mThumbHeight = res.getDimensionPixelSize(R.dimen.fastscroll_thumb_height); mConfig = ViewConfiguration.get(context); mDeltaThreshold = res.getDisplayMetrics().density * SCROLL_DELTA_THRESHOLD_DP; mScrollbarLeftOffsetTouchDelegate = res.getDisplayMetrics().density * SCROLLBAR_LEFT_OFFSET_TOUCH_DELEGATE_DP; TypedArray ta = context.obtainStyledAttributes(attrs, R.styleable.RecyclerViewFastScroller, defStyleAttr, 0); mCanThumbDetach = ta.getBoolean(R.styleable.RecyclerViewFastScroller_canThumbDetach, false); ta.recycle(); } public void setRecyclerView(BaseRecyclerView rv, TextView popupView) { if (mRv != null && mOnScrollListener != null) { mRv.removeOnScrollListener(mOnScrollListener); } mRv = rv; mRv.addOnScrollListener(mOnScrollListener = new RecyclerView.OnScrollListener() { @Override public void onScrolled(RecyclerView recyclerView, int dx, int dy) { mDy = dy; // TODO(winsonc): If we want to animate the section heads while scrolling, we can // initiate that here if the recycler view scroll state is not // RecyclerView.SCROLL_STATE_IDLE. 
mRv.onUpdateScrollbar(dy); } }); mPopupView = popupView; mPopupView.setBackground( new FastScrollThumbDrawable(mThumbPaint, Utilities.isRtl(getResources()))); } public void reattachThumbToScroll() { mIsThumbDetached = false; } public void setThumbOffsetY(int y) { if (mThumbOffsetY == y) { int rvCurrentOffsetY = mRv.getCurrentScrollY(); if (mRvOffsetY != rvCurrentOffsetY) { mRvOffsetY = mRv.getCurrentScrollY(); notifyScrollChanged(); } return; } updatePopupY(y); mThumbOffsetY = y; invalidate(); mRvOffsetY = mRv.getCurrentScrollY(); notifyScrollChanged(); } public int getThumbOffsetY() { return mThumbOffsetY; } private void setTrackWidth(int width) { if (mWidth == width) { return; } mWidth = width; invalidate(); } public int getThumbHeight() { return mThumbHeight; } public boolean isDraggingThumb() { return mIsDragging; } public boolean isThumbDetached() { return mIsThumbDetached; } /** * Handles the touch event and determines whether to show the fast scroller (or updates it if * it is already showing). */ public boolean handleTouchEvent(MotionEvent ev, Point offset) { int x = (int) ev.getX() - offset.x; int y = (int) ev.getY() - offset.y; switch (ev.getAction()) { case MotionEvent.ACTION_DOWN: // Keep track of the down positions mDownX = x; mDownY = mLastY = y; mDownTimeStampMillis = ev.getDownTime(); if ((Math.abs(mDy) < mDeltaThreshold && mRv.getScrollState() != SCROLL_STATE_IDLE)) { // now the touch events are being passed to the {@link WidgetCell} until the // touch sequence goes over the touch slop. mRv.stopScroll(); } if (isNearThumb(x, y)) { mTouchOffsetY = mDownY - mThumbOffsetY; } break; case MotionEvent.ACTION_MOVE: mLastY = y; int absDeltaY = Math.abs(y - mDownY); int absDeltaX = Math.abs(x - mDownX); // Check if we should start scrolling, but ignore this fastscroll gesture if we have // exceeded some fixed movement mIgnoreDragGesture |= absDeltaY > mConfig.getScaledPagingTouchSlop(); if (!mIsDragging && !mIgnoreDragGesture && mRv.supportsFastScrolling()) { if ((isNearThumb(mDownX, mLastY) && ev.getEventTime() - mDownTimeStampMillis > FASTSCROLL_THRESHOLD_MILLIS)) { calcTouchOffsetAndPrepToFastScroll(mDownY, mLastY); } } if (mIsDragging) { updateFastScrollSectionNameAndThumbOffset(y); } break; case MotionEvent.ACTION_UP: case MotionEvent.ACTION_CANCEL: mRv.onFastScrollCompleted(); mTouchOffsetY = 0; mLastTouchY = 0; mIgnoreDragGesture = false; if (mIsDragging) { mIsDragging = false; animatePopupVisibility(false); showActiveScrollbar(false); } break; } if (DEBUG) { Log.d(TAG, (ev.getAction() == MotionEvent.ACTION_DOWN ? 
"\n" : "") + "handleTouchEvent " + MotionEvent.actionToString(ev.getAction()) + " (" + x + "," + y + ")" + " isDragging=" + mIsDragging + " mIgnoreDragGesture=" + mIgnoreDragGesture); } return mIsDragging; } private void calcTouchOffsetAndPrepToFastScroll(int downY, int lastY) { mIsDragging = true; if (mCanThumbDetach) { mIsThumbDetached = true; } mTouchOffsetY += (lastY - downY); animatePopupVisibility(true); showActiveScrollbar(true); } private void updateFastScrollSectionNameAndThumbOffset(int y) { // Update the fastscroller section name at this touch position int bottom = mRv.getScrollbarTrackHeight() - mThumbHeight; float boundedY = (float) Math.max(0, Math.min(bottom, y - mTouchOffsetY)); String sectionName = mRv.scrollToPositionAtProgress(boundedY / bottom); if (!sectionName.equals(mPopupSectionName)) { mPopupSectionName = sectionName; mPopupView.setText(sectionName); performHapticFeedback(CLOCK_TICK); } animatePopupVisibility(!sectionName.isEmpty()); mLastTouchY = boundedY; setThumbOffsetY((int) mLastTouchY); } public void onDraw(Canvas canvas) { if (mThumbOffsetY < 0) { return; } int saveCount = canvas.save(); canvas.translate(getWidth() / 2, mRv.getScrollBarTop()); mThumbDrawOffset.set(getWidth() / 2, mRv.getScrollBarTop()); // Draw the track float halfW = mWidth / 2; canvas.drawRoundRect(-halfW, 0, halfW, mRv.getScrollbarTrackHeight(), mWidth, mWidth, mTrackPaint); canvas.translate(0, mThumbOffsetY); mThumbDrawOffset.y += mThumbOffsetY; halfW += mThumbPadding; float r = getScrollThumbRadius(); mThumbBounds.set(-halfW, 0, halfW, mThumbHeight); canvas.drawRoundRect(mThumbBounds, r, r, mThumbPaint); if (Utilities.ATLEAST_Q) { mThumbBounds.roundOut(SYSTEM_GESTURE_EXCLUSION_RECT.get(0)); // swiping very close to the thumb area (not just within it's bound) // will also prevent back gesture SYSTEM_GESTURE_EXCLUSION_RECT.get(0).offset(mThumbDrawOffset.x, mThumbDrawOffset.y); if (Utilities.ATLEAST_Q && mSystemGestureInsets != null) { SYSTEM_GESTURE_EXCLUSION_RECT.get(0).left = SYSTEM_GESTURE_EXCLUSION_RECT.get(0).right - mSystemGestureInsets.right; } setSystemGestureExclusionRects(SYSTEM_GESTURE_EXCLUSION_RECT); } canvas.restoreToCount(saveCount); } @Override @RequiresApi(Build.VERSION_CODES.Q) public WindowInsets onApplyWindowInsets(WindowInsets insets) { if (Utilities.ATLEAST_Q) { mSystemGestureInsets = insets.getSystemGestureInsets(); } return super.onApplyWindowInsets(insets); } private float getScrollThumbRadius() { return mWidth + mThumbPadding + mThumbPadding; } /** * Animates the width of the scrollbar. */ private void showActiveScrollbar(boolean isScrolling) { if (mWidthAnimator != null) { mWidthAnimator.cancel(); } mWidthAnimator = ObjectAnimator.ofInt(this, TRACK_WIDTH, isScrolling ? mMaxWidth : mMinWidth); mWidthAnimator.setDuration(SCROLL_BAR_VIS_DURATION); mWidthAnimator.start(); } /** * Returns whether the specified point is inside the thumb bounds. */ private boolean isNearThumb(int x, int y) { int offset = y - mThumbOffsetY; return x >= 0 && x < getWidth() && offset >= 0 && offset <= mThumbHeight; } /** * Returns true if AllAppsTransitionController can handle vertical motion * beginning at this point. */ public boolean shouldBlockIntercept(int x, int y) { return isNearThumb(x, y); } /** * Returns whether the specified x position is near the scroll bar. 
*/ public boolean isNearScrollBar(int x) { return x >= (getWidth() - mMaxWidth) / 2 - mScrollbarLeftOffsetTouchDelegate && x <= (getWidth() + mMaxWidth) / 2; } private void animatePopupVisibility(boolean visible) { if (mPopupVisible != visible) { mPopupVisible = visible; mPopupView.animate().cancel(); mPopupView.animate().alpha(visible ? 1f : 0f).setDuration(visible ? 200 : 150).start(); } } private void updatePopupY(int lastTouchY) { int height = mPopupView.getHeight(); // Aligns the rounded corner of the pop up with the top of the thumb. float top = mRv.getScrollBarTop() + lastTouchY + (getScrollThumbRadius() / 2f) - (height / 2f); top = Utilities.boundToRange(top, 0, getTop() + mRv.getScrollBarTop() + mRv.getScrollbarTrackHeight() - height); mPopupView.setTranslationY(top); } public boolean isHitInParent(float x, float y, Point outOffset) { if (mThumbOffsetY < 0) { return false; } getHitRect(sTempRect); if (mIsRecyclerViewFirstChildInParent) { sTempRect.top += mRv.getScrollBarTop(); } if (outOffset != null) { outOffset.set(sTempRect.left, sTempRect.top); } return sTempRect.contains((int) x, (int) y); } @Override public boolean hasOverlappingRendering() { // There is actually some overlap between the track and the thumb. But since the track // alpha is so low, it does not matter. return false; } public void setIsRecyclerViewFirstChildInParent(boolean isRecyclerViewFirstChildInParent) { mIsRecyclerViewFirstChildInParent = isRecyclerViewFirstChildInParent; } public void setOnFastScrollChangeListener( @Nullable OnFastScrollChangeListener onFastScrollChangeListener) { mOnFastScrollChangeListener = onFastScrollChangeListener; } private void notifyScrollChanged() { if (mOnFastScrollChangeListener != null) { mOnFastScrollChangeListener.onScrollChanged(); } } /** * A callback that is invoked when there is a scroll change in {@link RecyclerViewFastScroller}. */ public interface OnFastScrollChangeListener { /** Called when the recycler view scroll has changed. */ void onScrollChanged(); } }
7,557
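A minimal attach-and-wire sketch for the fast scroller sample above. It is illustrative only and not part of the corpus record: the enclosing container and the view IDs are assumptions, and only methods visible in the sample (setRecyclerView, setIsRecyclerViewFirstChildInParent, setOnFastScrollChangeListener) are used.

import android.util.Log;
import android.view.ViewGroup;
import android.widget.TextView;
import com.android.launcher3.BaseRecyclerView;

// Hedged wiring sketch; the R.id values below are hypothetical placeholders, not real Launcher3 IDs.
final class FastScrollerWiring {
    static void attach(ViewGroup container) {
        BaseRecyclerView appsList = container.findViewById(R.id.apps_list_view);
        RecyclerViewFastScroller scroller = container.findViewById(R.id.fast_scroller);
        TextView popup = container.findViewById(R.id.fast_scroller_popup);

        // Wire the scroller to the list; the popup shows the current section name while dragging.
        scroller.setRecyclerView(appsList, popup);

        // Keep true while the recycler view is the first child of the shared parent,
        // so isHitInParent() offsets the hit rect correctly.
        scroller.setIsRecyclerViewFirstChildInParent(true);

        // Optional: observe scroll changes driven by dragging the thumb.
        scroller.setOnFastScrollChangeListener(() -> Log.d("FastScrollDemo", "fast scroll changed"));
    }
}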
562
<reponame>rockandsalt/conan-center-index #include <assert.h> #include <stdbool.h> #include <stdlib.h> #include <stdio.h> #include <stdint.h> #include <string.h> #include <mikktspace.h> static int GetNumFaces(const SMikkTSpaceContext *pContext) { return 0; } int main() { SMikkTSpaceInterface sInterface = {NULL}; sInterface.m_getNumFaces = GetNumFaces; SMikkTSpaceContext sContext = {NULL}; sContext.m_pInterface = &sInterface; genTangSpaceDefault(&sContext); return 0; }
202
343
<reponame>nzeh/syzygy<gh_stars>100-1000 // Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "syzygy/wsdump/process_working_set.h" #include <psapi.h> #include <tlhelp32.h> #include <algorithm> #include <map> #include <memory> #include "base/win/scoped_handle.h" #include "syzygy/common/com_utils.h" #include "syzygy/core/address_space.h" namespace wsdump { namespace { const size_t kPageSize = 4096; // These are inferred from the MSDN page for QueryWorkingSet. const int kPageReadOnly = 0x001; const int kPageExecute = 0x002; const int kPageExecuteRead = 0x003; const int kPageReadWrite = 0x004; const int kPageWriteCopy = 0x005; const int kPageExecuteReadWrite = 0x006; const int kPageExecuteWriteCopy = 0x007; bool LessModuleName(const ProcessWorkingSet::ModuleStats& a, const ProcessWorkingSet::ModuleStats& b) { return a.module_name < b.module_name; } } // namespace bool ProcessWorkingSet::Initialize(DWORD process_id) { ModuleAddressSpace modules; if (!CaptureModules(process_id, &modules)) return false; const DWORD kProcessPermissions = PROCESS_QUERY_INFORMATION | PROCESS_VM_READ; base::win::ScopedHandle process( ::OpenProcess(kProcessPermissions, FALSE, process_id)); if (!process.IsValid()) { DWORD err = ::GetLastError(); LOG(ERROR) << "OpenProcess failed: " << common::LogWe(err); return false; } ScopedWsPtr working_set; if (!CaptureWorkingSet(process.Get(), &working_set)) return false; // The new stats we're building. ModuleStatsVector new_stats; // This maps from module name to index in the above vector. typedef std::map<std::wstring, size_t> NameToIndexMap; NameToIndexMap name_to_index; for (size_t i = 0; i < working_set->NumberOfEntries; ++i) { PSAPI_WORKING_SET_BLOCK entry = working_set->WorkingSetInfo[i]; size_t address = entry.VirtualPage * kPageSize; ModuleAddressSpace::Range page_range(address, kPageSize); ModuleAddressSpace::RangeMap::const_iterator it = modules.FindContaining(page_range); Stats* stats = NULL; if (it == modules.end()) { stats = &non_module_stats_; } else { // Find the module with this name, or add it if it's missing. const std::wstring& module_name = it->second; NameToIndexMap::const_iterator it = name_to_index.find(module_name); if (it == name_to_index.end()) { // We haven't seen this module, add it to the end of the vector. 
name_to_index[module_name] = new_stats.size(); new_stats.push_back(ModuleStats()); ModuleStats* module_stats = &new_stats.back(); module_stats->module_name = module_name; stats = module_stats; } else { stats = &new_stats[it->second]; } } DCHECK(stats != NULL); total_stats_.pages++; stats->pages++; if (entry.Shared) { total_stats_.shareable_pages++; stats->shareable_pages++; } if (entry.ShareCount > 1) { total_stats_.shared_pages++; stats->shared_pages++; } if (entry.Protection & kPageReadWrite) { total_stats_.writable_pages++; stats->writable_pages++; } else if (entry.Protection & kPageExecute) { total_stats_.executable_pages++; stats->executable_pages++; } else if (entry.Protection & kPageReadOnly) { total_stats_.read_only_pages++; stats->read_only_pages++; } } std::sort(new_stats.begin(), new_stats.end(), LessModuleName); new_stats.swap(module_stats_); return true; } bool ProcessWorkingSet::CaptureWorkingSet(HANDLE process, ScopedWsPtr* working_set) { DCHECK(working_set != NULL); // Estimate the starting buffer size by the current WS size. PROCESS_MEMORY_COUNTERS counters = {}; if (!::GetProcessMemoryInfo(process, &counters, sizeof(counters))) { DWORD err = ::GetLastError(); LOG(ERROR) << "Unable to get process memory info: " << common::LogWe(err); return false; } std::unique_ptr<PSAPI_WORKING_SET_INFORMATION> buffer; DWORD number_of_entries = counters.WorkingSetSize / kPageSize; int retries = 5; for (;;) { DWORD buffer_size = sizeof(PSAPI_WORKING_SET_INFORMATION) + (number_of_entries * sizeof(PSAPI_WORKING_SET_BLOCK)); // If we can't expand the buffer, don't leak the previous // contents or pass a NULL pointer to QueryWorkingSet. buffer.reset(reinterpret_cast<PSAPI_WORKING_SET_INFORMATION*>( new char[buffer_size])); if (!buffer.get()) { LOG(ERROR) << "Unable to allocate working set buffer."; return false; } // Zero the buffer as <NAME> warns that undefined bits may not be set // in the Windows NT/2000 Native API Reference. memset(buffer.get(), 0, buffer_size); // Call the function once to get number of items. if (::QueryWorkingSet(process, buffer.get(), buffer_size)) break; if (::GetLastError() != ERROR_BAD_LENGTH) { return false; } number_of_entries = static_cast<DWORD>(buffer->NumberOfEntries); // Maybe some entries are being added right now. Increase the buffer to // take that into account. number_of_entries = static_cast<DWORD>(number_of_entries * 1.25); if (--retries == 0) { LOG(ERROR) << "Out of retries to query working set."; return false; } } working_set->swap(buffer); return true; } bool ProcessWorkingSet::CaptureModules(DWORD process_id, ModuleAddressSpace* modules) { DCHECK(modules != NULL); base::win::ScopedHandle snap( ::CreateToolhelp32Snapshot(TH32CS_SNAPMODULE, process_id)); if (!snap.IsValid()) { DWORD err = ::GetLastError(); LOG(ERROR) << "CreateToolhelp32Snapshot failed: " << common::LogWe(err); return false; } MODULEENTRY32 module = { sizeof(module) }; if (!::Module32First(snap.Get(), &module)) { DWORD err = ::GetLastError(); LOG(ERROR) << "Module32First failed: " << common::LogWe(err); return false; } do { ModuleAddressSpace::Range range( reinterpret_cast<size_t>(module.modBaseAddr), module.modBaseSize); if (!modules->Insert(range, module.szExePath)) { LOG(ERROR) << "Module insertion failed, overlapping modules?"; return false; } } while (::Module32Next(snap.Get(), &module)); DWORD err = ::GetLastError(); if (err != ERROR_NO_MORE_FILES) { LOG(ERROR) << "Module32Next failed: " << common::LogWe(err); return false; } return true; } } // namespace wsdump
2,633
2,550
// // Copyright 2021 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // #include "algorithms/internal/count-tree.h" #include "base/testing/proto_matchers.h" #include "base/testing/status_matchers.h" #include "gmock/gmock.h" #include "gtest/gtest.h" namespace differential_privacy { namespace internal { namespace { using ::testing::HasSubstr; using ::differential_privacy::base::testing::StatusIs; TEST(CountTreeTest, NumberOfNodes) { CountTree test(3, 5); EXPECT_EQ(test.GetNumberOfNodes(), 1 + 5 + 25 + 125); CountTree test2 = CountTree(4, 9); EXPECT_EQ(test2.GetNumberOfNodes(), 1 + 9 + 81 + 729 + 6561); } TEST(CountTreeTest, NumberOfLeaves) { CountTree test(3, 5); EXPECT_EQ(test.GetNumberOfLeaves(), 125); CountTree test2 = CountTree(4, 9); EXPECT_EQ(test2.GetNumberOfLeaves(), 6561); } TEST(CountTreeTest, GetNthLeaf) { CountTree test(3, 5); EXPECT_EQ(test.GetNthLeaf(0), 31); EXPECT_EQ(test.GetNthLeaf(5), 36); EXPECT_EQ(test.GetNthLeaf(18), 49); } TEST(CountTreeTest, ParentChildInverse) { CountTree test(5, 6); for (int i = 0; i < test.GetLeftMostLeaf(); ++i) { for (int child = test.LeftMostChild(i); child < test.RightMostChild(i); ++child) { EXPECT_EQ(test.Parent(child), i); } } } TEST(CountTreeTest, ParentChildExamples) { CountTree test(3, 5); EXPECT_EQ(test.LeftMostChild(0), 1); EXPECT_EQ(test.RightMostChild(0), 5); EXPECT_EQ(test.LeftMostChild(1), 6); EXPECT_EQ(test.RightMostChild(1), 10); EXPECT_EQ(test.LeftMostChild(8), 41); EXPECT_EQ(test.RightMostChild(8), 45); EXPECT_EQ(test.Parent(38), 7); EXPECT_EQ(test.Parent(8), 1); EXPECT_EQ(test.Parent(2), 0); } TEST(CountTreeTest, IsLeaf) { CountTree test(3, 5); EXPECT_FALSE(test.IsLeaf(0)); EXPECT_FALSE(test.IsLeaf(1)); EXPECT_FALSE(test.IsLeaf(6)); EXPECT_FALSE(test.IsLeaf(30)); EXPECT_TRUE(test.IsLeaf(31)); EXPECT_TRUE(test.IsLeaf(155)); } TEST(CountTreeTest, SubtreeQueries) { CountTree test(3, 5); EXPECT_EQ(test.LeftMostInSubtree(0), 31); EXPECT_EQ(test.RightMostInSubtree(0), 155); EXPECT_EQ(test.LeftMostInSubtree(1), 31); EXPECT_EQ(test.RightMostInSubtree(1), 55); EXPECT_EQ(test.LeftMostInSubtree(3), 81); EXPECT_EQ(test.RightMostInSubtree(3), 105); EXPECT_EQ(test.LeftMostInSubtree(82), 82); EXPECT_EQ(test.RightMostInSubtree(83), 83); } TEST(CountTreeTest, IncrementGet) { CountTree test(3, 5); test.IncrementNode(1); EXPECT_EQ(test.GetNodeCount(1), 1); EXPECT_EQ(test.GetNodeCount(2), 0); test.IncrementNode(8); test.IncrementNode(8); test.IncrementNode(8); EXPECT_EQ(test.GetNodeCount(8), 3); } TEST(CountTreeTest, IncrementNodeByGet) { CountTree test(3, 5); test.IncrementNode(1); test.IncrementNodeBy(1, 3); EXPECT_EQ(test.GetNodeCount(1), 4); test.IncrementNodeBy(1, 5); EXPECT_EQ(test.GetNodeCount(1), 9); test.IncrementNode(1); EXPECT_EQ(test.GetNodeCount(1), 10); } TEST(CountTreeTest, SerializeMerge) { CountTree test1(3, 5); test1.IncrementNode(1); test1.IncrementNode(8); test1.IncrementNode(8); CountTree test2(3, 5); EXPECT_OK(test2.Merge(test1.Serialize())); test1.IncrementNode(8); test2.IncrementNode(8); test1.IncrementNode(10); 
test2.IncrementNode(10); for (int i = test1.GetRoot(); i < test1.GetNumberOfNodes(); ++i) { EXPECT_EQ(test1.GetNodeCount(i), test2.GetNodeCount(i)); } } TEST(CountTreeTest, MisatchMergeFails) { CountTree standard(3, 5); CountTree shorter(2, 5); CountTree wider(3, 6); EXPECT_THAT(shorter.Merge(standard.Serialize()), StatusIs(absl::StatusCode::kInternal, HasSubstr("Height"))); EXPECT_THAT(wider.Merge(standard.Serialize()), StatusIs(absl::StatusCode::kInternal, HasSubstr("Branching"))); } TEST(CountTreeTest, MemoryUsed) { CountTree empty(3, 5); CountTree single(3, 5); CountTree twice(3, 5); single.IncrementNode(1); twice.IncrementNode(9); twice.IncrementNode(9); EXPECT_GT(single.MemoryUsed(), empty.MemoryUsed()); EXPECT_EQ(twice.MemoryUsed(), single.MemoryUsed()); } TEST(CountTreeTest, ClearNodes) { CountTree test1(3, 5); test1.IncrementNode(1); test1.IncrementNode(8); test1.IncrementNode(8); CountTree test2(3, 5); test1.ClearNodes(); test1.IncrementNode(8); test2.IncrementNode(8); test1.IncrementNode(10); test2.IncrementNode(10); for (int i = test1.GetRoot(); i < test1.GetNumberOfNodes(); ++i) { EXPECT_EQ(test1.GetNodeCount(i), test2.GetNodeCount(i)); } } } // namespace } // namespace internal } // namespace differential_privacy
2,055
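The CountTree tests above exercise the index arithmetic of a complete b-ary tree stored in breadth-first order. The following is an independent Java sketch of that arithmetic, written here for illustration and not taken from the library; it reproduces the expected values from the tests, for example parent(38) == 7 and nthLeaf(18) == 49 for height 3 and branching factor 5.

// Index arithmetic for a complete b-ary tree laid out in breadth-first order.
// Independent sketch matching the expectations in the tests above (height 3, branching 5).
final class CompleteTreeIndex {
    final int height;     // number of levels below the root
    final int branching;  // children per internal node

    CompleteTreeIndex(int height, int branching) {
        this.height = height;
        this.branching = branching;
    }

    int numberOfNodes() {            // 1 + b + b^2 + ... + b^height
        int total = 0, levelSize = 1;
        for (int level = 0; level <= height; ++level) {
            total += levelSize;
            levelSize *= branching;
        }
        return total;
    }

    int numberOfLeaves() {           // b^height
        int leaves = 1;
        for (int level = 0; level < height; ++level) {
            leaves *= branching;
        }
        return leaves;
    }

    int leftMostLeaf()            { return numberOfNodes() - numberOfLeaves(); }
    int nthLeaf(int n)            { return leftMostLeaf() + n; }
    int parent(int node)          { return (node - 1) / branching; }
    int leftMostChild(int node)   { return branching * node + 1; }
    int rightMostChild(int node)  { return branching * node + branching; }

    public static void main(String[] args) {
        CompleteTreeIndex t = new CompleteTreeIndex(3, 5);
        System.out.println(t.numberOfNodes());    // 156 = 1 + 5 + 25 + 125
        System.out.println(t.nthLeaf(18));        // 49
        System.out.println(t.parent(38));         // 7
        System.out.println(t.leftMostChild(8));   // 41
        System.out.println(t.rightMostChild(8));  // 45
    }
}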
467
<reponame>retr0-13/armitage package ui; import java.io.*; import msf.*; import javax.swing.*; import java.util.*; /* upload a file to the team server... */ public class UploadFile implements Runnable { protected FileInputStream in = null; protected byte[] buffer = new byte[256 * 1024]; protected File file = null; protected RpcConnection client = null; protected UploadNotify listener = null; protected Thread thread = null; protected String rfile = null; public static interface UploadNotify { /* call with the remote path of the file */ public void complete(String name); } public UploadFile(RpcConnection client, File file, UploadNotify listener) { this.file = file; this.client = client; this.listener = listener; this.thread = new Thread(this); thread.start(); } /* wait for the upload to finish and return our file */ public String getRemoteFile() { if (SwingUtilities.isEventDispatchThread()) { System.err.println("DiSS! upload of " + file + " is happening in EDT (unsafe)"); } try { thread.join(); } catch (InterruptedException iex) { } if (rfile == null) throw new RuntimeException("user canceled upload of file"); return rfile; } protected Object[] argz(byte[] data, long length) { /* copy relevant bytes to a temporary byte buffer */ byte[] me = new byte[(int)length]; for (int x = 0; x < length; x++) { me[x] = data[x]; } Object[] args = new Object[2]; args[0] = file.getName(); args[1] = me; return args; } public void run() { try { long total = file.length(); long start = System.currentTimeMillis(); long read = 0; long ret = 0; long sofar = 0; double time = 0; ProgressMonitor progress = new ProgressMonitor(null, "Upload " + file.getName(), "Starting upload", 0, (int)total); in = new FileInputStream(file); /* read our first round and then call a function to upload the data */ read = in.read(buffer); sofar += read; Map result = (Map)client.execute("armitage.upload", argz(buffer, read)); while (sofar < total) { /* update our progress bar */ time = (System.currentTimeMillis() - start) / 1000.0; progress.setProgress((int)sofar); progress.setNote("Speed: " + Math.round((sofar / 1024) / time) + " KB/s"); /* honor the user's request to cancel the upload */ if (progress.isCanceled()) { progress.close(); in.close(); return; } /* read in some data */ read = in.read(buffer); sofar += read; /* upload the data to the team server */ client.execute("armitage.append", argz(buffer, read)); /* give it a break */ Thread.yield(); } /* update our progress bar */ time = (System.currentTimeMillis() - start) / 1000.0; progress.setProgress((int)sofar); progress.setNote("Speed: " + Math.round((sofar / 1024) / time) + " KB/s"); /* clean up, now that we're done */ progress.close(); in.close(); /* call our listener, if it's not null */ if (listener != null) listener.complete(result.get("file") + ""); /* set the remote file */ rfile = result.get("file") + ""; } catch (Exception ioex) { JOptionPane.showMessageDialog(null, "Aborted upload of: " + file.getName() + "\n" + ioex.getMessage(), "Error", JOptionPane.ERROR_MESSAGE); System.err.println("Aborted upload of: " + file); ioex.printStackTrace(); } } }
1,263
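A hedged sketch of driving the uploader above from a background thread, since getRemoteFile() blocks and the sample warns against calling it on the event dispatch thread. The RpcConnection is assumed to be an already-connected msf.RpcConnection supplied by the surrounding application; only the constructor, UploadNotify and getRemoteFile() visible in the sample are used.

import java.io.File;
import msf.RpcConnection;
import ui.UploadFile;

// Usage sketch; "client" is assumed to come from the app's existing team server connection.
final class UploadExample {
    static void uploadInBackground(final RpcConnection client, final File localFile) {
        new Thread(new Runnable() {
            public void run() {
                // Constructing UploadFile starts the transfer on its own worker thread.
                UploadFile upload = new UploadFile(client, localFile, new UploadFile.UploadNotify() {
                    public void complete(String remotePath) {
                        System.err.println("Upload finished: " + remotePath);
                    }
                });
                // Blocks until the worker finishes; throws if the user cancels the progress dialog.
                String remotePath = upload.getRemoteFile();
                System.err.println("Remote file: " + remotePath);
            }
        }).start();
    }
}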
14,668
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "net/nqe/network_id.h" #include <tuple> #include "base/base64.h" #include "base/strings/string_number_conversions.h" #include "net/nqe/proto/network_id_proto.pb.h" namespace net { namespace nqe { namespace internal { // static NetworkID NetworkID::FromString(const std::string& network_id) { std::string base64_decoded; if (!base::Base64Decode(network_id, &base64_decoded)) { return NetworkID(NetworkChangeNotifier::CONNECTION_UNKNOWN, std::string(), INT32_MIN); } NetworkIDProto network_id_proto; if (!network_id_proto.ParseFromString(base64_decoded)) { return NetworkID(NetworkChangeNotifier::CONNECTION_UNKNOWN, std::string(), INT32_MIN); } return NetworkID(static_cast<NetworkChangeNotifier::ConnectionType>( network_id_proto.connection_type()), network_id_proto.id(), network_id_proto.signal_strength()); } NetworkID::NetworkID(NetworkChangeNotifier::ConnectionType type, const std::string& id, int32_t signal_strength) : type(type), id(id), signal_strength(signal_strength) { // A valid value of |signal_strength| must be between 0 and 4 (both // inclusive). DCHECK((0 <= signal_strength && 4 >= signal_strength) || (INT32_MIN == signal_strength)); } NetworkID::NetworkID(const NetworkID& other) = default; NetworkID::~NetworkID() = default; bool NetworkID::operator==(const NetworkID& other) const { return type == other.type && id == other.id && signal_strength == other.signal_strength; } bool NetworkID::operator!=(const NetworkID& other) const { return !operator==(other); } NetworkID& NetworkID::operator=(const NetworkID& other) = default; // Overloaded to support ordered collections. bool NetworkID::operator<(const NetworkID& other) const { return std::tie(type, id, signal_strength) < std::tie(other.type, other.id, other.signal_strength); } std::string NetworkID::ToString() const { NetworkIDProto network_id_proto; network_id_proto.set_connection_type(static_cast<int>(type)); network_id_proto.set_id(id); network_id_proto.set_signal_strength(signal_strength); std::string serialized_network_id; if (!network_id_proto.SerializeToString(&serialized_network_id)) return ""; std::string base64_encoded; base::Base64Encode(serialized_network_id, &base64_encoded); return base64_encoded; } } // namespace internal } // namespace nqe } // namespace net
988
852
<reponame>ckamtsikis/cmssw<gh_stars>100-1000 // -*- C++ -*- #ifndef METReco_SpecificPFMETData_h #define METReco_SpecificPFMETData_h /// \class SpecificPFMETData /// /// \short MET made from Particle Flow Candidates /// /// \authors <NAME>, UIC & R.Remington, UFL //____________________________________________________________________________|| struct SpecificPFMETData { SpecificPFMETData() : NeutralEMFraction(0.0), NeutralHadFraction(0.0), ChargedEMFraction(0.0), ChargedHadFraction(0.0), MuonFraction(0.0), Type6Fraction(0.0), Type7Fraction(0.0) {} // Data Members (should be renamed with "Et" in them to avoid ambiguities, see below) float NeutralEMFraction; float NeutralHadFraction; float ChargedEMFraction; float ChargedHadFraction; float MuonFraction; float Type6Fraction; float Type7Fraction; // float NeutralEMEtFraction; // float NeutralHadEtFraction; // float ChargedEMEtFraction; // float ChargedHadEtFraction; // float MuonEtFraction; // float Type6EtFraction; // float Type7EtFraction; }; //____________________________________________________________________________|| #endif // METReco_SpecificPFMETData_h
440
531
<filename>Adjust/sdk-core/src/main/java/com/adjust/sdk/AttributionHandler.java<gh_stars>100-1000 // // AttributionHandler.java // Adjust SDK // // Created by <NAME> (@nonelse) on 7th November 2014. // Copyright (c) 2014-2018 Adjust GmbH. All rights reserved. // package com.adjust.sdk; import android.net.Uri; import com.adjust.sdk.network.IActivityPackageSender; import com.adjust.sdk.scheduler.SingleThreadCachedScheduler; import com.adjust.sdk.scheduler.ThreadScheduler; import com.adjust.sdk.scheduler.TimerOnce; import org.json.JSONObject; import java.lang.ref.WeakReference; import java.util.HashMap; import java.util.List; import java.util.Map; public class AttributionHandler implements IAttributionHandler, IActivityPackageSender.ResponseDataCallbackSubscriber { private static final String ATTRIBUTION_TIMER_NAME = "Attribution timer"; private boolean paused; private String lastInitiatedBy; private IActivityPackageSender activityPackageSender; private ILogger logger; private TimerOnce timer; private ThreadScheduler scheduler; private WeakReference<IActivityHandler> activityHandlerWeakRef; @Override public void teardown() { logger.verbose("AttributionHandler teardown"); if (timer != null) { timer.teardown(); } if (scheduler != null) { scheduler.teardown(); } if (activityHandlerWeakRef != null) { activityHandlerWeakRef.clear(); } timer = null; logger = null; scheduler = null; activityHandlerWeakRef = null; } public AttributionHandler(IActivityHandler activityHandler, boolean startsSending, IActivityPackageSender attributionHandlerActivityPackageSender) { logger = AdjustFactory.getLogger(); scheduler = new SingleThreadCachedScheduler("AttributionHandler"); timer = new TimerOnce(new Runnable() { @Override public void run() { sendAttributionRequest(); } }, ATTRIBUTION_TIMER_NAME); init(activityHandler, startsSending, attributionHandlerActivityPackageSender); } @Override public void init(IActivityHandler activityHandler, boolean startsSending, IActivityPackageSender attributionHandlerActivityPackageSender) { this.activityHandlerWeakRef = new WeakReference<IActivityHandler>(activityHandler); this.paused = !startsSending; this.activityPackageSender = attributionHandlerActivityPackageSender; } @Override public void getAttribution() { scheduler.submit(new Runnable() { @Override public void run() { lastInitiatedBy = "sdk"; getAttributionI(0); } }); } @Override public void checkSessionResponse(final SessionResponseData sessionResponseData) { scheduler.submit(new Runnable() { @Override public void run() { IActivityHandler activityHandler = activityHandlerWeakRef.get(); if (activityHandler == null) { return; } checkSessionResponseI(activityHandler, sessionResponseData); } }); } @Override public void checkSdkClickResponse(final SdkClickResponseData sdkClickResponseData) { scheduler.submit(new Runnable() { @Override public void run() { IActivityHandler activityHandler = activityHandlerWeakRef.get(); if (activityHandler == null) { return; } checkSdkClickResponseI(activityHandler, sdkClickResponseData); } }); } public void checkAttributionResponse(final AttributionResponseData attributionResponseData) { scheduler.submit(new Runnable() { @Override public void run() { IActivityHandler activityHandler = activityHandlerWeakRef.get(); if (activityHandler == null) { return; } checkAttributionResponseI(activityHandler, attributionResponseData); } }); } @Override public void pauseSending() { paused = true; } @Override public void resumeSending() { paused = false; } private void sendAttributionRequest() { 
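        // Hop onto the single-thread scheduler so attribution state is only touched from one thread.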
scheduler.submit(new Runnable() { @Override public void run() { sendAttributionRequestI(); } }); } private void getAttributionI(long delayInMilliseconds) { // Don't reset if new time is shorter than last one. if (timer.getFireIn() > delayInMilliseconds) { return; } if (delayInMilliseconds != 0) { double waitTimeSeconds = delayInMilliseconds / 1000.0; String secondsString = Util.SecondsDisplayFormat.format(waitTimeSeconds); logger.debug("Waiting to query attribution in %s seconds", secondsString); } // Set the new time the timer will fire in. timer.startIn(delayInMilliseconds); } private void checkAttributionI(IActivityHandler activityHandler, ResponseData responseData) { if (responseData.jsonResponse == null) { return; } Long timerMilliseconds = responseData.askIn; // responseData.jsonResponse.optLong("ask_in", -1); if (timerMilliseconds != null && timerMilliseconds >= 0) { activityHandler.setAskingAttribution(true); lastInitiatedBy = "backend"; getAttributionI(timerMilliseconds); return; } activityHandler.setAskingAttribution(false); } private void checkSessionResponseI(IActivityHandler activityHandler, SessionResponseData sessionResponseData) { checkAttributionI(activityHandler, sessionResponseData); activityHandler.launchSessionResponseTasks(sessionResponseData); } private void checkSdkClickResponseI(IActivityHandler activityHandler, SdkClickResponseData sdkClickResponseData) { checkAttributionI(activityHandler, sdkClickResponseData); activityHandler.launchSdkClickResponseTasks(sdkClickResponseData); } private void checkAttributionResponseI(IActivityHandler activityHandler, AttributionResponseData attributionResponseData) { checkAttributionI(activityHandler, attributionResponseData); checkDeeplinkI(attributionResponseData); activityHandler.launchAttributionResponseTasks(attributionResponseData); } private void checkDeeplinkI(AttributionResponseData attributionResponseData) { if (attributionResponseData.jsonResponse == null) { return; } JSONObject attributionJson = attributionResponseData.jsonResponse.optJSONObject("attribution"); if (attributionJson == null) { return; } String deeplinkString = attributionJson.optString("deeplink", null); if (deeplinkString == null) { return; } attributionResponseData.deeplink = Uri.parse(deeplinkString); } private void sendAttributionRequestI() { if (activityHandlerWeakRef.get().getActivityState().isGdprForgotten) { return; } if (paused) { logger.debug("Attribution handler is paused"); return; } // Create attribution package before sending attribution request. 
ActivityPackage attributionPackage = buildAndGetAttributionPackage(); logger.verbose("%s", attributionPackage.getExtendedString()); Map<String, String> sendingParameters = generateSendingParametersI(); activityPackageSender.sendActivityPackage( attributionPackage, sendingParameters, this); } private Map<String, String> generateSendingParametersI() { HashMap<String, String> sendingParameters = new HashMap<>(); long now = System.currentTimeMillis(); String dateString = Util.dateFormatter.format(now); PackageBuilder.addString(sendingParameters, "sent_at", dateString); return sendingParameters; } private ActivityPackage buildAndGetAttributionPackage() { long now = System.currentTimeMillis(); IActivityHandler activityHandler = activityHandlerWeakRef.get(); PackageBuilder packageBuilder = new PackageBuilder( activityHandler.getAdjustConfig(), activityHandler.getDeviceInfo(), activityHandler.getActivityState(), activityHandler.getSessionParameters(), now); ActivityPackage activityPackage = packageBuilder.buildAttributionPackage(lastInitiatedBy); lastInitiatedBy = null; return activityPackage; } @Override public void onResponseDataCallback(final ResponseData responseData) { scheduler.submit(new Runnable() { @Override public void run() { IActivityHandler activityHandler = activityHandlerWeakRef.get(); if (activityHandler == null) { return; } if (responseData.trackingState == TrackingState.OPTED_OUT) { activityHandler.gotOptOutResponse(); return; } if (!(responseData instanceof AttributionResponseData)) { return; } checkAttributionResponseI(activityHandler, (AttributionResponseData)responseData); } }); } }
4,187
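A hedged lifecycle sketch for the attribution handler above. The activity handler and package sender are internal SDK collaborators, so the sketch takes them as parameters instead of constructing them, and it only calls methods visible in the sample.

import com.adjust.sdk.AttributionHandler;
import com.adjust.sdk.IActivityHandler;
import com.adjust.sdk.network.IActivityPackageSender;

// Lifecycle sketch; both collaborators are assumed to come from the SDK's own wiring,
// not from application code.
final class AttributionHandlerSketch {
    static void drive(IActivityHandler activityHandler, IActivityPackageSender packageSender) {
        // startsSending = true leaves the handler unpaused, so requests may fire immediately.
        AttributionHandler handler = new AttributionHandler(activityHandler, true, packageSender);

        // Ask the backend for attribution; the work is queued on the handler's own scheduler thread.
        handler.getAttribution();

        // Sending can be gated and re-enabled, for example around offline mode.
        handler.pauseSending();
        handler.resumeSending();

        // Releases the timer, the scheduler and the weak activity-handler reference.
        handler.teardown();
    }
}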