Dataset columns: max_stars_count (int64, ranging 301 to 224k), text (string, lengths 6 to 1.05M), token_count (int64, ranging 3 to 727k).
max_stars_count: 5,169
{ "name": "DebugMenuUserDefaultsBrowser", "version": "0.1.0", "summary": "Easy to browse UserDefaults values.", "description": "You can browse UserDefaults values with DebugHead.", "homepage": "https://github.com/malt03/DebugMenuUserDefaultsBrowser", "license": { "type": "MIT", "file": "LICENSE" }, "authors": { "<NAME>": "<EMAIL>" }, "source": { "git": "https://github.com/malt03/DebugMenuUserDefaultsBrowser.git", "tag": "0.1.0" }, "platforms": { "ios": "8.0" }, "source_files": "DebugMenuUserDefaultsBrowser/Classes/**/*", "dependencies": { "TAKKitSwift": [ "~> 2.3.0" ], "DebugHead": [ "~> 0.1.0" ] } }
token_count: 299
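Podspec JSON like the sample above is easy to sanity-check mechanically. A minimal sketch, using only the Python standard library; the required-field list is an assumption drawn from the fields visible in this sample, not CocoaPods' actual `pod spec lint` rules:

import json

# Fields the podspecs in this dump appear to carry; this list is an
# illustration, not CocoaPods' real validation rule set.
REQUIRED_KEYS = ("name", "version", "summary", "homepage", "license", "source")

def check_podspec(text: str) -> list:
    """Return the required keys missing from a podspec JSON string."""
    spec = json.loads(text)
    return [key for key in REQUIRED_KEYS if key not in spec]

# Example: an empty list means all required keys are present.
# missing = check_podspec(open("DebugMenuUserDefaultsBrowser.podspec.json").read())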
max_stars_count: 1,550
# Leo colorizer control file for redcode mode. # This file is in the public domain. # Properties for redcode mode. properties = { "lineComment": ";", } # Attributes dict for redcode_main ruleset. redcode_main_attributes_dict = { "default": "MARKUP", "digit_re": "", "escape": "", "highlight_digits": "true", "ignore_case": "true", "no_word_sep": "", } # Dictionary of attributes dictionaries for redcode mode. attributesDictDict = { "redcode_main": redcode_main_attributes_dict, } # Keywords dict for redcode_main ruleset. redcode_main_keywords_dict = { "add": "keyword1", "cmp": "keyword1", "coresize": "keyword2", "curline": "keyword2", "dat": "keyword1", "div": "keyword1", "djn": "keyword1", "end": "keyword2", "equ": "keyword2", "for": "keyword2", "jmn": "keyword1", "jmp": "keyword1", "jmz": "keyword1", "ldp": "keyword1", "maxcycles": "keyword2", "maxlength": "keyword2", "maxprocesses": "keyword2", "mindistance": "keyword2", "mod": "keyword1", "mov": "keyword1", "mul": "keyword1", "nop": "keyword1", "org": "keyword2", "pin": "keyword2", "pspacesize": "keyword2", "rof": "keyword2", "rounds": "keyword2", "seq": "keyword1", "slt": "keyword1", "sne": "keyword1", "spl": "keyword1", "stp": "keyword1", "sub": "keyword1", "version": "keyword2", "warriors": "keyword2", } # Dictionary of keywords dictionaries for redcode mode. keywordsDictDict = { "redcode_main": redcode_main_keywords_dict, } # Rules for redcode_main ruleset. def redcode_rule0(colorer, s, i): return colorer.match_eol_span(s, i, kind="comment2", seq=";redcode", at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="", exclude_match=False) def redcode_rule1(colorer, s, i): return colorer.match_eol_span(s, i, kind="comment2", seq=";author", at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="", exclude_match=False) def redcode_rule2(colorer, s, i): return colorer.match_eol_span(s, i, kind="comment2", seq=";name", at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="", exclude_match=False) def redcode_rule3(colorer, s, i): return colorer.match_eol_span(s, i, kind="comment2", seq=";strategy", at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="", exclude_match=False) def redcode_rule4(colorer, s, i): return colorer.match_eol_span(s, i, kind="comment2", seq=";password", at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="", exclude_match=False) def redcode_rule5(colorer, s, i): return colorer.match_eol_span(s, i, kind="comment1", seq=";", at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="", exclude_match=False) def redcode_rule6(colorer, s, i): return colorer.match_seq(s, i, kind="keyword3", seq=".AB", at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="") def redcode_rule7(colorer, s, i): return colorer.match_seq(s, i, kind="keyword3", seq=".BA", at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="") def redcode_rule8(colorer, s, i): return colorer.match_seq(s, i, kind="keyword3", seq=".A", at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="") def redcode_rule9(colorer, s, i): return colorer.match_seq(s, i, kind="keyword3", seq=".B", at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="") def redcode_rule10(colorer, s, i): return colorer.match_seq(s, i, kind="keyword3", seq=".F", at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="") def redcode_rule11(colorer, s, i): return 
colorer.match_seq(s, i, kind="keyword3", seq=".X", at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="") def redcode_rule12(colorer, s, i): return colorer.match_seq(s, i, kind="keyword3", seq=".I", at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="") def redcode_rule13(colorer, s, i): return colorer.match_seq(s, i, kind="operator", seq=",", at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="") def redcode_rule14(colorer, s, i): return colorer.match_seq(s, i, kind="operator", seq=":", at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="") def redcode_rule15(colorer, s, i): return colorer.match_seq(s, i, kind="operator", seq="(", at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="") def redcode_rule16(colorer, s, i): return colorer.match_seq(s, i, kind="operator", seq=")", at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="") def redcode_rule17(colorer, s, i): return colorer.match_seq(s, i, kind="operator", seq="+", at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="") def redcode_rule18(colorer, s, i): return colorer.match_seq(s, i, kind="operator", seq="-", at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="") def redcode_rule19(colorer, s, i): return colorer.match_seq(s, i, kind="operator", seq="/", at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="") def redcode_rule20(colorer, s, i): return colorer.match_seq(s, i, kind="operator", seq="%", at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="") def redcode_rule21(colorer, s, i): return colorer.match_seq(s, i, kind="operator", seq="==", at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="") def redcode_rule22(colorer, s, i): return colorer.match_seq(s, i, kind="operator", seq="!=", at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="") def redcode_rule23(colorer, s, i): return colorer.match_seq(s, i, kind="operator", seq="<=", at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="") def redcode_rule24(colorer, s, i): return colorer.match_seq(s, i, kind="operator", seq=">=", at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="") def redcode_rule25(colorer, s, i): return colorer.match_seq(s, i, kind="operator", seq="<", at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="") def redcode_rule26(colorer, s, i): return colorer.match_seq(s, i, kind="operator", seq=">", at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="") def redcode_rule27(colorer, s, i): return colorer.match_seq(s, i, kind="operator", seq="&&", at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="") def redcode_rule28(colorer, s, i): return colorer.match_seq(s, i, kind="operator", seq="||", at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="") def redcode_rule29(colorer, s, i): return colorer.match_seq(s, i, kind="operator", seq="!", at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="") def redcode_rule30(colorer, s, i): return colorer.match_seq(s, i, kind="operator", seq="=", at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="") def redcode_rule31(colorer, s, i): return colorer.match_seq(s, i, kind="literal1", seq="$", at_line_start=False, at_whitespace_end=False, 
at_word_start=False, delegate="") def redcode_rule32(colorer, s, i): return colorer.match_seq(s, i, kind="literal1", seq="@", at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="") def redcode_rule33(colorer, s, i): return colorer.match_seq(s, i, kind="literal1", seq="#", at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="") def redcode_rule34(colorer, s, i): return colorer.match_seq(s, i, kind="literal1", seq="*", at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="") def redcode_rule35(colorer, s, i): return colorer.match_seq(s, i, kind="literal1", seq="{", at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="") def redcode_rule36(colorer, s, i): return colorer.match_seq(s, i, kind="literal1", seq="}", at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="") def redcode_rule37(colorer, s, i): return colorer.match_keywords(s, i) # Rules dict for redcode_main ruleset. rulesDict1 = { "!": [redcode_rule22,redcode_rule29,], "#": [redcode_rule33,], "$": [redcode_rule31,], "%": [redcode_rule20,], "&": [redcode_rule27,], "(": [redcode_rule15,], ")": [redcode_rule16,], "*": [redcode_rule34,], "+": [redcode_rule17,], ",": [redcode_rule13,], "-": [redcode_rule18,], ".": [redcode_rule6,redcode_rule7,redcode_rule8,redcode_rule9,redcode_rule10,redcode_rule11,redcode_rule12,], "/": [redcode_rule19,], "0": [redcode_rule37,], "1": [redcode_rule37,], "2": [redcode_rule37,], "3": [redcode_rule37,], "4": [redcode_rule37,], "5": [redcode_rule37,], "6": [redcode_rule37,], "7": [redcode_rule37,], "8": [redcode_rule37,], "9": [redcode_rule37,], ":": [redcode_rule14,], ";": [redcode_rule0,redcode_rule1,redcode_rule2,redcode_rule3,redcode_rule4,redcode_rule5,], "<": [redcode_rule23,redcode_rule25,], "=": [redcode_rule21,redcode_rule30,], ">": [redcode_rule24,redcode_rule26,], "@": [redcode_rule32,redcode_rule37,], "A": [redcode_rule37,], "B": [redcode_rule37,], "C": [redcode_rule37,], "D": [redcode_rule37,], "E": [redcode_rule37,], "F": [redcode_rule37,], "G": [redcode_rule37,], "H": [redcode_rule37,], "I": [redcode_rule37,], "J": [redcode_rule37,], "K": [redcode_rule37,], "L": [redcode_rule37,], "M": [redcode_rule37,], "N": [redcode_rule37,], "O": [redcode_rule37,], "P": [redcode_rule37,], "Q": [redcode_rule37,], "R": [redcode_rule37,], "S": [redcode_rule37,], "T": [redcode_rule37,], "U": [redcode_rule37,], "V": [redcode_rule37,], "W": [redcode_rule37,], "X": [redcode_rule37,], "Y": [redcode_rule37,], "Z": [redcode_rule37,], "a": [redcode_rule37,], "b": [redcode_rule37,], "c": [redcode_rule37,], "d": [redcode_rule37,], "e": [redcode_rule37,], "f": [redcode_rule37,], "g": [redcode_rule37,], "h": [redcode_rule37,], "i": [redcode_rule37,], "j": [redcode_rule37,], "k": [redcode_rule37,], "l": [redcode_rule37,], "m": [redcode_rule37,], "n": [redcode_rule37,], "o": [redcode_rule37,], "p": [redcode_rule37,], "q": [redcode_rule37,], "r": [redcode_rule37,], "s": [redcode_rule37,], "t": [redcode_rule37,], "u": [redcode_rule37,], "v": [redcode_rule37,], "w": [redcode_rule37,], "x": [redcode_rule37,], "y": [redcode_rule37,], "z": [redcode_rule37,], "{": [redcode_rule35,], "|": [redcode_rule28,], "}": [redcode_rule36,], } # x.rulesDictDict for redcode mode. rulesDictDict = { "redcode_main": rulesDict1, } # Import dict for redcode mode. importDict = {}
token_count: 5,496
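The colorizer sample above is essentially a character-indexed dispatch table: `rulesDict1` maps a lead character to candidate rule functions, and each rule returns the number of characters it consumed. A minimal sketch of such a driver, with a stub standing in for Leo's real colorer object (whose API is assumed here, not reproduced):

# Stub colorer with just enough of the assumed Leo matcher API to
# drive rule functions shaped like the redcode ones above.
class StubColorer:
    def __init__(self):
        self.tags = []  # (start, end, kind) spans found so far

    def match_seq(self, s, i, kind, seq, **kwargs):
        # Tag `seq` if it occurs at s[i:]; return the match length.
        if s.startswith(seq, i):
            self.tags.append((i, i + len(seq), kind))
            return len(seq)
        return 0

    def match_eol_span(self, s, i, kind, seq, **kwargs):
        # Tag from `seq` to the end of the line (comment-style rules).
        if s.startswith(seq, i):
            self.tags.append((i, len(s), kind))
            return len(s) - i
        return 0

    def match_keywords(self, s, i):
        return 0  # keyword table lookup omitted in this sketch


def scan_line(s, rules_dict, colorer):
    """Apply the first matching rule at each position, else skip a char."""
    i = 0
    while i < len(s):
        for rule in rules_dict.get(s[i], ()):
            n = rule(colorer, s, i)
            if n > 0:
                i += n
                break
        else:
            i += 1


# Tiny usage example with a one-entry dispatch table:
rules = {";": [lambda c, s, i: c.match_eol_span(s, i, kind="comment1", seq=";")]}
c = StubColorer()
scan_line("mov 0, 1 ; imp", rules, c)
print(c.tags)  # [(9, 14, 'comment1')]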
max_stars_count: 336
package com.shazam.fork.runner;

import com.shazam.fork.model.Pool;
import com.shazam.fork.model.PoolTestCaseAccumulator;

import javax.annotation.Nonnull;
import java.util.Map;

public final class ProgressReporterFactory {
    private int totalAllowedRetryQuota;
    private int retryPerTestCaseQuota;
    private Map<Pool, PoolProgressTracker> poolProgressTrackers;
    private PoolTestCaseAccumulator failedTestCasesAccumulator;

    public ProgressReporterFactory(int totalAllowedRetryQuota,
                                   int retryPerTestCaseQuota,
                                   Map<Pool, PoolProgressTracker> poolProgressTrackers,
                                   PoolTestCaseAccumulator failedTestCasesAccumulator) {
        this.totalAllowedRetryQuota = totalAllowedRetryQuota;
        this.retryPerTestCaseQuota = retryPerTestCaseQuota;
        this.poolProgressTrackers = poolProgressTrackers;
        this.failedTestCasesAccumulator = failedTestCasesAccumulator;
    }

    @Nonnull
    public ProgressReporter createProgressReporter() {
        return new OverallProgressReporter(
                totalAllowedRetryQuota,
                retryPerTestCaseQuota,
                poolProgressTrackers,
                failedTestCasesAccumulator);
    }
}
token_count: 550

max_stars_count: 5,169
<gh_stars>1000+ { "name": "CCPCycleScrollView", "version": "1.0", "summary": "For cycle scroll", "swift_version": "4.2", "description": "使用UICollectionView创建的轮播控件,支持各种UIView的使用", "homepage": "https://github.com/coolboy-ccp/CCPCycleScrollView", "license": "MIT", "authors": { "Brian.Chu": "<EMAIL>" }, "platforms": { "ios": "9.0" }, "source": { "git": "https://github.com/coolboy-ccp/CCPCycleScrollView.git", "tag": "1.0" }, "source_files": [ "CCPCycleScrollView/CycleSCroll", "*.swift" ], "exclude_files": "Classes/Exclude", "requires_arc": true, "dependencies": { "SnapKit": [ ], "Kingfisher": [ ] } }
token_count: 336

max_stars_count: 892
<reponame>wuchunfu/cron-utils<filename>src/main/java/com/cronutils/converter/CalendarToCronTransformer.java<gh_stars>100-1000 /* * Copyright 2019 fahmpeermoh * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.cronutils.converter; public class CalendarToCronTransformer extends BaseCronTransformer { @Override protected void transform() { String updatedValue = String .valueOf(calendarInstance.get(calendarField)); LOGGER.debug( "Updating cron field at position {} with {}, using calendar field {}", cronFieldPosition, updatedValue, calendarField); cronParts[cronFieldPosition] = updatedValue; } }
token_count: 331

max_stars_count: 703
<filename>Code/Engine/GameEngine/Gameplay/SpawnComponent.h #pragma once #include <GameEngine/GameEngineDLL.h> #include <Core/Prefabs/PrefabResource.h> #include <Core/World/World.h> #include <Foundation/Types/RangeView.h> struct ezMsgComponentInternalTrigger; struct ezSpawnComponentFlags { typedef ezUInt16 StorageType; enum Enum { None = 0, SpawnAtStart = EZ_BIT(0), ///< The component will schedule a spawn once at creation time SpawnContinuously = EZ_BIT(1), ///< Every time a scheduled spawn was done, a new one is scheduled AttachAsChild = EZ_BIT(2), ///< All objects spawned will be attached as children to this node SpawnInFlight = EZ_BIT(3), ///< [internal] A spawn trigger message has been posted. Default = None }; struct Bits { StorageType SpawnAtStart : 1; StorageType SpawnContinuously : 1; StorageType AttachAsChild : 1; StorageType SpawnInFlight : 1; }; }; EZ_DECLARE_FLAGS_OPERATORS(ezSpawnComponentFlags); typedef ezComponentManager<class ezSpawnComponent, ezBlockStorageType::Compact> ezSpawnComponentManager; class EZ_GAMEENGINE_DLL ezSpawnComponent : public ezComponent { EZ_DECLARE_COMPONENT_TYPE(ezSpawnComponent, ezComponent, ezSpawnComponentManager); ////////////////////////////////////////////////////////////////////////// // ezComponent public: virtual void SerializeComponent(ezWorldWriter& stream) const override; virtual void DeserializeComponent(ezWorldReader& stream) override; protected: virtual void OnSimulationStarted() override; virtual void OnDeactivated() override; ////////////////////////////////////////////////////////////////////////// // ezSpawnComponent public: ezSpawnComponent(); ~ezSpawnComponent(); /// \brief Checks whether the last spawn time was long enough ago that a call to TriggerManualSpawn() would succeed. bool CanTriggerManualSpawn() const; // [ scriptable ] /// \brief Spawns a new object, unless the minimum spawn delay has not been reached between calls to this function. /// /// Manual spawns and continuous (scheduled) spawns are independent from each other regarding minimum spawn delays. /// If this function is called in too short intervals, it is ignored and false is returned. /// Returns true, if an object was spawned. bool TriggerManualSpawn(bool bIgnoreSpawnDelay = false, const ezVec3& vLocalOffset = ezVec3::ZeroVector()); // [ scriptable ] /// \brief Unless a spawn is already scheduled, this will schedule one within the configured time frame. /// /// If continuous spawning is enabled, this will kick off the first spawn and then continue indefinitely. /// To stop continuously spawning, remove the continuous spawn flag. void ScheduleSpawn(); // [ scriptable ] void SetPrefabFile(const char* szFile); // [ property ] const char* GetPrefabFile() const; // [ property ] bool GetSpawnAtStart() const; // [ property ] void SetSpawnAtStart(bool b); // [ property ] bool GetSpawnContinuously() const; // [ property ] void SetSpawnContinuously(bool b); // [ property ] bool GetAttachAsChild() const; // [ property ] void SetAttachAsChild(bool b); // [ property ] void SetPrefab(const ezPrefabResourceHandle& hPrefab); EZ_ALWAYS_INLINE const ezPrefabResourceHandle& GetPrefab() const { return m_hPrefab; } /// The minimum delay between spawning objects. This is also enforced for manually spawning things. ezTime m_MinDelay; // [ property ] /// For scheduled spawns (continuous / at start) this is an additional random range on top of the minimum spawn delay. ezTime m_DelayRange; // [ property ] /// The spawned object's orientation may deviate by this amount around the X axis. 
180° is completely random orientation. ezAngle m_MaxDeviation; // [ property ] const ezRangeView<const char*, ezUInt32> GetParameters() const; // [ property ] (exposed parameter) void SetParameter(const char* szKey, const ezVariant& value); // [ property ] (exposed parameter) void RemoveParameter(const char* szKey); // [ property ] (exposed parameter) bool GetParameter(const char* szKey, ezVariant& out_value) const; // [ property ] (exposed parameter) ezArrayMap<ezHashedString, ezVariant> m_Parameters; protected: ezBitflags<ezSpawnComponentFlags> m_SpawnFlags; virtual void DoSpawn(const ezTransform& tLocalSpawn); bool SpawnOnce(const ezVec3& vLocalOffset); void OnTriggered(ezMsgComponentInternalTrigger& msg); ezTime m_LastManualSpawn; ezPrefabResourceHandle m_hPrefab; };
token_count: 1,346

max_stars_count: 3,102
// RUN: %clang_cc1 -fsyntax-only -ast-print %s | FileCheck %s

namespace N {
  template<typename T, typename U> void f(U);
  template<int> void f();
}

void g() {
  // CHECK: N::f<int>(3.14
  N::f<int>(3.14);
  // CHECK: N::f<double>
  void (*fp)(int) = N::f<double>;
}

// (NNS qualified) DeclRefExpr.
namespace DRE {
template <typename T>
void foo();

void test() {
  // CHECK: DRE::foo<int>;
  DRE::foo<int>;
  // CHECK: DRE::template foo<int>;
  DRE::template foo<int>;
  // CHECK: DRE::foo<int>();
  DRE::foo<int>();
  // CHECK: DRE::template foo<int>();
  DRE::template foo<int>();
}
} // namespace DRE

// MemberExpr.
namespace ME {
struct S {
  template <typename T>
  void mem();
};

void test() {
  S s;
  // CHECK: s.mem<int>();
  s.mem<int>();
  // CHECK: s.template mem<int>();
  s.template mem<int>();
}
} // namespace ME

// UnresolvedLookupExpr.
namespace ULE {
template <typename T>
int foo();

template <typename T>
void test() {
  // CHECK: ULE::foo<T>;
  ULE::foo<T>;
  // CHECK: ULE::template foo<T>;
  ULE::template foo<T>;
}
} // namespace ULE

// UnresolvedMemberExpr.
namespace UME {
struct S {
  template <typename T>
  void mem();
};

template <typename U>
void test() {
  S s;
  // CHECK: s.mem<U>();
  s.mem<U>();
  // CHECK: s.template mem<U>();
  s.template mem<U>();
}
} // namespace UME

// DependentScopeDeclRefExpr.
namespace DSDRE {
template <typename T>
struct S;

template <typename T>
void test() {
  // CHECK: S<T>::foo;
  S<T>::foo;
  // CHECK: S<T>::template foo;
  S<T>::template foo;
  // CHECK: S<T>::template foo<>;
  S<T>::template foo<>;
  // CHECK: S<T>::template foo<T>;
  S<T>::template foo<T>;
}
} // namespace DSDRE

// DependentScopeMemberExpr.
namespace DSME {
template <typename T>
struct S;

template <typename T>
void test() {
  S<T> s;
  // CHECK: s.foo;
  s.foo;
  // CHECK: s.template foo;
  s.template foo;
  // CHECK: s.template foo<>;
  s.template foo<>;
  // CHECK: s.template foo<T>;
  s.template foo<T>;
}
} // namespace DSME

namespace DSDRE_withImplicitTemplateArgs {
template <typename T>
void foo() {
  // CHECK: T::template bar();
  T::template bar();
}
} // namespace DSDRE_withImplicitTemplateArgs
token_count: 946

max_stars_count: 578
<filename>app/connector/sql/src/test/java/io/syndesis/connector/sql/SqlConnectorBatchUpdateTest.java /* * Copyright (C) 2016 Red Hat, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.syndesis.connector.sql; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; import io.syndesis.common.model.integration.Step; import io.syndesis.connector.sql.common.DbEnum; import io.syndesis.connector.sql.common.JSONBeanUtil; import io.syndesis.connector.sql.common.SqlTest; import io.syndesis.connector.sql.common.SqlTest.ConnectionInfo; import io.syndesis.connector.sql.common.SqlTest.Setup; import io.syndesis.connector.sql.common.SqlTest.Teardown; import io.syndesis.connector.sql.common.SqlTest.Variant; import io.syndesis.connector.sql.util.SqlConnectorTestSupport; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; @ExtendWith(SqlTest.class) @Setup(variants = { @Variant(type = DbEnum.POSTGRESQL, value = "CREATE TABLE ADDRESS (ID SERIAL PRIMARY KEY, street VARCHAR(255), nummer INTEGER)"), @Variant(type = DbEnum.MYSQL, value = "CREATE TABLE ADDRESS (ID INT NOT NULL AUTO_INCREMENT PRIMARY KEY, street VARCHAR(255), nummer INTEGER)"), @Variant(type = DbEnum.APACHE_DERBY, value = "CREATE TABLE ADDRESS (ID INTEGER NOT NULL GENERATED ALWAYS AS IDENTITY (START WITH 1, INCREMENT BY 1), street VARCHAR(255), number INTEGER)"), @Variant(type = DbEnum.STANDARD, value = "CREATE TABLE ADDRESS (ID NUMBER GENERATED ALWAYS AS IDENTITY, street VARCHAR(255), nummer INTEGER)"), }) @Teardown("DROP TABLE ADDRESS") public class SqlConnectorBatchUpdateTest extends SqlConnectorTestSupport { public SqlConnectorBatchUpdateTest(final ConnectionInfo info) { super(info); } @Override protected List<Step> createSteps() { return Arrays.asList( newSimpleEndpointStep( "direct", builder -> builder.putConfiguredProperty("name", "start")), newSqlEndpointStep( "sql-connector", builder -> builder .putConfiguredProperty("batch", "true") .putConfiguredProperty("query", "INSERT INTO ADDRESS (street, number) VALUES (:#street, :#number)")), newSimpleEndpointStep( "log", builder -> builder.putConfiguredProperty("loggerName", "test"))); } @Test public void sqlConnectorBatchUpdateTest() { final List<Map<String, Object>> parameters = new ArrayList<>(); Map<String, Object> first = new HashMap<>(); first.put("number", 14); first.put("street", "LaborInVain"); Map<String, Object> second = new HashMap<>(); second.put("number", 15); second.put("street", "Werner-von-Siemens-Ring"); Map<String, Object> third = new HashMap<>(); third.put("number", 75); third.put("street", "Am Treptower Park"); parameters.add(first); parameters.add(second); parameters.add(third); List<String> body = new ArrayList<>(); for (Map<String, Object> paramMap : parameters) { body.add(JSONBeanUtil.toJSONBean(paramMap)); } @SuppressWarnings("unchecked") List<String> jsonBeans = template().requestBody("direct:start", body, List.class); 
Assertions.assertFalse(jsonBeans.isEmpty()); validateJson(jsonBeans, "ID", "3"); } }
token_count: 1,592

max_stars_count: 679
/**************************************************************
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 *
 *************************************************************/

#ifndef _SVX_SIDEBAR_SELECTION_CHANGE_HANDLER_HXX_
#define _SVX_SIDEBAR_SELECTION_CHANGE_HANDLER_HXX_

#include "svx/svxdllapi.h"
#include <sfx2/sidebar/EnumContext.hxx>
#include <com/sun/star/frame/XController.hpp>
#include <com/sun/star/view/XSelectionSupplier.hpp>
#include <com/sun/star/view/XSelectionChangeListener.hpp>
#include <cppuhelper/compbase1.hxx>
#include <cppuhelper/basemutex.hxx>
#include <boost/noncopyable.hpp>
#include <boost/function.hpp>

namespace css = ::com::sun::star;
namespace cssu = ::com::sun::star::uno;

class SdrMarkView;

namespace svx { namespace sidebar {

namespace {
    typedef ::cppu::WeakComponentImplHelper1<css::view::XSelectionChangeListener>
        SelectionChangeHandlerInterfaceBase;
}

class SVX_DLLPUBLIC SelectionChangeHandler
    : private ::boost::noncopyable,
      private ::cppu::BaseMutex,
      public SelectionChangeHandlerInterfaceBase
{
public:
    SelectionChangeHandler (
        const boost::function<rtl::OUString(void)>& rSelectionChangeCallback,
        const cssu::Reference<css::frame::XController>& rxController,
        const sfx2::sidebar::EnumContext::Context eDefaultContext);
    virtual ~SelectionChangeHandler (void);

    virtual void SAL_CALL selectionChanged (const css::lang::EventObject& rEvent)
        throw (cssu::RuntimeException);
    virtual void SAL_CALL disposing (const css::lang::EventObject& rEvent)
        throw (cssu::RuntimeException);
    virtual void SAL_CALL disposing (void)
        throw (cssu::RuntimeException);

    void Connect (void);
    void Disconnect (void);

private:
    const boost::function<rtl::OUString(void)> maSelectionChangeCallback;
    cssu::Reference<css::frame::XController> mxController;
    const sfx2::sidebar::EnumContext::Context meDefaultContext;
    bool mbIsConnected;
};

} } // end of namespace svx::sidebar

#endif
token_count: 932

max_stars_count: 2,151
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef CONTENT_BROWSER_INDEXED_DB_LEVELDB_LEVELDB_ENV_H_
#define CONTENT_BROWSER_INDEXED_DB_LEVELDB_LEVELDB_ENV_H_

#include "base/lazy_instance.h"
#include "content/common/content_export.h"
#include "third_party/leveldatabase/env_chromium.h"

namespace content {

// The leveldb::Env used by the Indexed DB backend.
class LevelDBEnv : public leveldb_env::ChromiumEnv {
  LevelDBEnv();

 public:
  friend struct base::LazyInstanceTraitsBase<LevelDBEnv>;

  CONTENT_EXPORT static LevelDBEnv* Get();
};

}  // namespace content

#endif  // CONTENT_BROWSER_INDEXED_DB_LEVELDB_LEVELDB_ENV_H_
token_count: 280

max_stars_count: 3,055
/*
  Fontname: open_iconic_embedded_1x
  Copyright: https://github.com/iconic/open-iconic, SIL OPEN FONT LICENSE
  Glyphs: 17/17
  BBX Build Mode: 3
*/
const uint8_t u8x8_font_open_iconic_embedded_1x1[140] U8X8_FONT_SECTION("u8x8_font_open_iconic_embedded_1x1") =
  "@P\1\1~BBBBB~\30 \60>\277\277>\60 \30~~\347\347~~\30\0\0dg"
  "=\4\0\0\0|~\37\37~|\0\300\340p\70\30\0\2\0\20\60<\16\70\30\20\20\300\370\376\247"
  "\376\360\300\0\300\340p>\37\30\30\10~~~~~~~\30\0$<\333Z$\0\0<~\377\377"
  "\201\201B<\357\357\357\357\357\257\357\0\0\12*\277\65\24\0\0\70D\200\217\200D\70\0<B\201\201"
  "\201\205\6\6\2\1\11\311\311\1\2";
token_count: 355

max_stars_count: 319
package eu.f3rog.blade.compiler.parcel.p;

import com.squareup.javapoet.ClassName;
import com.squareup.javapoet.TypeName;

import java.io.Serializable;

/**
 * Class {@link SerializableClassParceler}
 *
 * @author FrantisekGazo
 */
final class SerializableClassParceler implements BaseParceler {

    @Override
    public TypeName type() {
        return ClassName.get(Serializable.class);
    }

    @Override
    public CallFormat writeCall() {
        return new CallFormat("%s.writeSerializable(%s)",
                CallFormat.Arg.PARCEL, CallFormat.Arg.TARGET_GETTER);
    }

    @Override
    public CallFormat readCall() {
        return new CallFormat("(%s) %s.readSerializable()",
                CallFormat.Arg.TYPE, CallFormat.Arg.PARCEL);
    }
}
token_count: 272

max_stars_count: 304
<reponame>fduguet-nv/cunumeric # Copyright 2022 NVIDIA Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from __future__ import annotations from dataclasses import dataclass from types import ModuleType from ..coverage import is_implemented, is_multi, is_single from ._comparison_config import MISSING_NP_REFS, SKIP YES = "\u2713" NO = "\u274C" @dataclass(frozen=True) class ItemDetail: name: str implemented: bool np_ref: str lg_ref: str single: str multi: str @dataclass(frozen=True) class SectionDetail: title: str np_count: int lg_count: int items: list[ItemDetail] def _npref(name, obj): if isinstance(obj, ModuleType): full_name = f"{obj.__name__}.{name}" else: full_name = f"numpy.{obj.__name__}.{name}" role = "meth" if "ndarray" in full_name else "obj" if full_name in MISSING_NP_REFS: return f"``{full_name}``" return f":{role}:`{full_name}`" def _lgref(name, obj, implemented): if not implemented: return "-" if isinstance(obj, ModuleType): full_name = f"{obj.__name__}.{name}" else: full_name = f"cunumeric.{obj.__name__}.{name}" role = "meth" if "ndarray" in full_name else "obj" return f":{role}:`{full_name}`" def filter_names(obj, types=None, skip=()): names = (n for n in dir(obj)) # every name in the module or class names = (n for n in names if n not in skip) # except the ones we skip names = (n for n in names if not n.startswith("_")) # or any private names if types: # optionally filtered by type names = (n for n in names if isinstance(getattr(obj, n), types)) return names def get_item(name, np_obj, lg_obj): lg_attr = getattr(lg_obj, name) if implemented := is_implemented(lg_attr): single = YES if is_single(lg_attr) else NO multi = YES if is_multi(lg_attr) else NO else: single = multi = "" return ItemDetail( name=name, implemented=implemented, np_ref=_npref(name, np_obj), lg_ref=_lgref(name, lg_obj, implemented), single=single, multi=multi, ) def get_namespaces(attr): import numpy import cunumeric if attr is None: return numpy, cunumeric return getattr(numpy, attr), getattr(cunumeric, attr) def generate_section(config): np_obj, lg_obj = get_namespaces(config.attr) if config.names: names = config.names else: names = filter_names(np_obj, config.types, skip=SKIP) items = [get_item(name, np_obj, lg_obj) for name in names] return SectionDetail( title=config.title, np_count=len(items), lg_count=len([item for item in items if item.implemented]), items=sorted(items, key=lambda x: x.name), )
token_count: 1,335
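The `SectionDetail`/`ItemDetail` dataclasses in the sample above carry everything needed for a coverage table. A sketch of one possible rendering pass; the plain-text layout is an assumption for illustration (the real cunumeric docs build targets reST, per the `:obj:`/`:meth:` roles in `_npref`):

# Illustrative plain-text renderer for a SectionDetail; the real docs
# pipeline renders reST, so treat this layout as an assumption.
def render_section(section) -> str:
    header = f"{section.title}: {section.lg_count}/{section.np_count} implemented"
    lines = [header, "-" * len(header)]
    for item in section.items:
        status = "yes" if item.implemented else "no "
        lines.append(
            f"{item.name:<28} {status} single={item.single or '-'} multi={item.multi or '-'}"
        )
    return "\n".join(lines)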
max_stars_count: 736
<gh_stars>100-1000 #include "StdAfx.h" #include "Outlook365RestTask.h" #include "Common/FSecure/Crypto/Base64.h" #include "Common/FSecure/CppTools/ScopeGuard.h" #include "Common/json/json.hpp" #include "Common/FSecure/CppTools/StringConversions.h" #include "Common/FSecure/WinHttp/HttpClient.h" #include "Common/FSecure/WinHttp/Constants.h" #include "Common/FSecure/WinHttp/Uri.h" // Namespaces using json = nlohmann::json; using base64 = cppcodec::base64_rfc4648; using namespace FSecure::StringConversions; using namespace FSecure::WinHttp; //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// FSecure::Crypto::String FSecure::C3::Interfaces::Channels::Outlook365RestTask::ItemEndpoint = OBF("https://outlook.office.com/api/v2.0/me/tasks/"); FSecure::Crypto::String FSecure::C3::Interfaces::Channels::Outlook365RestTask::ListEndpoint = OBF("https://outlook.office.com/api/v2.0/me/tasks"); FSecure::Crypto::String FSecure::C3::Interfaces::Channels::Outlook365RestTask::TokenEndpoint = OBF("https://login.windows.net/organizations/oauth2/v2.0/token/"); FSecure::Crypto::String FSecure::C3::Interfaces::Channels::Outlook365RestTask::Scope = OBF("https://outlook.office365.com/.default"); //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// FSecure::C3::Interfaces::Channels::Outlook365RestTask::Outlook365RestTask(ByteView arguments) : Office365<Outlook365RestTask>(arguments) { } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// size_t FSecure::C3::Interfaces::Channels::Outlook365RestTask::OnSendToChannel(ByteView data) { RateLimitDelay(m_MinUpdateDelay, m_MaxUpdateDelay); try { // Construct the HTTP request auto webClient = HttpClient{ Convert<Utf16>(ItemEndpoint.Decrypt()), m_ProxyConfig }; auto request = CreateAuthRequest(Method::POST); auto chunkSize = std::min<size_t>(data.size(), base64::decoded_max_size(4 * 1024 * 1024) ); // Send max 4 MB. 
auto fileData = json(); fileData[OBF("Subject")] = m_OutboundDirectionName; fileData[OBF("Body")][OBF("Content")] = base64::encode(&data.front(), chunkSize); fileData[OBF("Body")][OBF("ContentType")] = OBF("Text"); auto body = fileData.dump(); request.SetData(ContentType::ApplicationJson, { body.begin(), body.end() }); EvaluateResponse(webClient.Request(request)); return chunkSize; } catch (std::exception & exception) { Log({ OBF_SEC("Caught a std::exception when running OnSend(): ") + exception.what(), LogMessage::Severity::Error }); return 0u; } } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// std::vector<FSecure::ByteVector> FSecure::C3::Interfaces::Channels::Outlook365RestTask::OnReceiveFromChannel() { RateLimitDelay(m_MinUpdateDelay, m_MaxUpdateDelay); auto packets = std::vector<ByteVector>{}; try { auto fileList = ListData(OBF("?top=1000&filter=startswith(Subject,'") + m_InboundDirectionName + OBF("')&orderby=CreatedDateTime")); for (auto& element : fileList.at(OBF("value"))) packets.push_back(base64::decode<ByteVector, std::string>(element.at(OBF("Body")).at(OBF("Content")))); for (auto& element : fileList.at(OBF("value"))) RemoveItem(element.at(OBF("Id"))); } catch (std::exception& exception) { Log({ OBF_SEC("Caught a std::exception when running OnReceive(): ") + exception.what(), LogMessage::Severity::Warning }); } return packets; } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// FSecure::ByteVector FSecure::C3::Interfaces::Channels::Outlook365RestTask::OnRunCommand(ByteView command) { auto commandCopy = command; // Each read moves ByteView. CommandCoppy is needed for default. switch (command.Read<uint16_t>()) { case 0: try { RemoveAllItems(); } catch (std::exception const& e) { Log({ OBF_SEC("Caught a std::exception when running RemoveAllFiles(): ") + e.what(), LogMessage::Severity::Error }); } return {}; default: return AbstractChannel::OnRunCommand(commandCopy); } }
token_count: 1,353

max_stars_count: 4,518
from socket import *
import random
import time

serverSocket = socket(AF_INET, SOCK_DGRAM)
serverPort = 800
serverSocket.bind(('', serverPort))
start_time = float(time.time())
end_time = start_time

while True:
    try:
        serverSocket.settimeout(0.1)
        message, address = serverSocket.recvfrom(1024)
        message = message.decode()
        rtime = float(message.split()[1])
        end_time = rtime
        Ping = float(time.time()) - rtime
        print(str(message.split()[0]) + ':', Ping)
    except Exception as e:
        if end_time == start_time:
            continue
        if time.time() - end_time >= 1.0:
            print('Heartbeat pause')
            break
        else:
            print('Packet lost')

'''
1: 0.0010023117065429688
2: 0.0009434223175048828
3: 0.0009434223175048828
4: 0.0029366016387939453
5: 0.0029366016387939453
6: 0.004778385162353516
7: 0.004778385162353516
8: 0.00577998161315918
9: 0.00577998161315918
10: 0.006776571273803711
Packet lost
Packet lost
Packet lost
Packet lost
Packet lost
Packet lost
Packet lost
Packet lost
Packet lost
Heartbeat pause
'''
token_count: 498
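The server above parses each datagram as "<sequence> <send-timestamp>" and compares arrival time against the embedded timestamp. A minimal matching client might look like this; only the port, 800, comes from the sample, while the localhost target and the ten-ping count are assumptions:

from socket import socket, AF_INET, SOCK_DGRAM
import time

# Sends "<seq> <send-time>" datagrams the server above knows how to
# parse, then goes silent so the server reports 'Heartbeat pause'.
clientSocket = socket(AF_INET, SOCK_DGRAM)
serverAddress = ('localhost', 800)  # port taken from the server sample

for seq in range(1, 11):  # ten heartbeats is an arbitrary choice
    message = '{} {}'.format(seq, time.time())
    clientSocket.sendto(message.encode(), serverAddress)
    time.sleep(0.1)

clientSocket.close()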
max_stars_count: 482
<gh_stars>100-1000 {"ast":null,"code":"var g; // This works in non-strict mode\n\ng = function () {\n return this;\n}();\n\ntry {\n // This works if eval is allowed (see CSP)\n g = g || new Function(\"return this\")();\n} catch (e) {\n // This works if the window reference is available\n if (typeof window === \"object\") g = window;\n} // g can still be undefined, but nothing to do about it...\n// We return undefined, instead of nothing here, so it's\n// easier to handle this case. if(!global) { ...}\n\n\nmodule.exports = g;","map":null,"metadata":{},"sourceType":"script"}
token_count: 191

max_stars_count: 1,380
# -*- coding: utf-8 -*- """ Defines the unit tests for the :mod:`colour.io.image` module. """ import numpy as np import os import platform import shutil import unittest import tempfile from colour.io import convert_bit_depth from colour.io import read_image_OpenImageIO, write_image_OpenImageIO from colour.io import read_image_Imageio, write_image_Imageio from colour.io import read_image, write_image from colour.io import ImageAttribute_Specification from colour.utilities import is_openimageio_installed __author__ = 'Colour Developers' __copyright__ = 'Copyright (C) 2013-2021 - Colour Developers' __license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause' __maintainer__ = 'Colour Developers' __email__ = '<EMAIL>' __status__ = 'Production' __all__ = [ 'RESOURCES_DIRECTORY', 'TestReadImageOpenImageIO', 'TestWriteImageOpenImageIO', 'TestReadImageImageio', 'TestWriteImageImageio', 'TestReadImage', 'TestWriteImage' ] RESOURCES_DIRECTORY = os.path.join(os.path.dirname(__file__), 'resources') class TestConvertBitDepth(unittest.TestCase): """ Defines :func:`colour.io.image.convert_bit_depth` definition unit tests methods. """ def test_convert_bit_depth(self): """ Tests :func:`colour.io.image.convert_bit_depth` definition. """ a = np.around(np.linspace(0, 1, 10) * 255).astype('uint8') self.assertIs(convert_bit_depth(a, 'uint8').dtype, np.dtype('uint8')) np.testing.assert_equal(convert_bit_depth(a, 'uint8'), a) self.assertIs(convert_bit_depth(a, 'uint16').dtype, np.dtype('uint16')) np.testing.assert_equal( convert_bit_depth(a, 'uint16'), np.array([ 0, 7196, 14649, 21845, 29041, 36494, 43690, 50886, 58339, 65535 ])) self.assertIs( convert_bit_depth(a, 'float16').dtype, np.dtype('float16')) np.testing.assert_almost_equal( convert_bit_depth(a, 'float16'), np.array([ 0.0000, 0.1098, 0.2235, 0.3333, 0.443, 0.5566, 0.6665, 0.7764, 0.8900, 1.0000 ]), decimal=3) self.assertIs( convert_bit_depth(a, 'float32').dtype, np.dtype('float32')) np.testing.assert_almost_equal( convert_bit_depth(a, 'float32'), np.array([ 0.00000000, 0.10980392, 0.22352941, 0.33333334, 0.44313726, 0.55686277, 0.66666669, 0.77647060, 0.89019608, 1.00000000 ]), decimal=7) self.assertIs( convert_bit_depth(a, 'float64').dtype, np.dtype('float64')) if platform.system() not in ('Windows', 'Microsoft'): # pragma: no cover self.assertIs( convert_bit_depth(a, 'float128').dtype, np.dtype('float128')) a = np.around(np.linspace(0, 1, 10) * 65535).astype('uint16') self.assertIs(convert_bit_depth(a, 'uint8').dtype, np.dtype('uint8')) np.testing.assert_equal( convert_bit_depth(a, 'uint8'), np.array([0, 28, 56, 85, 113, 141, 170, 198, 226, 255])) self.assertIs(convert_bit_depth(a, 'uint16').dtype, np.dtype('uint16')) np.testing.assert_equal(convert_bit_depth(a, 'uint16'), a) self.assertIs( convert_bit_depth(a, 'float16').dtype, np.dtype('float16')) np.testing.assert_almost_equal( convert_bit_depth(a, 'float16'), np.array([ 0.0000, 0.1098, 0.2235, 0.3333, 0.443, 0.5566, 0.6665, 0.7764, 0.8900, 1.0000 ]), decimal=3) self.assertIs( convert_bit_depth(a, 'float32').dtype, np.dtype('float32')) np.testing.assert_almost_equal( convert_bit_depth(a, 'float32'), np.array([ 0.00000000, 0.11111620, 0.22221714, 0.33333334, 0.44444954, 0.55555046, 0.66666669, 0.77778286, 0.88888383, 1.00000000 ]), decimal=7) self.assertIs( convert_bit_depth(a, 'float64').dtype, np.dtype('float64')) if platform.system() not in ('Windows', 'Microsoft'): # pragma: no cover self.assertIs( convert_bit_depth(a, 'float128').dtype, np.dtype('float128')) a = np.linspace(0, 
1, 10, dtype=np.float64) self.assertIs(convert_bit_depth(a, 'uint8').dtype, np.dtype('uint8')) np.testing.assert_equal( convert_bit_depth(a, 'uint8'), np.array([0, 28, 57, 85, 113, 142, 170, 198, 227, 255])) self.assertIs(convert_bit_depth(a, 'uint16').dtype, np.dtype('uint16')) np.testing.assert_equal( convert_bit_depth(a, 'uint16'), np.array([ 0, 7282, 14563, 21845, 29127, 36408, 43690, 50972, 58253, 65535 ])) self.assertIs( convert_bit_depth(a, 'float16').dtype, np.dtype('float16')) np.testing.assert_almost_equal( convert_bit_depth(a, 'float16'), np.array([ 0.0000, 0.1111, 0.2222, 0.3333, 0.4443, 0.5557, 0.6665, 0.7780, 0.8887, 1.0000 ]), decimal=3) self.assertIs( convert_bit_depth(a, 'float32').dtype, np.dtype('float32')) np.testing.assert_almost_equal( convert_bit_depth(a, 'float32'), a, decimal=7) self.assertIs( convert_bit_depth(a, 'float64').dtype, np.dtype('float64')) if platform.system() not in ('Windows', 'Microsoft'): # pragma: no cover self.assertIs( convert_bit_depth(a, 'float128').dtype, np.dtype('float128')) class TestReadImageOpenImageIO(unittest.TestCase): """ Defines :func:`colour.io.image.read_image_OpenImageIO` definition unit tests methods. """ def test_read_image_OpenImageIO(self): # pragma: no cover """ Tests :func:`colour.io.image.read_image_OpenImageIO` definition. """ if not is_openimageio_installed(): return image = read_image_OpenImageIO( os.path.join(RESOURCES_DIRECTORY, 'CMS_Test_Pattern.exr')) self.assertTupleEqual(image.shape, (1267, 1274, 3)) self.assertIs(image.dtype, np.dtype('float32')) image = read_image_OpenImageIO( os.path.join(RESOURCES_DIRECTORY, 'CMS_Test_Pattern.exr'), 'float16') self.assertIs(image.dtype, np.dtype('float16')) image, attributes = read_image_OpenImageIO( os.path.join(RESOURCES_DIRECTORY, 'CMS_Test_Pattern.exr'), attributes=True) self.assertTupleEqual(image.shape, (1267, 1274, 3)) self.assertEqual(attributes[0].name, 'oiio:ColorSpace') self.assertEqual(attributes[0].value, 'Linear') image = read_image_OpenImageIO( os.path.join(RESOURCES_DIRECTORY, 'Single_Channel.exr')) self.assertTupleEqual(image.shape, (256, 256)) image = read_image_OpenImageIO( os.path.join(RESOURCES_DIRECTORY, 'Colour_Logo.png'), 'uint8') self.assertTupleEqual(image.shape, (128, 256, 4)) self.assertIs(image.dtype, np.dtype('uint8')) self.assertEqual(np.min(image), 0) self.assertEqual(np.max(image), 255) image = read_image_OpenImageIO( os.path.join(RESOURCES_DIRECTORY, 'Colour_Logo.png'), 'uint16') self.assertTupleEqual(image.shape, (128, 256, 4)) self.assertIs(image.dtype, np.dtype('uint16')) self.assertEqual(np.min(image), 0) self.assertEqual(np.max(image), 65535) # TODO: Investigate "OIIO" behaviour here: 1.0 != 15360.0 # image = read_image_OpenImageIO( # os.path.join(RESOURCES_DIRECTORY, 'Colour_Logo.png'), 'float16') # self.assertIs(image.dtype, np.dtype('float16')) # self.assertEqual(np.min(image), 0.0) # self.assertEqual(np.max(image), 1.0) image = read_image_OpenImageIO( os.path.join(RESOURCES_DIRECTORY, 'Colour_Logo.png'), 'float32') self.assertIs(image.dtype, np.dtype('float32')) self.assertEqual(np.min(image), 0.0) self.assertEqual(np.max(image), 1.0) class TestWriteImageOpenImageIO(unittest.TestCase): """ Defines :func:`colour.io.image.write_image_OpenImageIO` definition unit tests methods. """ def setUp(self): """ Initialises common tests attributes. """ self._temporary_directory = tempfile.mkdtemp() def tearDown(self): """ After tests actions. 
""" shutil.rmtree(self._temporary_directory) def test_write_image_OpenImageIO(self): # pragma: no cover """ Tests :func:`colour.io.image.write_image_OpenImageIO` definition. """ if not is_openimageio_installed(): return from OpenImageIO import TypeDesc source_image_path = os.path.join(RESOURCES_DIRECTORY, 'Overflowing_Gradient.png') target_image_path = os.path.join(self._temporary_directory, 'Overflowing_Gradient.png') RGB = np.arange(0, 256, 1, dtype=np.uint8)[np.newaxis] * 2 write_image_OpenImageIO(RGB, target_image_path, bit_depth='uint8') image = read_image_OpenImageIO(source_image_path, bit_depth='uint8') np.testing.assert_equal(np.squeeze(RGB), image) source_image_path = os.path.join(RESOURCES_DIRECTORY, 'CMS_Test_Pattern.exr') target_image_path = os.path.join(self._temporary_directory, 'CMS_Test_Pattern.exr') image = read_image_OpenImageIO(source_image_path) write_image_OpenImageIO(image, target_image_path) image = read_image_OpenImageIO(target_image_path) self.assertTupleEqual(image.shape, (1267, 1274, 3)) self.assertIs(image.dtype, np.dtype('float32')) chromaticities = ( 0.73470, 0.26530, 0.00000, 1.00000, 0.00010, -0.07700, 0.32168, 0.33767, ) write_attributes = [ ImageAttribute_Specification('acesImageContainerFlag', True), ImageAttribute_Specification('chromaticities', chromaticities, TypeDesc('float[8]')), ImageAttribute_Specification('compression', 'none') ] write_image_OpenImageIO( image, target_image_path, attributes=write_attributes) image, read_attributes = read_image_OpenImageIO( target_image_path, attributes=True) for write_attribute in write_attributes: attribute_exists = False for read_attribute in read_attributes: if write_attribute.name == read_attribute.name: attribute_exists = True if isinstance(write_attribute.value, tuple): np.testing.assert_almost_equal( write_attribute.value, read_attribute.value, decimal=5) else: self.assertEqual(write_attribute.value, read_attribute.value) assert attribute_exists, ( '"{0}" attribute was not found on image!'.format( write_attribute.name)) class TestReadImageImageio(unittest.TestCase): """ Defines :func:`colour.io.image.read_image_Imageio` definition unit tests methods. """ def test_read_image_Imageio(self): """ Tests :func:`colour.io.image.read_image_Imageio` definition. 
""" image = read_image_Imageio( os.path.join(RESOURCES_DIRECTORY, 'CMS_Test_Pattern.exr')) self.assertTupleEqual(image.shape, (1267, 1274, 3)) self.assertIs(image.dtype, np.dtype('float32')) image = read_image_Imageio( os.path.join(RESOURCES_DIRECTORY, 'CMS_Test_Pattern.exr'), 'float16') self.assertTupleEqual(image.shape, (1267, 1274, 3)) self.assertIs(image.dtype, np.dtype('float16')) image = read_image_Imageio( os.path.join(RESOURCES_DIRECTORY, 'Single_Channel.exr')) self.assertTupleEqual(image.shape, (256, 256)) image = read_image_Imageio( os.path.join(RESOURCES_DIRECTORY, 'Colour_Logo.png'), 'uint8') self.assertTupleEqual(image.shape, (128, 256, 4)) self.assertIs(image.dtype, np.dtype('uint8')) self.assertEqual(np.min(image), 0) self.assertEqual(np.max(image), 255) image = read_image_Imageio( os.path.join(RESOURCES_DIRECTORY, 'Colour_Logo.png'), 'uint16') self.assertTupleEqual(image.shape, (128, 256, 4)) self.assertIs(image.dtype, np.dtype('uint16')) self.assertEqual(np.min(image), 0) self.assertEqual(np.max(image), 65535) image = read_image_Imageio( os.path.join(RESOURCES_DIRECTORY, 'Colour_Logo.png'), 'float16') self.assertIs(image.dtype, np.dtype('float16')) self.assertEqual(np.min(image), 0.0) self.assertEqual(np.max(image), 1.0) image = read_image_Imageio( os.path.join(RESOURCES_DIRECTORY, 'Colour_Logo.png'), 'float32') self.assertIs(image.dtype, np.dtype('float32')) self.assertEqual(np.min(image), 0.0) self.assertEqual(np.max(image), 1.0) class TestWriteImageImageio(unittest.TestCase): """ Defines :func:`colour.io.image.write_image_Imageio` definition unit tests methods. """ def setUp(self): """ Initialises common tests attributes. """ self._temporary_directory = tempfile.mkdtemp() def tearDown(self): """ After tests actions. """ shutil.rmtree(self._temporary_directory) def test_write_image_Imageio(self): """ Tests :func:`colour.io.image.write_image_Imageio` definition. """ source_image_path = os.path.join(RESOURCES_DIRECTORY, 'Overflowing_Gradient.png') target_image_path = os.path.join(self._temporary_directory, 'Overflowing_Gradient.png') RGB = np.arange(0, 256, 1, dtype=np.uint8)[np.newaxis] * 2 write_image_Imageio(RGB, target_image_path, bit_depth='uint8') image = read_image_Imageio(source_image_path, bit_depth='uint8') np.testing.assert_equal(np.squeeze(RGB), image) source_image_path = os.path.join(RESOURCES_DIRECTORY, 'CMS_Test_Pattern.exr') target_image_path = os.path.join(self._temporary_directory, 'CMS_Test_Pattern.exr') image = read_image_Imageio(source_image_path) write_image_Imageio(image, target_image_path) image = read_image_Imageio(target_image_path) self.assertTupleEqual(image.shape, (1267, 1274, 3)) self.assertIs(image.dtype, np.dtype('float32')) class TestReadImage(unittest.TestCase): """ Defines :func:`colour.io.image.read_image` definition unit tests methods. """ def test_read_image(self): """ Tests :func:`colour.io.image.read_image` definition. """ image = read_image( os.path.join(RESOURCES_DIRECTORY, 'CMS_Test_Pattern.exr')) self.assertTupleEqual(image.shape, (1267, 1274, 3)) self.assertIs(image.dtype, np.dtype('float32')) image = read_image( os.path.join(RESOURCES_DIRECTORY, 'Single_Channel.exr')) self.assertTupleEqual(image.shape, (256, 256)) class TestWriteImage(unittest.TestCase): """ Defines :func:`colour.io.image.write_image` definition unit tests methods. """ def setUp(self): """ Initialises common tests attributes. """ self._temporary_directory = tempfile.mkdtemp() def tearDown(self): """ After tests actions. 
""" shutil.rmtree(self._temporary_directory) def test_write_image(self): """ Tests :func:`colour.io.image.write_image` definition. """ source_image_path = os.path.join(RESOURCES_DIRECTORY, 'CMS_Test_Pattern.exr') target_image_path = os.path.join(self._temporary_directory, 'CMS_Test_Pattern.exr') image = read_image(source_image_path) write_image(image, target_image_path) image = read_image(target_image_path) self.assertTupleEqual(image.shape, (1267, 1274, 3)) self.assertIs(image.dtype, np.dtype('float32')) if __name__ == '__main__': unittest.main()
token_count: 8,539
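The `convert_bit_depth` behaviour those tests pin down is easy to check interactively. A short sketch, assuming the colour-science package is installed (the imports mirror the test module above):

import numpy as np
from colour.io import convert_bit_depth

# uint8 -> float32 scales codes into [0, 1]; converting back recovers
# the original values, mirroring the assertions in the tests above.
a = np.around(np.linspace(0, 1, 10) * 255).astype('uint8')

as_f32 = convert_bit_depth(a, 'float32')
assert as_f32.dtype == np.dtype('float32')

back = convert_bit_depth(as_f32, 'uint8')
np.testing.assert_equal(back, a)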
max_stars_count: 940
/* * UAE - The Un*x Amiga Emulator * * MC68000 emulation - machine dependent bits * * Copyright 1996 <NAME> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef M68K_FLAGS_H #define M68K_FLAGS_H #ifdef OPTIMIZED_FLAGS #if defined(X86_ASSEMBLY) || defined(X86_64_ASSEMBLY) || defined(MSVC_INTRINSICS) #ifndef SAHF_SETO_PROFITABLE /* PUSH/POP instructions are naturally 64-bit sized on x86-64, thus unsigned long hereunder is either 64-bit or 32-bit wide depending on the target. */ struct flag_struct { unsigned long cznv; unsigned long x; }; #define FLAGVAL_Z 0x40 #define FLAGVAL_N 0x80 #define SET_ZFLG(y) (regflags.cznv = (((uae_u32)regflags.cznv) & ~0x40) | (((y) & 1) << 6)) #define SET_CFLG(y) (regflags.cznv = (((uae_u32)regflags.cznv) & ~1) | ((y) & 1)) #define SET_VFLG(y) (regflags.cznv = (((uae_u32)regflags.cznv) & ~0x800) | (((y) & 1) << 11)) #define SET_NFLG(y) (regflags.cznv = (((uae_u32)regflags.cznv) & ~0x80) | (((y) & 1) << 7)) #define SET_XFLG(y) (regflags.x = (y)) #define GET_ZFLG ((regflags.cznv >> 6) & 1) #define GET_CFLG (regflags.cznv & 1) #define GET_VFLG ((regflags.cznv >> 11) & 1) #define GET_NFLG ((regflags.cznv >> 7) & 1) #define GET_XFLG (regflags.x & 1) #define CLEAR_CZNV (regflags.cznv = 0) #define GET_CZNV (regflags.cznv) #define IOR_CZNV(X) (regflags.cznv |= (X)) #define SET_CZNV(X) (regflags.cznv = (X)) #define COPY_CARRY (regflags.x = regflags.cznv) extern struct flag_struct regflags ASM_SYM ("regflags"); static __inline__ int cctrue(int cc) { uae_u32 cznv = regflags.cznv; switch(cc){ case 0: return 1; /* T */ case 1: return 0; /* F */ case 2: return (cznv & 0x41) == 0; /* !GET_CFLG && !GET_ZFLG; HI */ case 3: return (cznv & 0x41) != 0; /* GET_CFLG || GET_ZFLG; LS */ case 4: return (cznv & 1) == 0; /* !GET_CFLG; CC */ case 5: return (cznv & 1) != 0; /* GET_CFLG; CS */ case 6: return (cznv & 0x40) == 0; /* !GET_ZFLG; NE */ case 7: return (cznv & 0x40) != 0; /* GET_ZFLG; EQ */ case 8: return (cznv & 0x800) == 0;/* !GET_VFLG; VC */ case 9: return (cznv & 0x800) != 0;/* GET_VFLG; VS */ case 10:return (cznv & 0x80) == 0; /* !GET_NFLG; PL */ case 11:return (cznv & 0x80) != 0; /* GET_NFLG; MI */ case 12:return (((cznv << 4) ^ cznv) & 0x800) == 0; /* GET_NFLG == GET_VFLG; GE */ case 13:return (((cznv << 4) ^ cznv) & 0x800) != 0;/* GET_NFLG != GET_VFLG; LT */ case 14: cznv &= 0x8c0; return (((cznv << 4) ^ cznv) & 0x840) == 0; /* !GET_ZFLG && (GET_NFLG == GET_VFLG); GT */ case 15: cznv &= 0x8c0; return (((cznv << 4) ^ cznv) & 0x840) != 0; /* GET_ZFLG || (GET_NFLG != GET_VFLG); LE */ } return 0; } #define optflag_testl(v) \ __asm__ __volatile__ ("andl %1,%1\n\t" \ "pushf\n\t" \ "pop %0\n\t" \ : "=r" (regflags.cznv) : "r" (v) : "cc") #define optflag_testw(v) \ __asm__ __volatile__ ("andw %w1,%w1\n\t" \ "pushf\n\t" \ "pop %0\n\t" \ : "=r" (regflags.cznv) : "r" (v) : "cc") #define optflag_testb(v) \ __asm__ __volatile__ ("andb 
%b1,%b1\n\t" \ "pushf\n\t" \ "pop %0\n\t" \ : "=r" (regflags.cznv) : "q" (v) : "cc") #define optflag_addl(v, s, d) do { \ __asm__ __volatile__ ("addl %k2,%k1\n\t" \ "pushf\n\t" \ "pop %0\n\t" \ : "=r" (regflags.cznv), "=r" (v) : "rmi" (s), "1" (d) : "cc"); \ COPY_CARRY; \ } while (0) #define optflag_addw(v, s, d) do { \ __asm__ __volatile__ ("addw %w2,%w1\n\t" \ "pushf\n\t" \ "pop %0\n\t" \ : "=r" (regflags.cznv), "=r" (v) : "rmi" (s), "1" (d) : "cc"); \ COPY_CARRY; \ } while (0) #define optflag_addb(v, s, d) do { \ __asm__ __volatile__ ("addb %b2,%b1\n\t" \ "pushf\n\t" \ "pop %0\n\t" \ : "=r" (regflags.cznv), "=q" (v) : "qmi" (s), "1" (d) : "cc"); \ COPY_CARRY; \ } while (0) #define optflag_subl(v, s, d) do { \ __asm__ __volatile__ ("subl %k2,%k1\n\t" \ "pushf\n\t" \ "pop %0\n\t" \ : "=r" (regflags.cznv), "=r" (v) : "rmi" (s), "1" (d) : "cc"); \ COPY_CARRY; \ } while (0) #define optflag_subw(v, s, d) do { \ __asm__ __volatile__ ("subw %w2,%w1\n\t" \ "pushf\n\t" \ "pop %0\n\t" \ : "=r" (regflags.cznv), "=r" (v) : "rmi" (s), "1" (d) : "cc"); \ COPY_CARRY; \ } while (0) #define optflag_subb(v, s, d) do { \ __asm__ __volatile__ ("subb %b2,%b1\n\t" \ "pushf\n\t" \ "pop %0\n\t" \ : "=r" (regflags.cznv), "=q" (v) : "qmi" (s), "1" (d) : "cc"); \ COPY_CARRY; \ } while (0) #define optflag_cmpl(s, d) \ __asm__ __volatile__ ("cmpl %k1,%k2\n\t" \ "pushf\n\t" \ "pop %0\n\t" \ : "=r" (regflags.cznv) : "rmi" (s), "r" (d) : "cc") #define optflag_cmpw(s, d) \ __asm__ __volatile__ ("cmpw %w1,%w2\n\t" \ "pushf\n\t" \ "pop %0\n\t" \ : "=r" (regflags.cznv) : "rmi" (s), "r" (d) : "cc") #define optflag_cmpb(s, d) \ __asm__ __volatile__ ("cmpb %b1,%b2\n\t" \ "pushf\n\t" \ "pop %0\n\t" \ : "=r" (regflags.cznv) : "qmi" (s), "q" (d) : "cc") #else struct flag_struct { uae_u32 cznv; uae_u32 x; }; #define FLAGVAL_Z 0x4000 #define FLAGVAL_N 0x8000 #define SET_ZFLG(y) (regflags.cznv = (regflags.cznv & ~0x4000) | (((y) & 1) << 14)) #define SET_CFLG(y) (regflags.cznv = (regflags.cznv & ~0x100) | (((y) & 1) << 8)) #define SET_VFLG(y) (regflags.cznv = (regflags.cznv & ~0x1) | (((y) & 1))) #define SET_NFLG(y) (regflags.cznv = (regflags.cznv & ~0x8000) | (((y) & 1) << 15)) #define SET_XFLG(y) (regflags.x = (y)) #define GET_ZFLG ((regflags.cznv >> 14) & 1) #define GET_CFLG ((regflags.cznv >> 8) & 1) #define GET_VFLG ((regflags.cznv >> 0) & 1) #define GET_NFLG ((regflags.cznv >> 15) & 1) #define GET_XFLG (regflags.x & 1) #define CLEAR_CZNV (regflags.cznv = 0) #define GET_CZNV (regflags.cznv) #define IOR_CZNV(X) (regflags.cznv |= (X)) #define SET_CZNV(X) (regflags.cznv = (X)) #define COPY_CARRY (regflags.x = (regflags.cznv)>>8) extern struct flag_struct regflags ASM_SYM("regflags"); static __inline__ int cctrue(int cc) { uae_u32 cznv = regflags.cznv; switch(cc){ case 0: return 1; /* T */ case 1: return 0; /* F */ case 2: return (cznv & 0x4100) == 0; /* !GET_CFLG && !GET_ZFLG; HI */ case 3: return (cznv & 0x4100) != 0; /* GET_CFLG || GET_ZFLG; LS */ case 4: return (cznv & 0x100) == 0; /* !GET_CFLG; CC */ case 5: return (cznv & 0x100) != 0; /* GET_CFLG; CS */ case 6: return (cznv & 0x4000) == 0; /* !GET_ZFLG; NE */ case 7: return (cznv & 0x4000) != 0; /* GET_ZFLG; EQ */ case 8: return (cznv & 0x01) == 0; /* !GET_VFLG; VC */ case 9: return (cznv & 0x01) != 0; /* GET_VFLG; VS */ case 10:return (cznv & 0x8000) == 0; /* !GET_NFLG; PL */ case 11:return (cznv & 0x8000) != 0; /* GET_NFLG; MI */ case 12:return (((cznv << 15) ^ cznv) & 0x8000) == 0; /* GET_NFLG == GET_VFLG; GE */ case 13:return (((cznv << 15) ^ cznv) & 0x8000) != 0;/* 
GET_NFLG != GET_VFLG; LT */ case 14: cznv &= 0xc001; return (((cznv << 15) ^ cznv) & 0xc000) == 0; /* !GET_ZFLG && (GET_NFLG == GET_VFLG); GT */ case 15: cznv &= 0xc001; return (((cznv << 15) ^ cznv) & 0xc000) != 0; /* GET_ZFLG || (GET_NFLG != GET_VFLG); LE */ } abort(); return 0; } /* Manually emit LAHF instruction so that 64-bit assemblers can grok it */ #if defined __x86_64__ && defined __GNUC__ #define ASM_LAHF ".byte 0x9f" #else #define ASM_LAHF "lahf" #endif /* Is there any way to do this without declaring *all* memory clobbered? I.e. any way to tell gcc that some byte-sized value is in %al? */ #define optflag_testl(v) \ __asm__ __volatile__ ("andl %0,%0\n\t" \ ASM_LAHF "\n\t" \ "seto %%al\n\t" \ "movb %%al,regflags\n\t" \ "movb %%ah,regflags+1\n\t" \ : : "r" (v) : "%eax","cc","memory") #define optflag_testw(v) \ __asm__ __volatile__ ("andw %w0,%w0\n\t" \ ASM_LAHF "\n\t" \ "seto %%al\n\t" \ "movb %%al,regflags\n\t" \ "movb %%ah,regflags+1\n\t" \ : : "r" (v) : "%eax","cc","memory") #define optflag_testb(v) \ __asm__ __volatile__ ("andb %b0,%b0\n\t" \ ASM_LAHF "\n\t" \ "seto %%al\n\t" \ "movb %%al,regflags\n\t" \ "movb %%ah,regflags+1\n\t" \ : : "q" (v) : "%eax","cc","memory") #define optflag_addl(v, s, d) do { \ __asm__ __volatile__ ("addl %k1,%k0\n\t" \ ASM_LAHF "\n\t" \ "seto %%al\n\t" \ "movb %%al,regflags\n\t" \ "movb %%ah,regflags+1\n\t" \ : "=r" (v) : "rmi" (s), "0" (d) : "%eax","cc","memory"); \ COPY_CARRY; \ } while (0) #define optflag_addw(v, s, d) do { \ __asm__ __volatile__ ("addw %w1,%w0\n\t" \ ASM_LAHF "\n\t" \ "seto %%al\n\t" \ "movb %%al,regflags\n\t" \ "movb %%ah,regflags+1\n\t" \ : "=r" (v) : "rmi" (s), "0" (d) : "%eax","cc","memory"); \ COPY_CARRY; \ } while (0) #define optflag_addb(v, s, d) do { \ __asm__ __volatile__ ("addb %b1,%b0\n\t" \ ASM_LAHF "\n\t" \ "seto %%al\n\t" \ "movb %%al,regflags\n\t" \ "movb %%ah,regflags+1\n\t" \ : "=q" (v) : "qmi" (s), "0" (d) : "%eax","cc","memory"); \ COPY_CARRY; \ } while (0) #define optflag_subl(v, s, d) do { \ __asm__ __volatile__ ("subl %k1,%k0\n\t" \ ASM_LAHF "\n\t" \ "seto %%al\n\t" \ "movb %%al,regflags\n\t" \ "movb %%ah,regflags+1\n\t" \ : "=r" (v) : "rmi" (s), "0" (d) : "%eax","cc","memory"); \ COPY_CARRY; \ } while (0) #define optflag_subw(v, s, d) do { \ __asm__ __volatile__ ("subw %w1,%w0\n\t" \ ASM_LAHF "\n\t" \ "seto %%al\n\t" \ "movb %%al,regflags\n\t" \ "movb %%ah,regflags+1\n\t" \ : "=r" (v) : "rmi" (s), "0" (d) : "%eax","cc","memory"); \ COPY_CARRY; \ } while (0) #define optflag_subb(v, s, d) do { \ __asm__ __volatile__ ("subb %b1,%b0\n\t" \ ASM_LAHF "\n\t" \ "seto %%al\n\t" \ "movb %%al,regflags\n\t" \ "movb %%ah,regflags+1\n\t" \ : "=q" (v) : "qmi" (s), "0" (d) : "%eax","cc","memory"); \ COPY_CARRY; \ } while (0) #define optflag_cmpl(s, d) \ __asm__ __volatile__ ("cmpl %k0,%k1\n\t" \ ASM_LAHF "\n\t" \ "seto %%al\n\t" \ "movb %%al,regflags\n\t" \ "movb %%ah,regflags+1\n\t" \ : : "rmi" (s), "r" (d) : "%eax","cc","memory") #define optflag_cmpw(s, d) \ __asm__ __volatile__ ("cmpw %w0,%w1\n\t" \ ASM_LAHF "\n\t" \ "seto %%al\n\t" \ "movb %%al,regflags\n\t" \ "movb %%ah,regflags+1\n\t" \ : : "rmi" (s), "r" (d) : "%eax","cc","memory"); #define optflag_cmpb(s, d) \ __asm__ __volatile__ ("cmpb %b0,%b1\n\t" \ ASM_LAHF "\n\t" \ "seto %%al\n\t" \ "movb %%al,regflags\n\t" \ "movb %%ah,regflags+1\n\t" \ : : "qmi" (s), "q" (d) : "%eax","cc","memory") #endif #elif defined(SPARC_V8_ASSEMBLY) || defined(SPARC_V9_ASSEMBLY) struct flag_struct { unsigned char nzvc; unsigned char x; }; extern struct flag_struct regflags; #define 
FLAGVAL_Z 0x04 #define FLAGVAL_N 0x08 #define SET_ZFLG(y) (regflags.nzvc = (regflags.nzvc & ~0x04) | (((y) & 1) << 2)) #define SET_CFLG(y) (regflags.nzvc = (regflags.nzvc & ~1) | ((y) & 1)) #define SET_VFLG(y) (regflags.nzvc = (regflags.nzvc & ~0x02) | (((y) & 1) << 1)) #define SET_NFLG(y) (regflags.nzvc = (regflags.nzvc & ~0x08) | (((y) & 1) << 3)) #define SET_XFLG(y) (regflags.x = (y)) #define GET_ZFLG ((regflags.nzvc >> 2) & 1) #define GET_CFLG (regflags.nzvc & 1) #define GET_VFLG ((regflags.nzvc >> 1) & 1) #define GET_NFLG ((regflags.nzvc >> 3) & 1) #define GET_XFLG (regflags.x & 1) #define CLEAR_CZNV (regflags.nzvc = 0) #define GET_CZNV (reflags.nzvc) #define IOR_CZNV(X) (refglags.nzvc |= (X)) #define SET_CZNV(X) (regflags.nzvc = (X)) #define COPY_CARRY (regflags.x = regflags.nzvc) static __inline__ int cctrue(int cc) { uae_u32 nzvc = regflags.nzvc; switch(cc){ case 0: return 1; /* T */ case 1: return 0; /* F */ case 2: return (nzvc & 0x05) == 0; /* !GET_CFLG && !GET_ZFLG; HI */ case 3: return (nzvc & 0x05) != 0; /* GET_CFLG || GET_ZFLG; LS */ case 4: return (nzvc & 1) == 0; /* !GET_CFLG; CC */ case 5: return (nzvc & 1) != 0; /* GET_CFLG; CS */ case 6: return (nzvc & 0x04) == 0; /* !GET_ZFLG; NE */ case 7: return (nzvc & 0x04) != 0; /* GET_ZFLG; EQ */ case 8: return (nzvc & 0x02) == 0;/* !GET_VFLG; VC */ case 9: return (nzvc & 0x02) != 0;/* GET_VFLG; VS */ case 10:return (nzvc & 0x08) == 0; /* !GET_NFLG; PL */ case 11:return (nzvc & 0x08) != 0; /* GET_NFLG; MI */ case 12:return (((nzvc << 2) ^ nzvc) & 0x08) == 0; /* GET_NFLG == GET_VFLG; GE */ case 13:return (((nzvc << 2) ^ nzvc) & 0x08) != 0;/* GET_NFLG != GET_VFLG; LT */ case 14: nzvc &= 0x0e; return (((nzvc << 2) ^ nzvc) & 0x0c) == 0; /* !GET_ZFLG && (GET_NFLG == GET_VFLG); GT */ case 15: nzvc &= 0x0e; return (((nzvc << 2) ^ nzvc) & 0x0c) != 0; /* GET_ZFLG || (GET_NFLG != GET_VFLG); LE */ } return 0; } #ifdef SPARC_V8_ASSEMBLY static inline uae_u32 sparc_v8_flag_add_8(flag_struct *flags, uae_u32 src, uae_u32 dst) { uae_u32 value; __asm__ ("\n" " sll %2, 24, %%o0\n" " sll %3, 24, %%o1\n" " addcc %%o0, %%o1, %%o0\n" " addx %%g0, %%g0, %%o1 ! X,C flags\n" " srl %%o0, 24, %0\n" " stb %%o1, [%1 + 1]\n" " bl,a .+8\n" " or %%o1, 0x08, %%o1 ! N flag\n" " bz,a .+8\n" " or %%o1, 0x04, %%o1 ! Z flag\n" " bvs,a .+8\n" " or %%o1, 0x02, %%o1 ! V flag\n" " stb %%o1, [%1]\n" : "=&r" (value) : "r" (flags), "r" (dst), "r" (src) : "cc", "o0", "o1" ); return value; } static inline uae_u32 sparc_v8_flag_add_16(flag_struct *flags, uae_u32 src, uae_u32 dst) { uae_u32 value; __asm__ ("\n" " sll %2, 16, %%o0\n" " sll %3, 16, %%o1\n" " addcc %%o0, %%o1, %%o0\n" " addx %%g0, %%g0, %%o1 ! X,C flags\n" " srl %%o0, 16, %0\n" " stb %%o1, [%1 + 1]\n" " bl,a .+8\n" " or %%o1, 0x08, %%o1 ! N flag\n" " bz,a .+8\n" " or %%o1, 0x04, %%o1 ! Z flag\n" " bvs,a .+8\n" " or %%o1, 0x02, %%o1 ! V flag\n" " stb %%o1, [%1]\n" : "=&r" (value) : "r" (flags), "r" (dst), "r" (src) : "cc", "o0", "o1" ); return value; } static inline uae_u32 sparc_v8_flag_add_32(flag_struct *flags, uae_u32 src, uae_u32 dst) { uae_u32 value; __asm__ ("\n" " addcc %2, %3, %0\n" " addx %%g0, %%g0, %%o0 ! X,C flags\n" " stb %%o0, [%1 + 1]\n" " bl,a .+8\n" " or %%o0, 0x08, %%o0 ! N flag\n" " bz,a .+8\n" " or %%o0, 0x04, %%o0 ! Z flag\n" " bvs,a .+8\n" " or %%o0, 0x02, %%o0 ! 
V flag\n" " stb %%o0, [%1]\n" : "=&r" (value) : "r" (flags), "r" (dst), "r" (src) : "cc", "o0" ); return value; } static inline uae_u32 sparc_v8_flag_sub_8(flag_struct *flags, uae_u32 src, uae_u32 dst) { uae_u32 value; __asm__ ("\n" " sll %2, 24, %%o0\n" " sll %3, 24, %%o1\n" " subcc %%o0, %%o1, %%o0\n" " addx %%g0, %%g0, %%o1 ! X,C flags\n" " srl %%o0, 24, %0\n" " stb %%o1, [%1 + 1]\n" " bl,a .+8\n" " or %%o1, 0x08, %%o1 ! N flag\n" " bz,a .+8\n" " or %%o1, 0x04, %%o1 ! Z flag\n" " bvs,a .+8\n" " or %%o1, 0x02, %%o1 ! V flag\n" " stb %%o1, [%1]\n" : "=&r" (value) : "r" (flags), "r" (dst), "r" (src) : "cc", "o0", "o1" ); return value; } static inline uae_u32 sparc_v8_flag_sub_16(flag_struct *flags, uae_u32 src, uae_u32 dst) { uae_u32 value; __asm__ ("\n" " sll %2, 16, %%o0\n" " sll %3, 16, %%o1\n" " subcc %%o0, %%o1, %%o0\n" " addx %%g0, %%g0, %%o1 ! X,C flags\n" " srl %%o0, 16, %0\n" " stb %%o1, [%1 + 1]\n" " bl,a .+8\n" " or %%o1, 0x08, %%o1 ! N flag\n" " bz,a .+8\n" " or %%o1, 0x04, %%o1 ! Z flag\n" " bvs,a .+8\n" " or %%o1, 0x02, %%o1 ! V flag\n" " stb %%o1, [%1]\n" : "=&r" (value) : "r" (flags), "r" (dst), "r" (src) : "cc", "o0", "o1" ); return value; } static inline uae_u32 sparc_v8_flag_sub_32(flag_struct *flags, uae_u32 src, uae_u32 dst) { uae_u32 value; __asm__ ("\n" " subcc %2, %3, %0\n" " addx %%g0, %%g0, %%o0 ! X,C flags\n" " stb %%o0, [%1 + 1]\n" " bl,a .+8\n" " or %%o0, 0x08, %%o0 ! N flag\n" " bz,a .+8\n" " or %%o0, 0x04, %%o0 ! Z flag\n" " bvs,a .+8\n" " or %%o0, 0x02, %%o0 ! V flag\n" " stb %%o0, [%1]\n" : "=&r" (value) : "r" (flags), "r" (dst), "r" (src) : "cc", "o0" ); return value; } static inline void sparc_v8_flag_cmp_8(flag_struct *flags, uae_u32 src, uae_u32 dst) { __asm__ ("\n" " sll %1, 24, %%o0\n" " sll %2, 24, %%o1\n" " subcc %%o0, %%o1, %%g0\n" " addx %%g0, %%g0, %%o0 ! C flag\n" " bl,a .+8\n" " or %%o0, 0x08, %%o0 ! N flag\n" " bz,a .+8\n" " or %%o0, 0x04, %%o0 ! Z flag\n" " bvs,a .+8\n" " or %%o0, 0x02, %%o0 ! V flag\n" " stb %%o0, [%0]\n" : /* no outputs */ : "r" (flags), "r" (dst), "r" (src) : "cc", "o0", "o1" ); } static inline void sparc_v8_flag_cmp_16(flag_struct *flags, uae_u32 src, uae_u32 dst) { __asm__ ("\n" " sll %1, 16, %%o0\n" " sll %2, 16, %%o1\n" " subcc %%o0, %%o1, %%g0\n" " addx %%g0, %%g0, %%o0 ! C flag\n" " bl,a .+8\n" " or %%o0, 0x08, %%o0 ! N flag\n" " bz,a .+8\n" " or %%o0, 0x04, %%o0 ! Z flag\n" " bvs,a .+8\n" " or %%o0, 0x02, %%o0 ! V flag\n" " stb %%o0, [%0]\n" : /* no outputs */ : "r" (flags), "r" (dst), "r" (src) : "cc", "o0", "o1" ); } static inline void sparc_v8_flag_cmp_32(flag_struct *flags, uae_u32 src, uae_u32 dst) { __asm__ ("\n" " subcc %1, %2, %%o1\n" " srl %%o1, 31, %%o0\n" " sll %%o0, 3, %%o0\n" " addx %%o0, %%g0, %%o0\n" " bvs,a .+8\n" " or %%o0, 0x02, %%o0\n" " subcc %%g0, %%o1, %%g0\n" " addx %%g0, 7, %%o1\n" " and %%o1, 0x04, %%o1\n" " or %%o0, %%o1, %%o0\n" " stb %%o0, [%0]\n" : /* no outputs */ : "r" (flags), "r" (dst), "r" (src) : "cc", "o0", "o1" ); } static inline uae_u32 sparc_v8_flag_addx_8(flag_struct *flags, uae_u32 src, uae_u32 dst) { uae_u32 value; __asm__ ("\n" " ldub [%1 + 1], %%o1 ! Get the X Flag\n" " subcc %%g0, %%o1, %%g0 ! Set the SPARC carry flag, if X set\n" " addxcc %2, %3, %0\n" : "=&r" (value) : "r" (flags), "r" (dst), "r" (src) : "cc", "o0", "o1" ); return value; } #if 0 VERY SLOW... 
static inline uae_u32 sparc_v8_flag_addx_8(flag_struct *flags, uae_u32 src, uae_u32 dst) { uae_u32 value; __asm__ ("\n" " sll %2, 24, %%o0\n" " sll %3, 24, %%o1\n" " addcc %%o0, %%o1, %%o0\n" " addx %%g0, %%g0, %%o1 ! X,C flags\n" " bvs,a .+8\n" " or %%o1, 0x02, %%o1 ! V flag\n" " ldub [%1 + 1], %%o2\n" " subcc %%g0, %%o2, %%g0\n" " addx %%g0, %%g0, %%o2\n" " sll %%o2, 24, %%o2\n" " addcc %%o0, %%o2, %%o0\n" " srl %%o0, 24, %0\n" " addx %%g0, %%g0, %%o2\n" " or %%o1, %%o2, %%o1 ! update X,C flags\n" " bl,a .+8\n" " or %%o1, 0x08, %%o1 ! N flag\n" " ldub [%1], %%o0 ! retreive the old NZVC flags (XXX)\n" " bvs,a .+8\n" " or %%o1, 0x02, %%o1 ! update V flag\n" " and %%o0, 0x04, %%o0 ! (XXX) but keep only Z flag\n" " and %%o1, 1, %%o2 ! keep C flag in %%o2\n" " bnz,a .+8\n" " or %%g0, %%g0, %%o0 ! Z flag cleared if non-zero result\n" " stb %%o2, [%1 + 1] ! store the X flag\n" " or %%o1, %%o0, %%o1\n" " stb %%o1, [%1]\n" : "=&r" (value) : "r" (flags), "r" (dst), "r" (src) : "cc", "o0", "o1", "o2" ); return value; } #endif static inline uae_u32 sparc_v8_flag_addx_32(flag_struct *flags, uae_u32 src, uae_u32 dst) { uae_u32 value; __asm__ ("\n" " ldub [%1 + 1], %%o0 ! Get the X Flag\n" " subcc %%g0, %%o0, %%g0 ! Set the SPARC carry flag, if X set\n" " addxcc %2, %3, %0\n" " ldub [%1], %%o0 ! retreive the old NZVC flags\n" " and %%o0, 0x04, %%o0 ! but keep only Z flag\n" " addx %%o0, %%g0, %%o0 ! X,C flags\n" " bl,a .+8\n" " or %%o0, 0x08, %%o0 ! N flag\n" " bvs,a .+8\n" " or %%o0, 0x02, %%o0 ! V flag\n" " bnz,a .+8\n" " and %%o0, 0x0B, %%o0 ! Z flag cleared if result is non-zero\n" " stb %%o0, [%1]\n" " stb %%o0, [%1 + 1]\n" : "=&r" (value) : "r" (flags), "r" (dst), "r" (src) : "cc", "o0" ); return value; } #endif /* SPARC_V8_ASSEMBLY */ #ifdef SPARC_V9_ASSEMBLY static inline uae_u32 sparc_v9_flag_add_8(flag_struct *flags, uae_u32 src, uae_u32 dst) { uae_u32 value; __asm__ ("\n" " sll %2, 24, %%o0\n" " sll %3, 24, %%o1\n" " addcc %%o0, %%o1, %%o0\n" " rd %%ccr, %%o1\n" " srl %%o0, 24, %0\n" " stb %%o1, [%1]\n" " stb %%o1, [%1+1]\n" : "=&r" (value) : "r" (flags), "r" (dst), "r" (src) : "cc", "o0", "o1" ); return value; } static inline uae_u32 sparc_v9_flag_add_16(flag_struct *flags, uae_u32 src, uae_u32 dst) { uae_u32 value; __asm__ ("\n" " sll %2, 16, %%o0\n" " sll %3, 16, %%o1\n" " addcc %%o0, %%o1, %%o0\n" " rd %%ccr, %%o1\n" " srl %%o0, 16, %0\n" " stb %%o1, [%1]\n" " stb %%o1, [%1+1]\n" : "=&r" (value) : "r" (flags), "r" (dst), "r" (src) : "cc", "o0", "o1" ); return value; } static inline uae_u32 sparc_v9_flag_add_32(flag_struct *flags, uae_u32 src, uae_u32 dst) { uae_u32 value; __asm__ ("\n" " addcc %2, %3, %0\n" " rd %%ccr, %%o0\n" " stb %%o0, [%1]\n" " stb %%o0, [%1+1]\n" : "=&r" (value) : "r" (flags), "r" (dst), "r" (src) : "cc", "o0" ); return value; } static inline uae_u32 sparc_v9_flag_sub_8(flag_struct *flags, uae_u32 src, uae_u32 dst) { uae_u32 value; __asm__ ("\n" " sll %2, 24, %%o0\n" " sll %3, 24, %%o1\n" " subcc %%o0, %%o1, %%o0\n" " rd %%ccr, %%o1\n" " srl %%o0, 24, %0\n" " stb %%o1, [%1]\n" " stb %%o1, [%1+1]\n" : "=&r" (value) : "r" (flags), "r" (dst), "r" (src) : "cc", "o0", "o1" ); return value; } static inline uae_u32 sparc_v9_flag_sub_16(flag_struct *flags, uae_u32 src, uae_u32 dst) { uae_u32 value; __asm__ ("\n" " sll %2, 16, %%o0\n" " sll %3, 16, %%o1\n" " subcc %%o0, %%o1, %%o0\n" " rd %%ccr, %%o1\n" " srl %%o0, 16, %0\n" " stb %%o1, [%1]\n" " stb %%o1, [%1+1]\n" : "=&r" (value) : "r" (flags), "r" (dst), "r" (src) : "cc", "o0", "o1" ); return value; } static inline 
uae_u32 sparc_v9_flag_sub_32(flag_struct *flags, uae_u32 src, uae_u32 dst) { uae_u32 value; __asm__ ("\n" " subcc %2, %3, %0\n" " rd %%ccr, %%o0\n" " stb %%o0, [%1]\n" " stb %%o0, [%1+1]\n" : "=&r" (value) : "r" (flags), "r" (dst), "r" (src) : "cc", "o0" ); return value; } static inline void sparc_v9_flag_cmp_8(flag_struct *flags, uae_u32 src, uae_u32 dst) { __asm__ ("\n" " sll %1, 24, %%o0\n" " sll %2, 24, %%o1\n" " subcc %%o0, %%o1, %%g0\n" " rd %%ccr, %%o0\n" " stb %%o0, [%0]\n" : /* no outputs */ : "r" (flags), "r" (dst), "r" (src) : "cc", "o0", "o1" ); } static inline void sparc_v9_flag_cmp_16(flag_struct *flags, uae_u32 src, uae_u32 dst) { __asm__ ("\n" " sll %1, 16, %%o0\n" " sll %2, 16, %%o1\n" " subcc %%o0, %%o1, %%g0\n" " rd %%ccr, %%o0\n" " stb %%o0, [%0]\n" : /* no outputs */ : "r" (flags), "r" (dst), "r" (src) : "cc", "o0", "o1" ); } static inline void sparc_v9_flag_cmp_32(flag_struct *flags, uae_u32 src, uae_u32 dst) { __asm__ ("\n" " subcc %1, %2, %%g0\n" #if 0 " subcc %1, %2, %%o1\n" " srl %%o1, 31, %%o0\n" " sll %%o0, 3, %%o0\n" " addx %%o0, %%g0, %%o0\n" " bvs,a .+8\n" " or %%o0, 0x02, %%o0\n" " subcc %%g0, %%o1, %%g0\n" " addx %%g0, 7, %%o1\n" " and %%o1, 0x04, %%o1\n" " or %%o0, %%o1, %%o0\n" #endif #if 0 " subcc %1, %2, %%o1\n" " srl %%o1, 31, %%o0\n" " sll %%o0, 3, %%o0\n" " addx %%o0, %%g0, %%o0\n" " bvs,pt,a .+8\n" " or %%o0, 0x02, %%o0\n" " subcc %%g0, %%o1, %%g0\n" " addx %%g0, 7, %%o1\n" " and %%o1, 0x04, %%o1\n" " or %%o0, %%o1, %%o0\n" " stb %%o0, [%0]\n" #endif " rd %%ccr, %%o0\n" " stb %%o0, [%0]\n" : /* no outputs */ : "r" (flags), "r" (dst), "r" (src) : "cc", "o0", "o1" ); } #if 1 static inline void sparc_v9_flag_test_8(flag_struct *flags, uae_u32 val) { __asm__ ("\n" " sll %1, 24, %%o0\n" " subcc %%o0, %%g0, %%g0\n" " rd %%ccr, %%o0\n" " stb %%o0, [%0]\n" : /* no outputs */ : "r" (flags), "r" (val) : "cc", "o0" ); } static inline void sparc_v9_flag_test_16(flag_struct *flags, uae_u32 val) { __asm__ ("\n" " sll %1, 16, %%o0\n" " subcc %%o0, %%g0, %%g0\n" " rd %%ccr, %%o0\n" " stb %%o0, [%0]\n" : /* no outputs */ : "r" (flags), "r" (val) : "cc", "o0" ); } static inline void sparc_v9_flag_test_32(flag_struct *flags, uae_u32 val) { __asm__ ("\n" " subcc %1, %%g0, %%g0\n" " rd %%ccr, %%o0\n" " stb %%o0, [%0]\n" : /* no outputs */ : "r" (flags), "r" (val) : "cc", "o0" ); } #else static inline void sparc_v9_flag_test_8(flag_struct *flags, uae_u32 val) { __asm__ ("\n" " sll %1, 24, %%o0\n" " subcc %%o0, %%g0, %%o1\n" " srl %%o1, 31, %%o0\n" " sll %%o0, 3, %%o0\n" " addx %%o0, %%g0, %%o0\n" " bvs,a .+8\n" " or %%o0, 0x02, %%o0\n" " subcc %%g0, %%o1, %%g0\n" " addx %%g0, 7, %%o1\n" " and %%o1, 0x04, %%o1\n" " or %%o0, %%o1, %%o0\n" " stb %%o0, [%0]\n" : /* no outputs */ : "r" (flags), "r" (val) : "cc", "o0", "o1" ); } static inline void sparc_v9_flag_test_16(flag_struct *flags, uae_u32 val) { __asm__ ("\n" " sll %1, 16, %%o0\n" " subcc %%o0, %%g0, %%o1\n" " srl %%o1, 31, %%o0\n" " sll %%o0, 3, %%o0\n" " addx %%o0, %%g0, %%o0\n" " bvs,a .+8\n" " or %%o0, 0x02, %%o0\n" " subcc %%g0, %%o1, %%g0\n" " addx %%g0, 7, %%o1\n" " and %%o1, 0x04, %%o1\n" " or %%o0, %%o1, %%o0\n" " stb %%o0, [%0]\n" : /* no outputs */ : "r" (flags), "r" (val) : "cc", "o0", "o1" ); } static inline void sparc_v9_flag_test_32(flag_struct *flags, uae_u32 val) { __asm__ ("\n" " subcc %1, %%g0, %%o1\n" " srl %%o1, 31, %%o0\n" " sll %%o0, 3, %%o0\n" " addx %%o0, %%g0, %%o0\n" " bvs,a .+8\n" " or %%o0, 0x02, %%o0\n" " subcc %%g0, %%o1, %%g0\n" " addx %%g0, 7, %%o1\n" " and %%o1, 0x04, %%o1\n" " or 
%%o0, %%o1, %%o0\n" " stb %%o0, [%0]\n" : /* no outputs */ : "r" (flags), "r" (val) : "cc", "o0", "o1" ); } #endif static inline uae_u32 sparc_v9_flag_addx_8(flag_struct *flags, uae_u32 src, uae_u32 dst) { uae_u32 value; __asm__ ("\n" " ldub [%1 + 1], %%o1 ! Get the X Flag\n" " subcc %%g0, %%o1, %%g0 ! Set the SPARC carry flag, if X set\n" " addxcc %2, %3, %0\n" : "=&r" (value) : "r" (flags), "r" (dst), "r" (src) : "cc", "o0", "o1" ); return value; } static inline uae_u32 sparc_v9_flag_addx_32(flag_struct *flags, uae_u32 src, uae_u32 dst) { uae_u32 value; __asm__ ("\n" " ldub [%1 + 1], %%o0 ! Get the X Flag\n" " subcc %%g0, %%o0, %%g0 ! Set the SPARC carry flag, if X set\n" " addxcc %2, %3, %0\n" " ldub [%1], %%o0 ! retreive the old NZVC flags\n" " and %%o0, 0x04, %%o0 ! but keep only Z flag\n" " addx %%o0, %%g0, %%o0 ! X,C flags\n" " bl,a .+8\n" " or %%o0, 0x08, %%o0 ! N flag\n" " bvs,a .+8\n" " or %%o0, 0x02, %%o0 ! V flag\n" " bnz,a .+8\n" " and %%o0, 0x0B, %%o0 ! Z flag cleared if result is non-zero\n" " stb %%o0, [%1]\n" " stb %%o0, [%1 + 1]\n" : "=&r" (value) : "r" (flags), "r" (dst), "r" (src) : "cc", "o0" ); return value; } #endif /* SPARC_V9_ASSEMBLY */ #endif #else struct flag_struct { unsigned int c; unsigned int z; unsigned int n; unsigned int v; unsigned int x; }; extern struct flag_struct regflags; #define ZFLG (regflags.z) #define NFLG (regflags.n) #define CFLG (regflags.c) #define VFLG (regflags.v) #define XFLG (regflags.x) #define SET_CFLG(x) (CFLG = (x)) #define SET_NFLG(x) (NFLG = (x)) #define SET_VFLG(x) (VFLG = (x)) #define SET_ZFLG(x) (ZFLG = (x)) #define SET_XFLG(x) (XFLG = (x)) #define GET_CFLG CFLG #define GET_NFLG NFLG #define GET_VFLG VFLG #define GET_ZFLG ZFLG #define GET_XFLG XFLG #define CLEAR_CZNV do { \ SET_CFLG (0); \ SET_ZFLG (0); \ SET_NFLG (0); \ SET_VFLG (0); \ } while (0) #define COPY_CARRY (SET_XFLG (GET_CFLG)) static __inline__ int cctrue(const int cc) { switch(cc){ case 0: return 1; /* T */ case 1: return 0; /* F */ case 2: return !CFLG && !ZFLG; /* HI */ case 3: return CFLG || ZFLG; /* LS */ case 4: return !CFLG; /* CC */ case 5: return CFLG; /* CS */ case 6: return !ZFLG; /* NE */ case 7: return ZFLG; /* EQ */ case 8: return !VFLG; /* VC */ case 9: return VFLG; /* VS */ case 10:return !NFLG; /* PL */ case 11:return NFLG; /* MI */ case 12:return NFLG == VFLG; /* GE */ case 13:return NFLG != VFLG; /* LT */ case 14:return !ZFLG && (NFLG == VFLG); /* GT */ case 15:return ZFLG || (NFLG != VFLG); /* LE */ } return 0; } #endif /* OPTIMIZED_FLAGS */ #endif /* M68K_FLAGS_H */
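/* Editor's note: the GE/LT/GT/LE cases in the cctrue() variants above all rely
   on one trick: shift the N bit onto the V bit and XOR, so a single mask test
   decides whether N == V. A minimal standalone sketch of that idea (flag
   positions follow the x86 EFLAGS layout used above; this is an illustration,
   not emulator code): */
#include <assert.h>

#define N_BIT 0x80   /* sign flag, bit 7, as in the first x86 variant above */
#define V_BIT 0x800  /* overflow flag, bit 11 */

/* GE holds when N == V: shifting N up 4 bits aligns it with V, so the XOR
   leaves bit 11 clear exactly when the two flags agree. */
static int cc_ge(unsigned cznv)
{
    return (((cznv << 4) ^ cznv) & V_BIT) == 0;
}

int main(void)
{
    assert(cc_ge(0));             /* N=0, V=0 -> GE */
    assert(cc_ge(N_BIT | V_BIT)); /* N=1, V=1 -> GE */
    assert(!cc_ge(N_BIT));        /* N=1, V=0 -> LT */
    assert(!cc_ge(V_BIT));        /* N=0, V=1 -> LT */
    return 0;
}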
17,789
634
// Copyright 2000-2019 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.intellij.openapi.application.constraints;

import javax.annotation.Nullable;
import java.util.function.BooleanSupplier;

/**
 * NB: methods defined in this interface must be used with great care; it exists purely to expose the internals
 * required for implementing scheduling methods and coroutine dispatching support.
 *
 * @author eldar
 *
 * Converted from Kotlin.
 */
public interface ConstrainedExecutionScheduler {
  default void scheduleWithinConstraints(Runnable runnable) {
    scheduleWithinConstraints(runnable, null);
  }

  void scheduleWithinConstraints(Runnable runnable, @Nullable BooleanSupplier condition);
}
215
340
<filename>os/winapi/rom_file.c
//
// os/winapi/rom_file.c
//
// Functions for mapping ROM images into the address space.
//
// This file is subject to the terms and conditions defined in
// 'LICENSE', which is part of this source code package.
//

#include "rom_file.h"
#include <stddef.h>
#include <windows.h>

// Unmaps a ROM image from the host address space.
int close_rom_file(const struct rom_file *file) {
  UnmapViewOfFile(file->ptr);
  CloseHandle(file->mapping);
  CloseHandle(file->file);

  return 0;
}

// Maps a ROM image into the host address space; returns 0 on success
// and fills in the rom_file structure.
int open_rom_file(const char *path, struct rom_file *file) {
  void *ptr;
  size_t size;
  HANDLE mapping;
  HANDLE hfile;

  // Open the file, get its size.
  if ((hfile = CreateFile(path, GENERIC_READ, 0, NULL,
    OPEN_EXISTING, FILE_FLAG_RANDOM_ACCESS, NULL)) == INVALID_HANDLE_VALUE)
    return -1;

  size = GetFileSize(hfile, NULL);

  // Create a mapping and effectively enable it.
  if ((mapping = CreateFileMapping(hfile, 0, PAGE_READONLY,
    0, 0, NULL)) == NULL) {
    CloseHandle(hfile);
    return -2;
  }

  if ((ptr = MapViewOfFile(mapping, FILE_MAP_READ, 0, 0, 0)) == NULL) {
    CloseHandle(mapping);
    CloseHandle(hfile);
    return -3;
  }

  file->ptr = ptr;
  file->size = size;
  file->mapping = mapping;
  file->file = hfile;
  return 0;
}
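/* Editor's note: for contrast, a minimal POSIX sketch of the same
   open/size/map sequence. This is hypothetical and not part of this package
   (which only ships the WinAPI implementation here); the error codes simply
   mirror the function above. */
#include <fcntl.h>
#include <stddef.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

static int open_rom_posix(const char *path, void **ptr, size_t *size) {
  struct stat sb;
  int fd = open(path, O_RDONLY);

  if (fd < 0)
    return -1;

  if (fstat(fd, &sb) < 0) {
    close(fd);
    return -2;
  }

  *ptr = mmap(NULL, (size_t) sb.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
  if (*ptr == MAP_FAILED) {
    close(fd);
    return -3;
  }

  *size = (size_t) sb.st_size;
  close(fd); /* the mapping keeps the underlying file referenced */
  return 0;
}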
497
5,133
/*
 * Copyright MapStruct Authors.
 *
 * Licensed under the Apache License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
 */
package org.mapstruct.ap.internal.conversion;

import java.math.BigInteger;
import java.util.Set;

import org.mapstruct.ap.internal.model.common.ConversionContext;
import org.mapstruct.ap.internal.model.common.Type;
import org.mapstruct.ap.internal.util.NativeTypes;

import static org.mapstruct.ap.internal.conversion.ConversionUtils.bigInteger;
import static org.mapstruct.ap.internal.util.Collections.asSet;

/**
 * Conversion between {@link BigInteger} and wrappers of native number types.
 *
 * @author <NAME>
 */
public class BigIntegerToWrapperConversion extends SimpleConversion {

    private final Class<?> targetType;

    public BigIntegerToWrapperConversion(Class<?> targetType) {
        this.targetType = NativeTypes.getPrimitiveType( targetType );
    }

    @Override
    public String getToExpression(ConversionContext conversionContext) {
        return "<SOURCE>." + targetType.getName() + "Value()";
    }

    @Override
    public String getFromExpression(ConversionContext conversionContext) {
        String toLongValueStr = "";
        if ( targetType == float.class || targetType == double.class ) {
            toLongValueStr = ".longValue()";
        }

        return bigInteger( conversionContext ) + ".valueOf( <SOURCE>" + toLongValueStr + " )";
    }

    @Override
    protected Set<Type> getFromConversionImportTypes(ConversionContext conversionContext) {
        return asSet( conversionContext.getTypeFactory().getType( BigInteger.class ) );
    }

}
534
465
# -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# CAM signature/authentication error.
AUTHFAILURE = 'AuthFailure'

# The user is not authorized to perform this query.
AUTHFAILURE_CHECKRESOURCERESPONSECODEERROR = 'AuthFailure.CheckResourceResponseCodeError'

# Unauthorized operation.
AUTHFAILURE_UNAUTHORIZEDOPERATION = 'AuthFailure.UnauthorizedOperation'

# Operation failed.
FAILEDOPERATION = 'FailedOperation'

# Failed to download the audio file.
FAILEDOPERATION_ERRORDOWNFILE = 'FailedOperation.ErrorDownFile'

# Recognition failed.
FAILEDOPERATION_ERRORRECOGNIZE = 'FailedOperation.ErrorRecognize'

# Invalid TaskId.
FAILEDOPERATION_NOSUCHTASK = 'FailedOperation.NoSuchTask'

# The service has been suspended because the account is in arrears; please top up your Tencent Cloud account.
FAILEDOPERATION_SERVICEISOLATE = 'FailedOperation.ServiceIsolate'

# The account's free quota for this month has been used up.
FAILEDOPERATION_USERHASNOFREEAMOUNT = 'FailedOperation.UserHasNoFreeAmount'

# The service is not activated; please activate it in the speech recognition console on the Tencent Cloud website.
FAILEDOPERATION_USERNOTREGISTERED = 'FailedOperation.UserNotRegistered'

# Internal error.
INTERNALERROR = 'InternalError'

# Failed to initialize the configuration.
INTERNALERROR_ERRORCONFIGURE = 'InternalError.ErrorConfigure'

# Failed to create the log.
INTERNALERROR_ERRORCREATELOG = 'InternalError.ErrorCreateLog'

# Failed to download the audio file.
INTERNALERROR_ERRORDOWNFILE = 'InternalError.ErrorDownFile'

# Failed to create the array.
INTERNALERROR_ERRORFAILNEWPREQUEST = 'InternalError.ErrorFailNewprequest'

# Failed to write to the database.
INTERNALERROR_ERRORFAILWRITETODB = 'InternalError.ErrorFailWritetodb'

# The file cannot be opened.
INTERNALERROR_ERRORFILECANNOTOPEN = 'InternalError.ErrorFileCannotopen'

# Failed to get the route.
INTERNALERROR_ERRORGETROUTE = 'InternalError.ErrorGetRoute'

# Failed to create the log path.
INTERNALERROR_ERRORMAKELOGPATH = 'InternalError.ErrorMakeLogpath'

# Recognition failed.
INTERNALERROR_ERRORRECOGNIZE = 'InternalError.ErrorRecognize'

# Failed to access the database.
INTERNALERROR_FAILACCESSDATABASE = 'InternalError.FailAccessDatabase'

# Failed to access Redis.
INTERNALERROR_FAILACCESSREDIS = 'InternalError.FailAccessRedis'

# Parameter error.
INVALIDPARAMETER = 'InvalidParameter'

# Invalid request data length.
INVALIDPARAMETER_ERRORCONTENTLENGTH = 'InvalidParameter.ErrorContentlength'

# Incomplete parameters.
INVALIDPARAMETER_ERRORPARAMSMISSING = 'InvalidParameter.ErrorParamsMissing'

# Failed to parse the request data.
INVALIDPARAMETER_ERRORPARSEQUEST = 'InvalidParameter.ErrorParsequest'

# File encoding error.
INVALIDPARAMETER_FILEENCODE = 'InvalidParameter.FileEncode'

# Invalid vocabulary state.
INVALIDPARAMETER_INVALIDVOCABSTATE = 'InvalidParameter.InvalidVocabState'

# The model cannot be deleted in its current state.
INVALIDPARAMETER_MODELSTATE = 'InvalidParameter.ModelState'

# Invalid parameter value.
INVALIDPARAMETERVALUE = 'InvalidParameterValue'

# Invalid AppId.
INVALIDPARAMETERVALUE_ERRORINVALIDAPPID = 'InvalidParameterValue.ErrorInvalidAppid'

# Invalid ClientIp.
INVALIDPARAMETERVALUE_ERRORINVALIDCLIENTIP = 'InvalidParameterValue.ErrorInvalidClientip'

# Invalid EngSerViceType.
INVALIDPARAMETERVALUE_ERRORINVALIDENGSERVICE = 'InvalidParameterValue.ErrorInvalidEngservice'

# Invalid ProjectId.
INVALIDPARAMETERVALUE_ERRORINVALIDPROJECTID = 'InvalidParameterValue.ErrorInvalidProjectid'

# Invalid RequestId.
INVALIDPARAMETERVALUE_ERRORINVALIDREQUESTID = 'InvalidParameterValue.ErrorInvalidRequestid'

# Invalid SourceType.
INVALIDPARAMETERVALUE_ERRORINVALIDSOURCETYPE = 'InvalidParameterValue.ErrorInvalidSourcetype'

# Invalid SubserviceType.
INVALIDPARAMETERVALUE_ERRORINVALIDSUBSERVICETYPE = 'InvalidParameterValue.ErrorInvalidSubservicetype'

# Invalid Url.
INVALIDPARAMETERVALUE_ERRORINVALIDURL = 'InvalidParameterValue.ErrorInvalidUrl'

# Invalid UsrAudioKey.
INVALIDPARAMETERVALUE_ERRORINVALIDUSERAUDIOKEY = 'InvalidParameterValue.ErrorInvalidUseraudiokey'

# Unsupported audio encoding format.
INVALIDPARAMETERVALUE_ERRORINVALIDVOICEFORMAT = 'InvalidParameterValue.ErrorInvalidVoiceFormat'

# Invalid audio data.
INVALIDPARAMETERVALUE_ERRORINVALIDVOICEDATA = 'InvalidParameterValue.ErrorInvalidVoicedata'

# The audio duration exceeds the limit.
INVALIDPARAMETERVALUE_ERRORVOICEDATATOOLONG = 'InvalidParameterValue.ErrorVoicedataTooLong'

# Invalid parameter length.
INVALIDPARAMETERVALUE_INVALIDPARAMETERLENGTH = 'InvalidParameterValue.InvalidParameterLength'

# Invalid VocabId.
INVALIDPARAMETERVALUE_INVALIDVOCABID = 'InvalidParameterValue.InvalidVocabId'

# Invalid vocabulary state.
INVALIDPARAMETERVALUE_INVALIDVOCABSTATE = 'InvalidParameterValue.InvalidVocabState'

# Invalid word weight.
INVALIDPARAMETERVALUE_INVALIDWORDWEIGHT = 'InvalidParameterValue.InvalidWordWeight'

# Invalid WordWeightStr.
INVALIDPARAMETERVALUE_INVALIDWORDWEIGHTSTR = 'InvalidParameterValue.InvalidWordWeightStr'

# The model does not exist.
INVALIDPARAMETERVALUE_MODELID = 'InvalidParameterValue.ModelId'

# Invalid model state.
INVALIDPARAMETERVALUE_TOSTATE = 'InvalidParameterValue.ToState'

# The quota limit has been exceeded.
LIMITEXCEEDED = 'LimitExceeded'

# The number of created self-learning models has reached the limit.
LIMITEXCEEDED_CUSTOMIZATIONFULL = 'LimitExceeded.CustomizationFull'

# The number of online models has reached the limit.
LIMITEXCEEDED_ONLINEFULL = 'LimitExceeded.OnlineFull'

# The number of hotword vocabularies has reached the account limit.
LIMITEXCEEDED_VOCABFULL = 'LimitExceeded.VocabFull'

# Missing parameter error.
MISSINGPARAMETER = 'MissingParameter'

# The number of requests exceeds the rate limit.
REQUESTLIMITEXCEEDED = 'RequestLimitExceeded'

# Unknown parameter error.
UNKNOWNPARAMETER = 'UnknownParameter'
2,617
2,151
/****************************************************************************
 ****************************************************************************
 ***
 ***   This header was automatically generated from a Linux kernel header
 ***   of the same name, to make information necessary for userspace to
 ***   call into the kernel available to libc. It contains only constants,
 ***   structures, and macros generated from the original header, and thus,
 ***   contains no copyrightable information.
 ***
 ***   To edit the content of this header, modify the corresponding
 ***   source file (e.g. under external/kernel-headers/original/) then
 ***   run bionic/libc/kernel/tools/update_all.py
 ***
 ***   Any manual change here will be lost the next time this script will
 ***   be run. You've been warned!
 ***
 ****************************************************************************
 ****************************************************************************/
#ifndef __MIC_COMMON_H_
#define __MIC_COMMON_H_
#include <linux/virtio_ring.h>
#define __mic_align(a,x) (((a) + (x) - 1) & ~((x) - 1))
struct mic_device_desc {
  __s8 type;
  __u8 num_vq;
  __u8 feature_len;
  __u8 config_len;
  __u8 status;
  __le64 config[0];
} __attribute__((aligned(8)));
struct mic_device_ctrl {
  __le64 vdev;
  __u8 config_change;
  __u8 vdev_reset;
  __u8 guest_ack;
  __u8 host_ack;
  __u8 used_address_updated;
  __s8 c2h_vdev_db;
  __s8 h2c_vdev_db;
} __attribute__((aligned(8)));
struct mic_bootparam {
  __le32 magic;
  __s8 h2c_config_db;
  __u8 node_id;
  __u8 h2c_scif_db;
  __u8 c2h_scif_db;
  __u64 scif_host_dma_addr;
  __u64 scif_card_dma_addr;
} __attribute__((aligned(8)));
struct mic_device_page {
  struct mic_bootparam bootparam;
  struct mic_device_desc desc[0];
};
struct mic_vqconfig {
  __le64 address;
  __le64 used_address;
  __le16 num;
} __attribute__((aligned(8)));
#define MIC_VIRTIO_RING_ALIGN 4096
#define MIC_MAX_VRINGS 4
#define MIC_VRING_ENTRIES 128
#define MIC_MAX_VRING_ENTRIES 128
#define MIC_MAX_DESC_BLK_SIZE 256
struct _mic_vring_info {
  __u16 avail_idx;
  __le32 magic;
};
struct mic_vring {
  struct vring vr;
  struct _mic_vring_info * info;
  void * va;
  int len;
};
#define mic_aligned_desc_size(d) __mic_align(mic_desc_size(d), 8)
#ifndef INTEL_MIC_CARD
#endif
#define MIC_DP_SIZE 4096
#define MIC_MAGIC 0xc0ffee00
enum mic_states {
  MIC_READY = 0,
  MIC_BOOTING,
  MIC_ONLINE,
  MIC_SHUTTING_DOWN,
  MIC_RESETTING,
  MIC_RESET_FAILED,
  MIC_LAST
};
enum mic_status {
  MIC_NOP = 0,
  MIC_CRASHED,
  MIC_HALTED,
  MIC_POWER_OFF,
  MIC_RESTART,
  MIC_STATUS_LAST
};
#endif
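/* Editor's note: __mic_align above is the standard power-of-two round-up
   idiom. A standalone sanity check (not part of the kernel header; x must be
   a power of two or the mask trick breaks): */
#include <assert.h>

#define align_up(a, x) (((a) + (x) - 1) & ~((x) - 1))

int main(void)
{
    assert(align_up(0, 8) == 0);
    assert(align_up(1, 8) == 8);
    assert(align_up(8, 8) == 8);
    assert(align_up(4097, 4096) == 8192); /* MIC_DP_SIZE-style page rounding */
    return 0;
}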
937
508
<reponame>sjquant/trading_calendars<gh_stars>100-1000
import pandas as pd
from pytz import UTC


def T(x):
    return pd.Timestamp(x, tz=UTC)
61
3,200
<filename>tests/st/ops/cpu/test_boundingbox_decode_op.py<gh_stars>1000+
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

import numpy as np
import pytest

import mindspore
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P


class NetBoundingBoxDecode(nn.Cell):
    def __init__(self, means=(0.0, 0.0, 0.0, 0.0), stds=(1.0, 1.0, 1.0, 1.0)):
        super(NetBoundingBoxDecode, self).__init__()
        self.decode = P.BoundingBoxDecode(max_shape=(768, 1280), means=means, stds=stds,
                                          wh_ratio_clip=0.016)

    def construct(self, anchor, groundtruth):
        return self.decode(anchor, groundtruth)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_boundingbox_decode():
    anchor = np.array([[4, 1, 2, 1], [2, 2, 2, 3]], np.float32)
    deltas = np.array([[3, 1, 2, 2], [1, 2, 1, 4]], np.float32)
    means = (0.1, 0.1, 0.2, 0.2)
    stds = (2.0, 2.0, 3.0, 3.0)
    anchor_box = Tensor(anchor, mindspore.float32)
    deltas_box = Tensor(deltas, mindspore.float32)
    expect_deltas = np.array([[28.6500, 0.0000, 0.0000, 33.8500],
                              [0.0000, 0.0000, 15.8663, 72.7000]], np.float32)

    error = np.ones(shape=[2, 4]) * 1.0e-4

    context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
    boundingbox_decode = NetBoundingBoxDecode(means, stds)
    output = boundingbox_decode(anchor_box, deltas_box)
    diff = output.asnumpy() - expect_deltas
    assert np.all(abs(diff) < error)

    context.set_context(mode=context.PYNATIVE_MODE, device_target='CPU')
    boundingbox_decode = NetBoundingBoxDecode(means, stds)
    output = boundingbox_decode(anchor_box, deltas_box)
    diff = output.asnumpy() - expect_deltas
    assert np.all(abs(diff) < error)
985
679
/************************************************************** * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. * *************************************************************/ #ifndef SFX_ITEMWRAPPER_HXX #define SFX_ITEMWRAPPER_HXX #include "sal/config.h" #include "sfx2/dllapi.h" #include <svl/eitem.hxx> #include <svl/stritem.hxx> #include <svl/intitem.hxx> #include <svl/itemset.hxx> // ============================================================================ namespace sfx { // ============================================================================ // Helpers // ============================================================================ class SFX2_DLLPUBLIC ItemWrapperHelper { public: /** Returns the WID of the passed SID in the item set. */ static sal_uInt16 GetWhichId( const SfxItemSet& rItemSet, sal_uInt16 nSlot ); /** Returns true, if the passed item set supports the SID. */ static bool IsKnownItem( const SfxItemSet& rItemSet, sal_uInt16 nSlot ); /** Returns an item from an item set, if it is not in "don't know" state. @return Pointer to item, or 0 if it has "don't know" state. */ static const SfxPoolItem* GetUniqueItem( const SfxItemSet& rItemSet, sal_uInt16 nSlot ); /** Returns the default item from the pool of the passed item set. */ static const SfxPoolItem& GetDefaultItem( const SfxItemSet& rItemSet, sal_uInt16 nSlot ); /** Removes an item from rDestSet, if it is default in rOldSet. */ static void RemoveDefaultItem( SfxItemSet& rDestSet, const SfxItemSet& rOldSet, sal_uInt16 nSlot ); }; // ============================================================================ // Item wrappers // ============================================================================ /** Base class wrapping a single item. Objects of this class store the SID of an item. Exchanging data with the item is done with the virtual functions GetItemValue() and SetItemValue(). Derived classes implement these functions according to the item type they work on. The current tree of base classes/templates and standard item wrappers: SingleItemWrapper< ItemT, ValueT > | +- ValueItemWrapper< ItemT, ValueT > [1] | | | +- BoolItemWrapper [1] | +- Int16ItemWrapper [1] | +- UInt16ItemWrapper [1] | +- Int32ItemWrapper [1] | +- UInt32ItemWrapper [1] | +- StringItemWrapper [1] | +- IdentItemWrapper< ItemT > [1] Notes: [1] Standard wrappers ready to use. See documentation of class ItemConnectionBase for more details. */ template< typename ItemT, typename ValueT > class SingleItemWrapper { public: typedef ItemT ItemType; typedef ValueT ItemValueType; typedef SingleItemWrapper< ItemT, ValueT > SingleItemWrapperType; inline explicit SingleItemWrapper( sal_uInt16 nSlot ) : mnSlot( nSlot ) {} /** Returns the SID this wrapper works on. 
*/ inline sal_uInt16 GetSlotId() const { return mnSlot; } /** Returns the item from an item set, if it is not in "don't know" state. @descr Similar to ItemWrapperHelper::GetUniqueItem(), but works always with the own SID and returns the correct item type. @return Pointer to item, or 0 if it has "don't know" state. */ const ItemT* GetUniqueItem( const SfxItemSet& rItemSet ) const; /** Returns the default item from the pool of the passed item set. @descr Similar to ItemWrapperHelper::GetDefaultItem(), but works always with the own SID and returns the correct item type. */ const ItemT& GetDefaultItem( const SfxItemSet& rItemSet ) const; /** Derived classes return the value of the passed item. */ virtual ValueT GetItemValue( const ItemT& rItem ) const = 0; /** Derived classes set the value at the passed item. */ virtual void SetItemValue( ItemT& rItem, ValueT aValue ) const = 0; private: sal_uInt16 mnSlot; /// The SID of this item wrapper. }; // ============================================================================ /** An item wrapper usable for most types of items. The item type must support the following functions: - ValueT ItemT::GetValue() const - void ItemT::SetValue( ValueT ) The template parameter InternalValueT can be used to specify the internal value type of the item, if it differs from ValueT. This parameter has to be used to prevent compiler warnings. */ template< typename ItemT, typename ValueT, typename InternalValueT = ValueT > class ValueItemWrapper : public SingleItemWrapper< ItemT, ValueT > { public: inline explicit ValueItemWrapper( sal_uInt16 nSlot ) : SingleItemWrapper< ItemT, ValueT >( nSlot ) {} virtual ValueT GetItemValue( const ItemT& rItem ) const { return static_cast< ValueT >( rItem.GetValue() ); } virtual void SetItemValue( ItemT& rItem, ValueT aValue ) const { rItem.SetValue( static_cast< InternalValueT >( aValue ) ); } }; // ---------------------------------------------------------------------------- typedef ValueItemWrapper< SfxBoolItem, sal_Bool > BoolItemWrapper; typedef ValueItemWrapper< SfxInt16Item, sal_Int16 > Int16ItemWrapper; typedef ValueItemWrapper< SfxUInt16Item, sal_uInt16 > UInt16ItemWrapper; typedef ValueItemWrapper< SfxInt32Item, sal_Int32 > Int32ItemWrapper; typedef ValueItemWrapper< SfxUInt32Item, sal_uInt32 > UInt32ItemWrapper; typedef ValueItemWrapper< SfxStringItem, const String& > StringItemWrapper; // ============================================================================ /** An item wrapper that uses the item itself as value. 
*/
template< typename ItemT >
class IdentItemWrapper : public SingleItemWrapper< ItemT, const ItemT& >
{
public:
    inline explicit     IdentItemWrapper( sal_uInt16 nSlot ) :
                            SingleItemWrapper< ItemT, const ItemT& >( nSlot ) {}

    virtual const ItemT& GetItemValue( const ItemT& rItem ) const
                            { return rItem; }
    virtual void        SetItemValue( ItemT& rItem, const ItemT& rValue ) const
                            { rItem = rValue; }
};

// ============================================================================
// ============================================================================
//               ***  Implementation of template functions  ***
// ============================================================================
// ============================================================================

// ============================================================================
// Item wrappers
// ============================================================================

template< typename ItemT, typename ValueT >
const ItemT* SingleItemWrapper< ItemT, ValueT >::GetUniqueItem( const SfxItemSet& rItemSet ) const
{
    return static_cast< const ItemT* >( ItemWrapperHelper::GetUniqueItem( rItemSet, mnSlot ) );
}

template< typename ItemT, typename ValueT >
const ItemT& SingleItemWrapper< ItemT, ValueT >::GetDefaultItem( const SfxItemSet& rItemSet ) const
{
    return static_cast< const ItemT& >( ItemWrapperHelper::GetDefaultItem( rItemSet, mnSlot ) );
}

// ============================================================================

} // namespace sfx

#endif
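// Editor's note: a sketch of how client code might drive one of these
// wrappers. SID_MY_FLAG and rSet are placeholders for a real slot id and a
// populated SfxItemSet; this illustrates the read/modify/write pattern, not
// an actual dialog.
sfx::BoolItemWrapper aWrapper( SID_MY_FLAG );
if( const SfxBoolItem* pItem = aWrapper.GetUniqueItem( rSet ) )
{
    sal_Bool bOld = aWrapper.GetItemValue( *pItem );   // read through the wrapper
    SfxBoolItem aNew( aWrapper.GetSlotId(), !bOld );   // flip the value
    rSet.Put( aNew );                                  // write it back
}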
2,679
4,538
<reponame>wstong999/AliOS-Things<filename>components/ucloud_ai/src/model/aliyun-openapi/facebody/src/model/SearchFaceRequest.cc
/*
 * Copyright 2009-2017 Alibaba Cloud All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <alibabacloud/facebody/model/SearchFaceRequest.h>

using AlibabaCloud::Facebody::Model::SearchFaceRequest;

SearchFaceRequest::SearchFaceRequest()
    : RpcServiceRequest("facebody", "2019-12-30", "SearchFace")
{
    setMethod(HttpRequest::Method::Post);
}

SearchFaceRequest::~SearchFaceRequest() {}

std::string SearchFaceRequest::getDbNames() const
{
    return dbNames_;
}

void SearchFaceRequest::setDbNames(const std::string& dbNames)
{
    dbNames_ = dbNames;
    setParameter("DbNames", dbNames);
}

std::string SearchFaceRequest::getDbName() const
{
    return dbName_;
}

void SearchFaceRequest::setDbName(const std::string& dbName)
{
    dbName_ = dbName;
    setBodyParameter("DbName", dbName);
}

std::string SearchFaceRequest::getImageUrl() const
{
    return imageUrl_;
}

void SearchFaceRequest::setImageUrl(const std::string& imageUrl)
{
    imageUrl_ = imageUrl;
    setBodyParameter("ImageUrl", imageUrl);
}

int SearchFaceRequest::getLimit() const
{
    return limit_;
}

void SearchFaceRequest::setLimit(int limit)
{
    limit_ = limit;
    setBodyParameter("Limit", std::to_string(limit));
}
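// Editor's note: a minimal caller-side sketch of the request defined above.
// The database name, image URL, and limit are made-up values, and the final
// client call is only indicated in a comment, since the client class is not
// part of this file.
AlibabaCloud::Facebody::Model::SearchFaceRequest request;
request.setDbName("my-face-db");                   // body parameter "DbName"
request.setImageUrl("https://example.com/a.jpg");  // body parameter "ImageUrl"
request.setLimit(5);                               // body parameter "Limit"
// The request would then be handed to the SDK's Facebody client (e.g. a
// searchFace(request) call), which serializes the parameters set above.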
624
406
<gh_stars>100-1000
package com.braintreepayments.api;

import android.content.Context;
import android.os.SystemClock;

import java.util.concurrent.TimeUnit;

public class AnalyticsDatabaseTestUtils {

    public static void clearAllEvents(Context context) {
        AnalyticsDatabase database = AnalyticsDatabase.getInstance(context.getApplicationContext());
        database.getWritableDatabase().delete("analytics", null, null);
        database.close();
    }

    /**
     * Waits for the AnalyticsDatabase AsyncTask queue to empty before continuing.
     *
     * @param database the database we are awaiting operations on
     * @throws InterruptedException if the queue does not drain within the timeout
     */
    public static void awaitTasksFinished(AnalyticsDatabase database) throws InterruptedException {
        long timeoutTimestamp = System.currentTimeMillis() + TimeUnit.SECONDS.toMillis(5);

        while (database.mTaskSet.size() > 0) {
            if (System.currentTimeMillis() > timeoutTimestamp) {
                throw new InterruptedException("Timeout exceeded waiting for async task queue to complete");
            }
            SystemClock.sleep(5);
        }
    }
}
389
2,389
{
  "id": "image-redundant-alt",
  "selector": "img",
  "tags": ["cat.text-alternatives", "best-practice"],
  "metadata": {
    "description": "Ensure image alternative is not repeated as text",
    "help": "Alternative text of images should not be repeated as text"
  },
  "all": [],
  "any": [],
  "none": ["duplicate-img-label"]
}
117
409
<gh_stars>100-1000
/*
    This file is a part of libcds - Concurrent Data Structures library

    (C) Copyright <NAME> (<EMAIL>) 2006-2016

    Source code repo: http://github.com/khizmax/libcds/
    Download: http://sourceforge.net/projects/libcds/files/

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice, this
      list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
    USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#ifndef CDSUNIT_LOCK_WIN32_LOCK_H
#define CDSUNIT_LOCK_WIN32_LOCK_H

#if defined(_WIN32) || defined(_WIN64)

#define NOMINMAX
#include <windows.h>

#define UNIT_LOCK_WIN_CS

namespace lock {
    namespace win {
        // Win32 critical section
        class CS {
            CRITICAL_SECTION m_cs;
        public:
            CS()    { ::InitializeCriticalSection( &m_cs ); }
            ~CS()   { ::DeleteCriticalSection( &m_cs ); }

            void lock()     { ::EnterCriticalSection( &m_cs ); }
            void unlock()   { ::LeaveCriticalSection( &m_cs ); }
            bool try_lock() { return ::TryEnterCriticalSection( &m_cs ) != 0; }
        };

        class Mutex {
            HANDLE m_hMutex;
        public:
            Mutex()     { m_hMutex = ::CreateMutex( nullptr, false, nullptr ); }
            ~Mutex()    { ::CloseHandle( m_hMutex ); }

            void lock()     { ::WaitForSingleObject( m_hMutex, INFINITE ); }
            void unlock()   { ::ReleaseMutex( m_hMutex ); }
            bool try_lock() { return ::WaitForSingleObject( m_hMutex, 0 ) == WAIT_OBJECT_0; }
        };
    } // namespace win
} // namespace lock

#endif // defined(_WIN32) || defined(_WIN64)
#endif // #ifndef CDSUNIT_LOCK_WIN32_LOCK_H
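// Editor's note: both wrappers above expose lock()/unlock()/try_lock(), i.e.
// the standard Lockable shape, so they compose with std::lock_guard. A small
// usage sketch (example() and counter are illustrative, not part of libcds):
#include <mutex> // std::lock_guard

#if defined(_WIN32) || defined(_WIN64)
void example(lock::win::CS& cs, int& counter)
{
    std::lock_guard<lock::win::CS> guard(cs); // EnterCriticalSection on entry
    ++counter;                                // protected region
}                                             // LeaveCriticalSection on scope exit
#endif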
1,100
1,444
<gh_stars>1000+
package mage.view;

import java.io.Serializable;
import mage.game.draft.DraftPlayer;

/**
 *
 * @author <EMAIL>
 */
public class DraftPickView implements Serializable {
    private static final long serialVersionUID = 1L;

    protected SimpleCardsView booster;
    protected SimpleCardsView picks;
    protected boolean picking;
    protected int timeout;

    public DraftPickView(DraftPlayer player, int timeout) {
        this.booster = new SimpleCardsView(player.getBooster(), false);
        this.picks = new SimpleCardsView(player.getDeck().getSideboard(), false);
        this.picking = player.isPicking();
        this.timeout = timeout;
    }

    public SimpleCardsView getBooster() {
        return booster;
    }

    public SimpleCardsView getPicks() {
        return picks;
    }

    public boolean isPicking() {
        return this.picking;
    }

    public int getTimeout() {
        return timeout;
    }
}
343
2,812
#include <iostream> #include <opencv2/core/core.hpp> #include <opencv2/features2d/features2d.hpp> #include <opencv2/highgui/highgui.hpp> #include <opencv2/calib3d/calib3d.hpp> #include <Eigen/Core> #include <g2o/core/base_vertex.h> #include <g2o/core/base_unary_edge.h> #include <g2o/core/sparse_optimizer.h> #include <g2o/core/block_solver.h> #include <g2o/core/solver.h> #include <g2o/core/optimization_algorithm_gauss_newton.h> #include <g2o/solvers/dense/linear_solver_dense.h> #include <sophus/se3.hpp> #include <chrono> using namespace std; using namespace cv; void find_feature_matches( const Mat &img_1, const Mat &img_2, std::vector<KeyPoint> &keypoints_1, std::vector<KeyPoint> &keypoints_2, std::vector<DMatch> &matches); // 像素坐标转相机归一化坐标 Point2d pixel2cam(const Point2d &p, const Mat &K); // BA by g2o typedef vector<Eigen::Vector2d, Eigen::aligned_allocator<Eigen::Vector2d>> VecVector2d; typedef vector<Eigen::Vector3d, Eigen::aligned_allocator<Eigen::Vector3d>> VecVector3d; void bundleAdjustmentG2O( const VecVector3d &points_3d, const VecVector2d &points_2d, const Mat &K, Sophus::SE3d &pose ); // BA by gauss-newton void bundleAdjustmentGaussNewton( const VecVector3d &points_3d, const VecVector2d &points_2d, const Mat &K, Sophus::SE3d &pose ); int main(int argc, char **argv) { if (argc != 5) { cout << "usage: pose_estimation_3d2d img1 img2 depth1 depth2" << endl; return 1; } //-- 读取图像 Mat img_1 = imread(argv[1], CV_LOAD_IMAGE_COLOR); Mat img_2 = imread(argv[2], CV_LOAD_IMAGE_COLOR); assert(img_1.data && img_2.data && "Can not load images!"); vector<KeyPoint> keypoints_1, keypoints_2; vector<DMatch> matches; find_feature_matches(img_1, img_2, keypoints_1, keypoints_2, matches); cout << "一共找到了" << matches.size() << "组匹配点" << endl; // 建立3D点 Mat d1 = imread(argv[3], CV_LOAD_IMAGE_UNCHANGED); // 深度图为16位无符号数,单通道图像 Mat K = (Mat_<double>(3, 3) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1); vector<Point3f> pts_3d; vector<Point2f> pts_2d; for (DMatch m:matches) { ushort d = d1.ptr<unsigned short>(int(keypoints_1[m.queryIdx].pt.y))[int(keypoints_1[m.queryIdx].pt.x)]; if (d == 0) // bad depth continue; float dd = d / 5000.0; Point2d p1 = pixel2cam(keypoints_1[m.queryIdx].pt, K); pts_3d.push_back(Point3f(p1.x * dd, p1.y * dd, dd)); pts_2d.push_back(keypoints_2[m.trainIdx].pt); } cout << "3d-2d pairs: " << pts_3d.size() << endl; chrono::steady_clock::time_point t1 = chrono::steady_clock::now(); Mat r, t; solvePnP(pts_3d, pts_2d, K, Mat(), r, t, false); // 调用OpenCV 的 PnP 求解,可选择EPNP,DLS等方法 Mat R; cv::Rodrigues(r, R); // r为旋转向量形式,用Rodrigues公式转换为矩阵 chrono::steady_clock::time_point t2 = chrono::steady_clock::now(); chrono::duration<double> time_used = chrono::duration_cast<chrono::duration<double>>(t2 - t1); cout << "solve pnp in opencv cost time: " << time_used.count() << " seconds." << endl; cout << "R=" << endl << R << endl; cout << "t=" << endl << t << endl; VecVector3d pts_3d_eigen; VecVector2d pts_2d_eigen; for (size_t i = 0; i < pts_3d.size(); ++i) { pts_3d_eigen.push_back(Eigen::Vector3d(pts_3d[i].x, pts_3d[i].y, pts_3d[i].z)); pts_2d_eigen.push_back(Eigen::Vector2d(pts_2d[i].x, pts_2d[i].y)); } cout << "calling bundle adjustment by gauss newton" << endl; Sophus::SE3d pose_gn; t1 = chrono::steady_clock::now(); bundleAdjustmentGaussNewton(pts_3d_eigen, pts_2d_eigen, K, pose_gn); t2 = chrono::steady_clock::now(); time_used = chrono::duration_cast<chrono::duration<double>>(t2 - t1); cout << "solve pnp by gauss newton cost time: " << time_used.count() << " seconds." 
<< endl; cout << "calling bundle adjustment by g2o" << endl; Sophus::SE3d pose_g2o; t1 = chrono::steady_clock::now(); bundleAdjustmentG2O(pts_3d_eigen, pts_2d_eigen, K, pose_g2o); t2 = chrono::steady_clock::now(); time_used = chrono::duration_cast<chrono::duration<double>>(t2 - t1); cout << "solve pnp by g2o cost time: " << time_used.count() << " seconds." << endl; return 0; } void find_feature_matches(const Mat &img_1, const Mat &img_2, std::vector<KeyPoint> &keypoints_1, std::vector<KeyPoint> &keypoints_2, std::vector<DMatch> &matches) { //-- 初始化 Mat descriptors_1, descriptors_2; // used in OpenCV3 Ptr<FeatureDetector> detector = ORB::create(); Ptr<DescriptorExtractor> descriptor = ORB::create(); // use this if you are in OpenCV2 // Ptr<FeatureDetector> detector = FeatureDetector::create ( "ORB" ); // Ptr<DescriptorExtractor> descriptor = DescriptorExtractor::create ( "ORB" ); Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming"); //-- 第一步:检测 Oriented FAST 角点位置 detector->detect(img_1, keypoints_1); detector->detect(img_2, keypoints_2); //-- 第二步:根据角点位置计算 BRIEF 描述子 descriptor->compute(img_1, keypoints_1, descriptors_1); descriptor->compute(img_2, keypoints_2, descriptors_2); //-- 第三步:对两幅图像中的BRIEF描述子进行匹配,使用 Hamming 距离 vector<DMatch> match; // BFMatcher matcher ( NORM_HAMMING ); matcher->match(descriptors_1, descriptors_2, match); //-- 第四步:匹配点对筛选 double min_dist = 10000, max_dist = 0; //找出所有匹配之间的最小距离和最大距离, 即是最相似的和最不相似的两组点之间的距离 for (int i = 0; i < descriptors_1.rows; i++) { double dist = match[i].distance; if (dist < min_dist) min_dist = dist; if (dist > max_dist) max_dist = dist; } printf("-- Max dist : %f \n", max_dist); printf("-- Min dist : %f \n", min_dist); //当描述子之间的距离大于两倍的最小距离时,即认为匹配有误.但有时候最小距离会非常小,设置一个经验值30作为下限. for (int i = 0; i < descriptors_1.rows; i++) { if (match[i].distance <= max(2 * min_dist, 30.0)) { matches.push_back(match[i]); } } } Point2d pixel2cam(const Point2d &p, const Mat &K) { return Point2d ( (p.x - K.at<double>(0, 2)) / K.at<double>(0, 0), (p.y - K.at<double>(1, 2)) / K.at<double>(1, 1) ); } void bundleAdjustmentGaussNewton( const VecVector3d &points_3d, const VecVector2d &points_2d, const Mat &K, Sophus::SE3d &pose) { typedef Eigen::Matrix<double, 6, 1> Vector6d; const int iterations = 10; double cost = 0, lastCost = 0; double fx = K.at<double>(0, 0); double fy = K.at<double>(1, 1); double cx = K.at<double>(0, 2); double cy = K.at<double>(1, 2); for (int iter = 0; iter < iterations; iter++) { Eigen::Matrix<double, 6, 6> H = Eigen::Matrix<double, 6, 6>::Zero(); Vector6d b = Vector6d::Zero(); cost = 0; // compute cost for (int i = 0; i < points_3d.size(); i++) { Eigen::Vector3d pc = pose * points_3d[i]; double inv_z = 1.0 / pc[2]; double inv_z2 = inv_z * inv_z; Eigen::Vector2d proj(fx * pc[0] / pc[2] + cx, fy * pc[1] / pc[2] + cy); Eigen::Vector2d e = points_2d[i] - proj; cost += e.squaredNorm(); Eigen::Matrix<double, 2, 6> J; J << -fx * inv_z, 0, fx * pc[0] * inv_z2, fx * pc[0] * pc[1] * inv_z2, -fx - fx * pc[0] * pc[0] * inv_z2, fx * pc[1] * inv_z, 0, -fy * inv_z, fy * pc[1] * inv_z2, fy + fy * pc[1] * pc[1] * inv_z2, -fy * pc[0] * pc[1] * inv_z2, -fy * pc[0] * inv_z; H += J.transpose() * J; b += -J.transpose() * e; } Vector6d dx; dx = H.ldlt().solve(b); if (isnan(dx[0])) { cout << "result is nan!" 
<< endl; break; } if (iter > 0 && cost >= lastCost) { // cost increase, update is not good cout << "cost: " << cost << ", last cost: " << lastCost << endl; break; } // update your estimation pose = Sophus::SE3d::exp(dx) * pose; lastCost = cost; cout << "iteration " << iter << " cost=" << std::setprecision(12) << cost << endl; if (dx.norm() < 1e-6) { // converge break; } } cout << "pose by g-n: \n" << pose.matrix() << endl; } /// vertex and edges used in g2o ba class VertexPose : public g2o::BaseVertex<6, Sophus::SE3d> { public: EIGEN_MAKE_ALIGNED_OPERATOR_NEW; virtual void setToOriginImpl() override { _estimate = Sophus::SE3d(); } /// left multiplication on SE3 virtual void oplusImpl(const double *update) override { Eigen::Matrix<double, 6, 1> update_eigen; update_eigen << update[0], update[1], update[2], update[3], update[4], update[5]; _estimate = Sophus::SE3d::exp(update_eigen) * _estimate; } virtual bool read(istream &in) override {} virtual bool write(ostream &out) const override {} }; class EdgeProjection : public g2o::BaseUnaryEdge<2, Eigen::Vector2d, VertexPose> { public: EIGEN_MAKE_ALIGNED_OPERATOR_NEW; EdgeProjection(const Eigen::Vector3d &pos, const Eigen::Matrix3d &K) : _pos3d(pos), _K(K) {} virtual void computeError() override { const VertexPose *v = static_cast<VertexPose *> (_vertices[0]); Sophus::SE3d T = v->estimate(); Eigen::Vector3d pos_pixel = _K * (T * _pos3d); pos_pixel /= pos_pixel[2]; _error = _measurement - pos_pixel.head<2>(); } virtual void linearizeOplus() override { const VertexPose *v = static_cast<VertexPose *> (_vertices[0]); Sophus::SE3d T = v->estimate(); Eigen::Vector3d pos_cam = T * _pos3d; double fx = _K(0, 0); double fy = _K(1, 1); double cx = _K(0, 2); double cy = _K(1, 2); double X = pos_cam[0]; double Y = pos_cam[1]; double Z = pos_cam[2]; double Z2 = Z * Z; _jacobianOplusXi << -fx / Z, 0, fx * X / Z2, fx * X * Y / Z2, -fx - fx * X * X / Z2, fx * Y / Z, 0, -fy / Z, fy * Y / (Z * Z), fy + fy * Y * Y / Z2, -fy * X * Y / Z2, -fy * X / Z; } virtual bool read(istream &in) override {} virtual bool write(ostream &out) const override {} private: Eigen::Vector3d _pos3d; Eigen::Matrix3d _K; }; void bundleAdjustmentG2O( const VecVector3d &points_3d, const VecVector2d &points_2d, const Mat &K, Sophus::SE3d &pose) { // 构建图优化,先设定g2o typedef g2o::BlockSolver<g2o::BlockSolverTraits<6, 3>> BlockSolverType; // pose is 6, landmark is 3 typedef g2o::LinearSolverDense<BlockSolverType::PoseMatrixType> LinearSolverType; // 线性求解器类型 // 梯度下降方法,可以从GN, LM, DogLeg 中选 auto solver = new g2o::OptimizationAlgorithmGaussNewton( g2o::make_unique<BlockSolverType>(g2o::make_unique<LinearSolverType>())); g2o::SparseOptimizer optimizer; // 图模型 optimizer.setAlgorithm(solver); // 设置求解器 optimizer.setVerbose(true); // 打开调试输出 // vertex VertexPose *vertex_pose = new VertexPose(); // camera vertex_pose vertex_pose->setId(0); vertex_pose->setEstimate(Sophus::SE3d()); optimizer.addVertex(vertex_pose); // K Eigen::Matrix3d K_eigen; K_eigen << K.at<double>(0, 0), K.at<double>(0, 1), K.at<double>(0, 2), K.at<double>(1, 0), K.at<double>(1, 1), K.at<double>(1, 2), K.at<double>(2, 0), K.at<double>(2, 1), K.at<double>(2, 2); // edges int index = 1; for (size_t i = 0; i < points_2d.size(); ++i) { auto p2d = points_2d[i]; auto p3d = points_3d[i]; EdgeProjection *edge = new EdgeProjection(p3d, K_eigen); edge->setId(index); edge->setVertex(0, vertex_pose); edge->setMeasurement(p2d); edge->setInformation(Eigen::Matrix2d::Identity()); optimizer.addEdge(edge); index++; } chrono::steady_clock::time_point t1 = 
chrono::steady_clock::now(); optimizer.setVerbose(true); optimizer.initializeOptimization(); optimizer.optimize(10); chrono::steady_clock::time_point t2 = chrono::steady_clock::now(); chrono::duration<double> time_used = chrono::duration_cast<chrono::duration<double>>(t2 - t1); cout << "optimization costs time: " << time_used.count() << " seconds." << endl; cout << "pose estimated by g2o =\n" << vertex_pose->estimate().matrix() << endl; pose = vertex_pose->estimate(); }
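// ---------------------------------------------------------------------------
// Minimal usage sketch (not part of the original program): drives
// bundleAdjustmentGaussNewton() with a handful of made-up landmark/pixel
// pairs. The intrinsics and point values below are illustrative assumptions,
// and at least three points are needed so the 6-DoF normal equations are
// well-posed.
// ---------------------------------------------------------------------------
void exampleGaussNewtonCall() {
  Mat K = (Mat_<double>(3, 3) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1);
  VecVector3d pts_3d;
  VecVector2d pts_2d;
  pts_3d.push_back(Eigen::Vector3d(0.5, 0.2, 2.0));   // hypothetical landmarks
  pts_3d.push_back(Eigen::Vector3d(-0.3, 0.1, 1.5));
  pts_3d.push_back(Eigen::Vector3d(0.1, -0.4, 2.5));
  pts_3d.push_back(Eigen::Vector3d(0.8, 0.6, 3.0));
  pts_2d.push_back(Eigen::Vector2d(455.0, 301.0));    // hypothetical pixels
  pts_2d.push_back(Eigen::Vector2d(221.0, 284.0));
  pts_2d.push_back(Eigen::Vector2d(346.0, 166.0));
  pts_2d.push_back(Eigen::Vector2d(464.0, 354.0));
  Sophus::SE3d pose;  // identity initial guess
  bundleAdjustmentGaussNewton(pts_3d, pts_2d, K, pose);
}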
import pandas as pd
import numpy as np


def optimize_feature_power(df, output_column_name=None, exponents=[2., 1., .8, .5, .25, .1, .01]):
    """ Plot the correlation coefficient for various exponential scalings of input features

    >>> np.random.seed(314159)
    >>> df = pd.DataFrame()
    >>> df['output'] = np.random.randn(1000)
    >>> df['x10'] = df.output * 10
    >>> df['sq'] = df.output ** 2
    >>> df['sqrt'] = df.output ** .5
    >>> optimize_feature_power(df, output_column_name='output').round(2)
            x10    sq  sqrt
    power
    2.00  -0.08  1.00  0.83
    1.00   1.00 -0.08  0.97
    0.80   1.00  0.90  0.99
    0.50   0.97  0.83  1.00
    0.25   0.93  0.76  0.99
    0.10   0.89  0.71  0.97
    0.01   0.86  0.67  0.95

    Returns:
        DataFrame: columns are the input_columns from the source dataframe (df)
                   rows are correlation with output for each attempted exponent used to scale the input features
    """
    output_column_name = list(df.columns)[-1] if output_column_name is None else output_column_name
    input_column_names = [colname for colname in df.columns if output_column_name != colname]
    results = np.zeros((len(exponents), len(input_column_names)))
    for rownum, exponent in enumerate(exponents):
        for colnum, column_name in enumerate(input_column_names):
            results[rownum, colnum] = (df[output_column_name] ** exponent).corr(df[column_name])
    results = pd.DataFrame(results, columns=input_column_names, index=pd.Series(exponents, name='power'))
    # results.plot(logx=True)
    return results
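# Usage sketch (hypothetical data, not from the original module): scan the
# candidate powers against a made-up "price" target. Column names and the
# coefficients below are illustrative only.
if __name__ == "__main__":
    demo = pd.DataFrame()
    demo['sqft'] = np.random.uniform(500, 3500, size=200)
    demo['age'] = np.random.uniform(0, 80, size=200)
    demo['price'] = demo['sqft'] * 120 - demo['age'] * 900 + np.random.randn(200) * 1e4
    corr_table = optimize_feature_power(demo, output_column_name='price')
    print(corr_table.round(2))  # inspect which power gives the strongest correlations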
<filename>external/audiocodec/aacdec/sbr_extract_extended_data.c
/******************************************************************
 *
 * Copyright 2018 Samsung Electronics All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 ******************************************************************/
/* ------------------------------------------------------------------
 * Copyright (C) 1998-2009 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
/*

 Filename: sbr_extract_extended_data.c

------------------------------------------------------------------------------
 REVISION HISTORY

 Who:                              Date: MM/DD/YYYY
 Description:

------------------------------------------------------------------------------
 INPUT AND OUTPUT DEFINITIONS

 SBR_FRAME_DATA *hFrameData,     Destination for extracted data of left channel
 SBR_FRAME_DATA *hFrameDataRight Destination for extracted data of right channel
 BIT_BUFFER hBitBuf              pointer to bit buffer

------------------------------------------------------------------------------
 FUNCTION DESCRIPTION

 Reads extension data from the bitstream

 The bitstream format allows up to 4 kinds of extended data element.
 Extended data may contain several elements, each identified by a 2-bit-ID.

------------------------------------------------------------------------------
 REQUIREMENTS

------------------------------------------------------------------------------
 REFERENCES

 SC 29 Software Copyright Licencing Disclaimer:

 This software module was originally developed by
   Coding Technologies

 and edited by
   -

 in the course of development of the ISO/IEC 13818-7 and ISO/IEC 14496-3
 standards for reference purposes and its performance may not have been
 optimized. This software module is an implementation of one or more tools as
 specified by the ISO/IEC 13818-7 and ISO/IEC 14496-3 standards.
 ISO/IEC gives users free license to this software module or modifications
 thereof for use in products claiming conformance to audiovisual and
 image-coding related ITU Recommendations and/or ISO/IEC International
 Standards. ISO/IEC gives users the same free license to this software module
 or modifications thereof for research purposes and further ISO/IEC
 standardisation. Those intending to use this software module in products are
 advised that its use may infringe existing patents. ISO/IEC have no liability
 for use of this software module or modifications thereof. Copyright is not
 released for products that do not conform to audiovisual and image-coding
 related ITU Recommendations and/or ISO/IEC International Standards.
 The original developer retains full right to modify and use the code for its
 own purpose, assign or donate the code to a third party and to inhibit third
 parties from using the code for products that do not conform to audiovisual
 and image-coding related ITU Recommendations and/or ISO/IEC International
 Standards. This copyright notice must be included in all copies or derivative
 works. Copyright (c) ISO/IEC 2002.

------------------------------------------------------------------------------
 PSEUDO-CODE

------------------------------------------------------------------------------
*/

/*----------------------------------------------------------------------------
; INCLUDES
----------------------------------------------------------------------------*/

#ifdef AAC_PLUS

#include    "sbr_extract_extended_data.h"
#include    "buf_getbits.h"

#ifdef PARAMETRICSTEREO
#include    "ps_read_data.h"
#endif

/*----------------------------------------------------------------------------
; MACROS
; Define module specific macros here
----------------------------------------------------------------------------*/

/*----------------------------------------------------------------------------
; DEFINES
; Include all pre-processor statements here. Include conditional
; compile variables also.
----------------------------------------------------------------------------*/

/*----------------------------------------------------------------------------
; LOCAL FUNCTION DEFINITIONS
; Function Prototype declaration
----------------------------------------------------------------------------*/

/*----------------------------------------------------------------------------
; LOCAL STORE/BUFFER/POINTER DEFINITIONS
; Variable declaration - defined here and used outside this module
----------------------------------------------------------------------------*/

/*----------------------------------------------------------------------------
; EXTERNAL FUNCTION REFERENCES
; Declare functions defined elsewhere and referenced in this module
----------------------------------------------------------------------------*/

/*----------------------------------------------------------------------------
; EXTERNAL GLOBAL STORE/BUFFER/POINTER REFERENCES
; Declare variables used in this module but defined elsewhere
----------------------------------------------------------------------------*/

/*----------------------------------------------------------------------------
; FUNCTION CODE
----------------------------------------------------------------------------*/

void sbr_extract_extended_data(BIT_BUFFER * hBitBuf
#ifdef PARAMETRICSTEREO     /* Parametric Stereo Decoder */
                               , HANDLE_PS_DEC hParametricStereoDec
#endif
                              )
{
    Int32 extended_data;
    Int32 i;
    Int32 nBitsLeft;
    Int32 extension_id;

    extended_data = buf_get_1bit(hBitBuf);    /*  SI_SBR_EXTENDED_DATA_BITS  */

    if (extended_data)
    {
        Int32 cnt;

        cnt = buf_getbits(hBitBuf, SI_SBR_EXTENSION_SIZE_BITS);
        if (cnt == (1 << SI_SBR_EXTENSION_SIZE_BITS) - 1)
        {
            cnt += buf_getbits(hBitBuf, SI_SBR_EXTENSION_ESC_COUNT_BITS);
        }

        nBitsLeft = (cnt << 3);

        while (nBitsLeft > 7)
        {
            extension_id = buf_getbits(hBitBuf, SI_SBR_EXTENSION_ID_BITS);
            nBitsLeft -= SI_SBR_EXTENSION_ID_BITS;

            switch (extension_id)
            {
#ifdef HQ_SBR
#ifdef PARAMETRICSTEREO

                /*
                 *  Parametric Coding supports the Transient, Sinusoidal, Noise, and
                 *  Parametric Stereo tools (MPEG4).
                 *  3GPP use aac+ hq along with ps for enhanced aac+
                 *  The PS tool uses complex-value QMF data, therefore can not be used
                 *  with low power version of aac+
                 */
                case EXTENSION_ID_PS_CODING:

                    if (hParametricStereoDec != NULL)
                    {
                        if (!hParametricStereoDec->psDetected)
                        {
                            /* parametric stereo detected */
                            hParametricStereoDec->psDetected = 1;
                        }

                        nBitsLeft -= ps_read_data(hParametricStereoDec,
                                                  hBitBuf,
                                                  nBitsLeft);
                    }

                    break;
#endif
#endif

                case 0:

                default:
                    /*  An unknown extension id causes the remaining extension data
                     *  to be skipped
                     */
                    cnt = nBitsLeft >> 3;   /* number of remaining bytes */

                    for (i = 0; i < cnt; i++)
                    {
                        buf_getbits(hBitBuf, 8);
                    }

                    nBitsLeft -= (cnt << 3);
            }
        }

        /* read fill bits for byte alignment */
        buf_getbits(hBitBuf, nBitsLeft);
    }
}

#endif
{ "description": "Element content replacement - Simple combination of direct and indirect adjacent combinators (css3-modsel-90)", "selectors": { "p": "css3-modsel-90.expected0.html", "blockquote ~ div + p": "css3-modsel-90.expected1.html" }, "src": "css3-modsel-90.src.html" }
<reponame>Yanci0/openGauss-server
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#############################################################################
# Copyright (c): 2021, Huawei Tech. Co., Ltd.
# FileName     : source.py
# Version      :
# Date         : 2021-4-7
# Description  :
#############################################################################


class Source:
    """
    This is the parent class used for acquiring multiple metric data at the same time.
    """

    def __init__(self):
        self._channel_manager = None

    def start(self):
        pass

    def stop(self):
        pass

    @property
    def channel_manager(self):
        return self._channel_manager

    @channel_manager.setter
    def channel_manager(self, channel_manager):
        self._channel_manager = channel_manager
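# Minimal sketch of a concrete Source (illustrative only, not part of
# openGauss): assumes the injected channel manager exposes a
# get_channel(name).put(event) interface, which is a hypothetical shape here.
class CpuSource(Source):
    def __init__(self):
        super().__init__()
        self._running = False

    def start(self):
        self._running = True
        # poll one metric sample and push it through the injected manager
        if self.channel_manager is not None:
            self.channel_manager.get_channel('cpu').put({'usage': 0.42})

    def stop(self):
        self._running = False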
/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * (C) Copyright 2011
 * <NAME>, HALE electronic GmbH, <EMAIL>
 */

#ifndef __MC13783_H__
#define __MC13783_H__

/* REG_MODE_0 */
#define VAUDIOEN	(1 << 0)
#define VAUDIOSTBY	(1 << 1)
#define VAUDIOMODE	(1 << 2)
#define VIOHIEN		(1 << 3)
#define VIOHISTBY	(1 << 4)
#define VIOHIMODE	(1 << 5)
#define VIOLOEN		(1 << 6)
#define VIOLOSTBY	(1 << 7)
#define VIOLOMODE	(1 << 8)
#define VDIGEN		(1 << 9)
#define VDIGSTBY	(1 << 10)
#define VDIGMODE	(1 << 11)
#define VGENEN		(1 << 12)
#define VGENSTBY	(1 << 13)
#define VGENMODE	(1 << 14)
#define VRFDIGEN	(1 << 15)
#define VRFDIGSTBY	(1 << 16)
#define VRFDIGMODE	(1 << 17)
#define VRFREFEN	(1 << 18)
#define VRFREFSTBY	(1 << 19)
#define VRFREFMODE	(1 << 20)
#define VRFCPEN		(1 << 21)
#define VRFCPSTBY	(1 << 22)
#define VRFCPMODE	(1 << 23)

/* REG_MODE_1 */
#define VSIMEN		(1 << 0)
#define VSIMSTBY	(1 << 1)
#define VSIMMODE	(1 << 2)
#define VESIMEN		(1 << 3)
#define VESIMSTBY	(1 << 4)
#define VESIMMODE	(1 << 5)
#define VCAMEN		(1 << 6)
#define VCAMSTBY	(1 << 7)
#define VCAMMODE	(1 << 8)
#define VRFBGEN		(1 << 9)
#define VRFBGSTBY	(1 << 10)
#define VVIBEN		(1 << 11)
#define VRF1EN		(1 << 12)
#define VRF1STBY	(1 << 13)
#define VRF1MODE	(1 << 14)
#define VRF2EN		(1 << 15)
#define VRF2STBY	(1 << 16)
#define VRF2MODE	(1 << 17)
#define VMMC1EN		(1 << 18)
#define VMMC1STBY	(1 << 19)
#define VMMC1MODE	(1 << 20)
#define VMMC2EN		(1 << 21)
#define VMMC2STBY	(1 << 22)
#define VMMC2MODE	(1 << 23)

#endif /* __MC13783_H__ */
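/*
 * Usage sketch (illustrative, not part of U-Boot): compose a REG_MODE_0
 * value enabling the audio, general-purpose and high-IO regulators. How the
 * value reaches the PMIC depends on the board's SPI/I2C write helper, which
 * is assumed to exist elsewhere and is not shown here.
 */
static inline unsigned int mc13783_mode0_enable_basic(void)
{
	return VAUDIOEN | VGENEN | VIOHIEN;
}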
<filename>pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.c
#include <stdlib.h>
#include <string.h>
#include "src/cjkcodecs/multibytecodec.h"
#include "src/cjkcodecs/fixnames.h"

struct pypy_cjk_dec_s *pypy_cjk_dec_new(const MultibyteCodec *codec)
{
  struct pypy_cjk_dec_s *d = malloc(sizeof(struct pypy_cjk_dec_s));
  if (!d)
    return NULL;
  if (codec->decinit != NULL && codec->decinit(&d->state, codec->config) != 0)
    {
      free(d);
      return NULL;
    }

  d->codec = codec;
  d->outbuf_start = NULL;
  return d;
}

Py_ssize_t pypy_cjk_dec_init(struct pypy_cjk_dec_s *d,
                             char *inbuf, Py_ssize_t inlen)
{
  d->inbuf_start = (unsigned char *)inbuf;
  d->inbuf = (unsigned char *)inbuf;
  d->inbuf_end = (unsigned char *)inbuf + inlen;
  if (d->outbuf_start == NULL)
    {
      d->outbuf_start = (inlen <= (PY_SSIZE_T_MAX / sizeof(Py_UNICODE)) ?
                         malloc(inlen * sizeof(Py_UNICODE)) :
                         NULL);
      if (d->outbuf_start == NULL)
        return -1;
      d->outbuf_end = d->outbuf_start + inlen;
    }
  d->outbuf = d->outbuf_start;
  return 0;
}

void pypy_cjk_dec_free(struct pypy_cjk_dec_s *d)
{
  free(d->outbuf_start);
  free(d);
}

static int expand_decodebuffer(struct pypy_cjk_dec_s *d, Py_ssize_t esize)
{
  Py_ssize_t orgpos, orgsize;
  Py_UNICODE *newbuf;

  orgpos = d->outbuf - d->outbuf_start;
  orgsize = d->outbuf_end - d->outbuf_start;
  esize = (esize < (orgsize >> 1) ? (orgsize >> 1) | 1 : esize);
  newbuf = (esize <= (PY_SSIZE_T_MAX / sizeof(Py_UNICODE) - orgsize) ?
            realloc(d->outbuf_start, (orgsize + esize) * sizeof(Py_UNICODE)) :
            NULL);
  if (!newbuf)
    return -1;
  d->outbuf_start = newbuf;
  d->outbuf = newbuf + orgpos;
  d->outbuf_end = newbuf + orgsize + esize;
  return 0;
}

Py_ssize_t pypy_cjk_dec_chunk(struct pypy_cjk_dec_s *d)
{
  while (1)
    {
      Py_ssize_t r;
      Py_ssize_t inleft = (Py_ssize_t)(d->inbuf_end - d->inbuf);
      Py_ssize_t outleft = (Py_ssize_t)(d->outbuf_end - d->outbuf);
      if (inleft == 0)
        return 0;
      r = d->codec->decode(&d->state, d->codec->config,
                           &d->inbuf, inleft, &d->outbuf, outleft);
      if (r != MBERR_TOOSMALL)
        return r;
      /* output buffer too small; grow it and continue. */
      if (expand_decodebuffer(d, -1) == -1)
        return MBERR_NOMEMORY;
    }
}

Py_UNICODE *pypy_cjk_dec_outbuf(struct pypy_cjk_dec_s *d)
{
  return d->outbuf_start;
}

Py_ssize_t pypy_cjk_dec_outlen(struct pypy_cjk_dec_s *d)
{
  return d->outbuf - d->outbuf_start;
}

Py_ssize_t pypy_cjk_dec_inbuf_remaining(struct pypy_cjk_dec_s *d)
{
  return d->inbuf_end - d->inbuf;
}

Py_ssize_t pypy_cjk_dec_inbuf_consumed(struct pypy_cjk_dec_s* d)
{
  return d->inbuf - d->inbuf_start;
}

Py_ssize_t pypy_cjk_dec_replace_on_error(struct pypy_cjk_dec_s* d,
                                         Py_UNICODE *newbuf, Py_ssize_t newlen,
                                         Py_ssize_t in_offset)
{
  if (newlen > 0)
    {
      if (d->outbuf + newlen > d->outbuf_end)
        if (expand_decodebuffer(d, newlen) == -1)
          return MBERR_NOMEMORY;
      memcpy(d->outbuf, newbuf, newlen * sizeof(Py_UNICODE));
      d->outbuf += newlen;
    }
  d->inbuf = d->inbuf_start + in_offset;
  return 0;
}

/************************************************************/

struct pypy_cjk_enc_s *pypy_cjk_enc_new(const MultibyteCodec *codec)
{
  struct pypy_cjk_enc_s *d = malloc(sizeof(struct pypy_cjk_enc_s));
  if (!d)
    return NULL;
  if (codec->encinit != NULL && codec->encinit(&d->state, codec->config) != 0)
    {
      free(d);
      return NULL;
    }

  d->codec = codec;
  d->outbuf_start = NULL;
  return d;
}

Py_ssize_t pypy_cjk_enc_init(struct pypy_cjk_enc_s *d,
                             Py_UNICODE *inbuf, Py_ssize_t inlen)
{
  Py_ssize_t outlen;

  d->inbuf_start = inbuf;
  d->inbuf = inbuf;
  d->inbuf_end = inbuf + inlen;
  if (d->outbuf_start == NULL)
    {
      if (inlen > (PY_SSIZE_T_MAX - 16) / 2)
        return -1;
      outlen = inlen * 2 + 16;
      d->outbuf_start = malloc(outlen);
      if (d->outbuf_start == NULL)
        return -1;
      d->outbuf_end = d->outbuf_start + outlen;
    }
  d->outbuf = d->outbuf_start;
  return 0;
}

void pypy_cjk_enc_free(struct pypy_cjk_enc_s *d)
{
  free(d->outbuf_start);
  free(d);
}

static int expand_encodebuffer(struct pypy_cjk_enc_s *d, Py_ssize_t esize)
{
  Py_ssize_t orgpos, orgsize;
  unsigned char *newbuf;

  orgpos = d->outbuf - d->outbuf_start;
  orgsize = d->outbuf_end - d->outbuf_start;
  esize = (esize < (orgsize >> 1) ? (orgsize >> 1) | 1 : esize);
  newbuf = (esize <= PY_SSIZE_T_MAX - orgsize ?
            realloc(d->outbuf_start, orgsize + esize) :
            NULL);
  if (!newbuf)
    return -1;
  d->outbuf_start = newbuf;
  d->outbuf = newbuf + orgpos;
  d->outbuf_end = newbuf + orgsize + esize;
  return 0;
}

Py_ssize_t pypy_cjk_enc_chunk(struct pypy_cjk_enc_s *d, Py_ssize_t flags)
{
  while (1)
    {
      Py_ssize_t r;
      Py_ssize_t inleft = (Py_ssize_t)(d->inbuf_end - d->inbuf);
      Py_ssize_t outleft = (Py_ssize_t)(d->outbuf_end - d->outbuf);
      if (inleft == 0 && !(flags & MBENC_RESET))
        return 0;
      r = d->codec->encode(&d->state, d->codec->config,
                           &d->inbuf, inleft, &d->outbuf, outleft, flags);
      if (r != MBERR_TOOSMALL)
        return r;
      /* output buffer too small; grow it and continue. */
      if (expand_encodebuffer(d, -1) == -1)
        return MBERR_NOMEMORY;
    }
}

Py_ssize_t pypy_cjk_enc_reset(struct pypy_cjk_enc_s *d)
{
  if (d->codec->encreset == NULL)
    return 0;

  while (1)
    {
      Py_ssize_t r;
      Py_ssize_t outleft = (Py_ssize_t)(d->outbuf_end - d->outbuf);
      r = d->codec->encreset(&d->state, d->codec->config,
                             &d->outbuf, outleft);
      if (r != MBERR_TOOSMALL)
        return r;
      /* output buffer too small; grow it and continue. */
      if (expand_encodebuffer(d, -1) == -1)
        return MBERR_NOMEMORY;
    }
}

char *pypy_cjk_enc_outbuf(struct pypy_cjk_enc_s *d)
{
  return (char *)d->outbuf_start;
}

Py_ssize_t pypy_cjk_enc_outlen(struct pypy_cjk_enc_s *d)
{
  return d->outbuf - d->outbuf_start;
}

Py_ssize_t pypy_cjk_enc_inbuf_remaining(struct pypy_cjk_enc_s *d)
{
  return d->inbuf_end - d->inbuf;
}

Py_ssize_t pypy_cjk_enc_inbuf_consumed(struct pypy_cjk_enc_s* d)
{
  return d->inbuf - d->inbuf_start;
}

Py_ssize_t pypy_cjk_enc_replace_on_error(struct pypy_cjk_enc_s* d,
                                         char *newbuf, Py_ssize_t newlen,
                                         Py_ssize_t in_offset)
{
  if (newlen > 0)
    {
      if (d->outbuf + newlen > d->outbuf_end)
        if (expand_encodebuffer(d, newlen) == -1)
          return MBERR_NOMEMORY;
      memcpy(d->outbuf, newbuf, newlen);
      d->outbuf += newlen;
    }
  d->inbuf = d->inbuf_start + in_offset;
  return 0;
}

const MultibyteCodec *pypy_cjk_enc_getcodec(struct pypy_cjk_enc_s *d)
{
  return d->codec;
}
<gh_stars>1000+
{
  "name": "UPennMobileComponentsSDK",
  "version": "0.3.0",
  "summary": "SDK that contains common UI elements, utilities, functionality etc, for re-use across all University of Pennsylvania mobile applications.",
  "description": "SDK that contains common UI elements, utilities, functionality etc, for re-use across all University of Pennsylvania mobile applications. This SDK will make it much easier to implement core components for new applications, and get a better birds-eye-view of what version components a specific application is running, allowing for quick & easy updating.",
  "homepage": "https://github.com/upennmobile/UPennMobileComponentsSDK",
  "license": {
    "type": "MIT",
    "file": "LICENSE"
  },
  "authors": {
    "rabdulsal": "<EMAIL>"
  },
  "source": {
    "git": "https://github.com/upennmobile/UPennMobileComponentsSDK.git",
    "tag": "0.3.0"
  },
  "swift_versions": "5.0",
  "platforms": {
    "ios": "13.0"
  },
  "source_files": "UPennMobileComponentsSDK/Classes/**/*",
  "resource_bundles": {
    "UPennMobileComponentsSDK": [
      "UPennMobileComponentsSDK/Assets/*.{png,storyboard,xib}"
    ]
  },
  "dependencies": {
    "Alamofire": [
      "~> 5.2"
    ],
    "SVProgressHUD": [],
    "JWTDecode": []
  },
  "swift_version": "5.0"
}
<filename>digdag-core/src/test/java/io/digdag/core/workflow/StoreOperatorFactory.java
package io.digdag.core.workflow;

import com.google.inject.Inject;
import io.digdag.client.config.Config;
import io.digdag.client.config.ConfigKey;
import io.digdag.spi.Operator;
import io.digdag.spi.OperatorFactory;
import io.digdag.spi.OperatorContext;
import io.digdag.spi.TaskRequest;
import io.digdag.spi.TaskResult;
import java.nio.file.Path;
import java.util.List;

public class StoreOperatorFactory
        implements OperatorFactory
{
    @Inject
    public StoreOperatorFactory()
    { }

    public String getType()
    {
        return "store";
    }

    @Override
    public Operator newOperator(OperatorContext context)
    {
        return new StoreOperator(context);
    }

    private static class StoreOperator
            implements Operator
    {
        private final TaskRequest request;

        public StoreOperator(OperatorContext context)
        {
            this.request = context.getTaskRequest();
        }

        @Override
        public TaskResult run()
        {
            Config params = request.getConfig();
            Config storeParams = params.getNestedOrGetEmpty("_command");
            List<ConfigKey> resetParams = params.getListOrEmpty("reset", ConfigKey.class);

            return TaskResult.defaultBuilder(request)
                .resetStoreParams(resetParams)
                .storeParams(storeParams)
                .build();
        }
    }
}
<gh_stars>1000+ """Test the reduce API.""" import pytest import cupy as cp import ray from ray.util.collective.types import ReduceOp from ray.util.collective.tests.util import create_collective_multigpu_workers @pytest.mark.parametrize("group_name", ["default", "test", "123?34!"]) @pytest.mark.parametrize("dst_rank", [0, 1]) @pytest.mark.parametrize("dst_gpu_index", [0, 1]) def test_reduce_different_name(ray_start_distributed_multigpu_2_nodes_4_gpus, group_name, dst_rank, dst_gpu_index): world_size = 2 num_gpu_per_worker = 2 actual_world_size = world_size * num_gpu_per_worker actors, _ = create_collective_multigpu_workers( num_workers=world_size, group_name=group_name) results = ray.get([ a.do_reduce_multigpu.remote( group_name, dst_rank=dst_rank, dst_gpu_index=dst_gpu_index) for a in actors ]) for i in range(world_size): for j in range(num_gpu_per_worker): if i == dst_rank and j == dst_gpu_index: assert (results[i][j] == cp.ones( (10, ), dtype=cp.float32) * actual_world_size).all() else: assert (results[i][j] == cp.ones((10, ), dtype=cp.float32)).all() @pytest.mark.parametrize("array_size", [2, 2**5, 2**10, 2**15, 2**20]) @pytest.mark.parametrize("dst_rank", [0, 1]) @pytest.mark.parametrize("dst_gpu_index", [0, 1]) def test_reduce_different_array_size( ray_start_distributed_multigpu_2_nodes_4_gpus, array_size, dst_rank, dst_gpu_index): world_size = 2 num_gpu_per_worker = 2 actual_world_size = world_size * num_gpu_per_worker actors, _ = create_collective_multigpu_workers(num_workers=world_size) ray.get(actors[0].set_buffer.remote(array_size)) ray.get(actors[1].set_buffer.remote(array_size)) results = ray.get([ a.do_reduce_multigpu.remote( dst_rank=dst_rank, dst_gpu_index=dst_gpu_index) for a in actors ]) for i in range(world_size): for j in range(num_gpu_per_worker): if i == dst_rank and j == dst_gpu_index: assert (results[i][j] == cp.ones( (array_size, ), dtype=cp.float32) * actual_world_size).all() else: assert (results[i][j] == cp.ones( (array_size, ), dtype=cp.float32)).all() @pytest.mark.parametrize("dst_rank", [0, 1]) @pytest.mark.parametrize("dst_gpu_index", [0, 1]) def test_reduce_different_op(ray_start_distributed_multigpu_2_nodes_4_gpus, dst_rank, dst_gpu_index): world_size = 2 num_gpu_per_worker = 2 actors, _ = create_collective_multigpu_workers(world_size) # check product ray.get(actors[0].set_buffer.remote([10], value0=2, value1=3)) ray.get(actors[1].set_buffer.remote([10], value0=4, value1=5)) results = ray.get([ a.do_reduce_multigpu.remote( dst_rank=dst_rank, dst_gpu_index=dst_gpu_index, op=ReduceOp.PRODUCT) for a in actors ]) for i in range(world_size): for j in range(num_gpu_per_worker): if i == dst_rank and j == dst_gpu_index: assert (results[i][j] == cp.ones( (10, ), dtype=cp.float32) * 120).all() else: val = (i + 1) * 2 + j assert (results[i][j] == cp.ones( (10, ), dtype=cp.float32) * val).all() # check min ray.get(actors[0].set_buffer.remote([10], value0=2, value1=3)) ray.get(actors[1].set_buffer.remote([10], value0=4, value1=5)) results = ray.get([ a.do_reduce_multigpu.remote( dst_rank=dst_rank, dst_gpu_index=dst_gpu_index, op=ReduceOp.MIN) for a in actors ]) for i in range(world_size): for j in range(num_gpu_per_worker): if i == dst_rank and j == dst_gpu_index: assert (results[i][j] == cp.ones( (10, ), dtype=cp.float32) * 2).all() else: val = (i + 1) * 2 + j assert (results[i][j] == cp.ones( (10, ), dtype=cp.float32) * val).all() # check max ray.get(actors[0].set_buffer.remote([10], value0=2, value1=3)) ray.get(actors[1].set_buffer.remote([10], value0=4, value1=5)) 
results = ray.get([ a.do_reduce_multigpu.remote( dst_rank=dst_rank, dst_gpu_index=dst_gpu_index, op=ReduceOp.MAX) for a in actors ]) for i in range(world_size): for j in range(num_gpu_per_worker): if i == dst_rank and j == dst_gpu_index: assert (results[i][j] == cp.ones( (10, ), dtype=cp.float32) * 5).all() else: val = (i + 1) * 2 + j assert (results[i][j] == cp.ones( (10, ), dtype=cp.float32) * val).all() @pytest.mark.parametrize("dst_rank", [0, 1]) @pytest.mark.parametrize("dst_gpu_index", [0, 1]) def test_reduce_torch_cupy(ray_start_distributed_multigpu_2_nodes_4_gpus, dst_rank, dst_gpu_index): import torch world_size = 2 num_gpu_per_worker = 2 actors, _ = create_collective_multigpu_workers(world_size) ray.get(actors[0].set_buffer.remote([10], value0=2, value1=3)) ray.get(actors[1].set_buffer.remote( [10], value0=4, value1=5, tensor_type0="torch", tensor_type1="torch")) results = ray.get([ a.do_reduce_multigpu.remote( dst_rank=dst_rank, dst_gpu_index=dst_gpu_index) for a in actors ]) for i in range(world_size): for j in range(num_gpu_per_worker): val = (i + 1) * 2 + j if dst_rank == i and dst_gpu_index == j: if i == 0: assert (results[i][j] == cp.ones([10], dtype=cp.float32) * 14).all() else: assert ( results[i][j] == torch.ones([10]).cuda(j) * 14).all() else: if i == 0: assert (results[i][j] == cp.ones([10], dtype=cp.float32) * val).all() else: assert ( results[i][j] == torch.ones([10]).cuda(j) * val).all() @pytest.mark.parametrize("dst_rank", [3, 4]) @pytest.mark.parametrize("dst_gpu_index", [2, 3]) def test_reduce_invalid_rank(ray_start_distributed_multigpu_2_nodes_4_gpus, dst_rank, dst_gpu_index): world_size = 2 actors, _ = create_collective_multigpu_workers(world_size) with pytest.raises(ValueError): _ = ray.get([ a.do_reduce_multigpu.remote( dst_rank=dst_rank, dst_gpu_index=dst_gpu_index) for a in actors ])
package org.fluentlenium.adapter;

import org.fluentlenium.core.FluentControl;
import org.fluentlenium.core.FluentDriver;

/**
 * Default {@link FluentControlContainer} implementation, using a simple variable to store the {@link FluentDriver}
 * instance.
 */
public class DefaultFluentControlContainer implements FluentControlContainer {
    private FluentControl fluentControl;

    @Override
    public FluentControl getFluentControl() {
        return fluentControl;
    }

    @Override
    public void setFluentControl(FluentControl fluentControl) {
        this.fluentControl = fluentControl;
    }
}
<reponame>OctaviantoVyan/jwswing<gh_stars>100-1000
// -*- mode:java; encoding:utf-8 -*-
// vim:set fileencoding=utf-8:
// @homepage@

package example;

import java.awt.*;
import javax.swing.*;

public final class MainPanel extends JPanel {
  private MainPanel() {
    super(new BorderLayout());
    JTree tree1 = new JTree();
    tree1.setShowsRootHandles(true);

    JTree tree2 = new JTree();
    tree2.setShowsRootHandles(false);

    JCheckBox check = new JCheckBox("setRootVisible", true);
    check.addActionListener(e -> {
      boolean flg = ((JCheckBox) e.getSource()).isSelected();
      tree1.setRootVisible(flg);
      tree2.setRootVisible(flg);
    });

    JPanel p = new JPanel(new GridLayout(1, 2));
    p.add(makeTitledPanel("setShowsRootHandles(true)", tree1));
    p.add(makeTitledPanel("setShowsRootHandles(false)", tree2));

    add(p);
    add(check, BorderLayout.SOUTH);
    setPreferredSize(new Dimension(320, 240));
  }

  private static Component makeTitledPanel(String title, JTree tree) {
    tree.setBorder(BorderFactory.createEmptyBorder(2, 4, 2, 2));
    JPanel p = new JPanel(new BorderLayout());
    p.setBorder(BorderFactory.createTitledBorder(title));
    p.add(new JScrollPane(tree));
    return p;
  }

  public static void main(String[] args) {
    EventQueue.invokeLater(MainPanel::createAndShowGui);
  }

  private static void createAndShowGui() {
    try {
      UIManager.setLookAndFeel(UIManager.getSystemLookAndFeelClassName());
    } catch (ClassNotFoundException | InstantiationException
           | IllegalAccessException | UnsupportedLookAndFeelException ex) {
      ex.printStackTrace();
      Toolkit.getDefaultToolkit().beep();
    }
    JFrame frame = new JFrame("@title@");
    frame.setDefaultCloseOperation(WindowConstants.EXIT_ON_CLOSE);
    frame.getContentPane().add(new MainPanel());
    frame.pack();
    frame.setLocationRelativeTo(null);
    frame.setVisible(true);
  }
}
/*
    Plugin-SDK (Grand Theft Auto Vice City) header file
    Authors: GTA Community. See more here
    https://github.com/DK22Pac/plugin-sdk
    Do not delete this comment block. Respect others' work!
*/
#include "CStinger.h"

// Converted from thiscall void CStinger::CStinger(void) 0x629B50
CStinger::CStinger() {
    plugin::CallMethod<0x629B50, CStinger *>(this);
}

// Converted from thiscall void CStinger::Deploy(CPed *pPed) 0x628EF0
void CStinger::Deploy(CPed* pPed) {
    plugin::CallMethod<0x628EF0, CStinger *, CPed*>(this, pPed);
}

// Converted from thiscall void CStinger::CheckForBurstTyres(void) 0x628F70
void CStinger::CheckForBurstTyres() {
    plugin::CallMethod<0x628F70, CStinger *>(this);
}

// Converted from thiscall void CStinger::Init(CPed *pPed) 0x6298D0
void CStinger::Init(CPed* pPed) {
    plugin::CallMethod<0x6298D0, CStinger *, CPed*>(this, pPed);
}

// Converted from thiscall void CStinger::Process(void) 0x629380
void CStinger::Process() {
    plugin::CallMethod<0x629380, CStinger *>(this);
}

// Converted from thiscall void CStinger::Remove(void) 0x629880
void CStinger::Remove() {
    plugin::CallMethod<0x629880, CStinger *>(this);
}

// Converted from thiscall void CStinger::~CStinger() 0x629B40
CStinger::~CStinger() {
    plugin::CallMethod<0x629B40, CStinger *>(this);
}
<filename>src/edu/stanford/nlp/sempre/corenlp/test/CoreNLPSemanticFnTest.java<gh_stars>100-1000
package edu.stanford.nlp.sempre.corenlp.test;

import edu.stanford.nlp.sempre.*;
import edu.stanford.nlp.sempre.corenlp.CoreNLPAnalyzer;
import edu.stanford.nlp.sempre.test.TestUtils;
import fig.basic.LispTree;
import org.testng.annotations.Test;

import java.util.Collections;
import java.util.List;

import static org.testng.AssertJUnit.assertEquals;

/**
 * Test SemanticFns that depend on CoreNLP (e.g., NumberFn on "one thousand")
 * @author <NAME>
 */
public class CoreNLPSemanticFnTest {
  private static Formula F(String s) { return Formula.fromString(s); }

  void check(Formula target, DerivationStream derivations) {
    if (!derivations.hasNext())
      throw new RuntimeException("Expected 1 derivation, got " + derivations);
    assertEquals(target, derivations.next().formula);
  }

  void check(Formula target, String utterance, SemanticFn fn, List<Derivation> children) {
    Example ex = TestUtils.makeSimpleExample(utterance);
    check(target, fn.call(ex, new SemanticFn.CallInfo(null, 0, ex.numTokens(), Rule.nullRule, children)));
  }

  void check(Formula target, String utterance, SemanticFn fn) {
    List<Derivation> empty = Collections.emptyList();
    check(target, utterance, fn, empty);
  }

  void checkNumDerivations(DerivationStream derivations, int num) {
    assertEquals(num, derivations.estimatedSize());
  }

  Derivation D(Formula f) {
    return (new Derivation.Builder())
        .formula(f)
        .prob(1.0)
        .createDerivation();
  }

  LispTree T(String str) {
    return LispTree.proto.parseFromString(str);
  }

  // TODO(chaganty): Test bridge fn - requires freebase (?)
  // TODO(chaganty): Test context fn

  @Test public void dateFn() {
    LanguageAnalyzer.setSingleton(new CoreNLPAnalyzer());
    check(F("(date 2013 8 7)"), "August 7, 2013", new DateFn());
    check(F("(date 1982 -1 -1)"), "1982", new DateFn());
    check(F("(date -1 6 4)"), "june 4", new DateFn());
  }

  @Test public void filterNerTagFn() {
    LanguageAnalyzer.setSingleton(new CoreNLPAnalyzer());
    FilterNerSpanFn filter = new FilterNerSpanFn();
    filter.init(T("(FilterNerSpanFn token PERSON)"));
    Derivation child = new Derivation.Builder().createDerivation();
    Example ex = TestUtils.makeSimpleExample("where is Obama");
    assertEquals(filter.call(ex,
        new SemanticFn.CallInfo(null, 0, 1, Rule.nullRule, Collections.singletonList(child))).hasNext(), false);
    assertEquals(filter.call(ex,
        new SemanticFn.CallInfo(null, 1, 2, Rule.nullRule, Collections.singletonList(child))).hasNext(), false);
    assertEquals(filter.call(ex,
        new SemanticFn.CallInfo(null, 2, 3, Rule.nullRule, Collections.singletonList(child))).hasNext(), true);
  }

  // TODO(chaganty): Test fuzzy match fn
  // TODO(chaganty): Test identity fn
  // TODO(chaganty): Test join fn
  // TODO(chaganty): Test lexicon fn
  // TODO(chaganty): Test merge fn

  @Test public void numberFn() {
    LanguageAnalyzer.setSingleton(new CoreNLPAnalyzer());
    check(F("(number 35000)"), "thirty-five thousand", new NumberFn());
  }

  // TODO(chaganty): Test select fn
  // TODO(chaganty): Test simple lexicon fn
}
package mage.cards.r;

import java.util.UUID;
import mage.abilities.effects.common.AddCombatAndMainPhaseEffect;
import mage.abilities.effects.common.DamageTargetEffect;
import mage.abilities.effects.common.continuous.GainAbilityControlledEffect;
import mage.abilities.keyword.FirstStrikeAbility;
import mage.abilities.keyword.VigilanceAbility;
import mage.cards.CardSetInfo;
import mage.cards.SplitCard;
import mage.constants.CardType;
import mage.constants.Duration;
import mage.constants.SpellAbilityType;
import mage.filter.StaticFilters;
import mage.target.common.TargetAttackingOrBlockingCreature;

/**
 *
 * @author TheElk801
 */
public final class ResponseResurgence extends SplitCard {

    public ResponseResurgence(UUID ownerId, CardSetInfo setInfo) {
        super(ownerId, setInfo,
                new CardType[]{CardType.INSTANT},
                new CardType[]{CardType.SORCERY},
                "{R/W}{R/W}", "{3}{R}{W}", SpellAbilityType.SPLIT);

        // Response
        // Response deals 5 damage to target attacking or blocking creature.
        this.getLeftHalfCard().getSpellAbility().addEffect(
                new DamageTargetEffect(5)
        );
        this.getLeftHalfCard().getSpellAbility().addTarget(
                new TargetAttackingOrBlockingCreature()
        );

        // Resurgence
        // Creatures you control gain first strike and vigilance until end of turn.
        // After this main phase, there is an additional combat phase followed by an additional main phase.
        this.getRightHalfCard().getSpellAbility().addEffect(
                new GainAbilityControlledEffect(
                        FirstStrikeAbility.getInstance(),
                        Duration.EndOfTurn,
                        StaticFilters.FILTER_CONTROLLED_CREATURE
                ).setText("Creatures you control gain first strike")
        );
        this.getRightHalfCard().getSpellAbility().addEffect(
                new GainAbilityControlledEffect(
                        VigilanceAbility.getInstance(),
                        Duration.EndOfTurn,
                        StaticFilters.FILTER_CONTROLLED_CREATURE
                ).setText("and vigilance until end of turn")
        );
        this.getRightHalfCard().getSpellAbility().addEffect(
                new AddCombatAndMainPhaseEffect()
        );
    }

    private ResponseResurgence(final ResponseResurgence card) {
        super(card);
    }

    @Override
    public ResponseResurgence copy() {
        return new ResponseResurgence(this);
    }
}
<reponame>zealoussnow/chromium
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "sandbox/mac/seatbelt.h"

#include <unistd.h>

extern "C" {
#include <sandbox.h>

int sandbox_init_with_parameters(const char* profile,
                                 uint64_t flags,
                                 const char* const parameters[],
                                 char** errorbuf);

// Not deprecated. The canonical usage to test if sandboxed is
// sandbox_check(getpid(), NULL, SANDBOX_FILTER_NONE), which returns
// 1 if sandboxed. Note `type` is actually a sandbox_filter_type enum value,
// but it is unused currently.
int sandbox_check(pid_t pid, const char* operation, int type, ...);
}

namespace sandbox {

// Initialize the static member variables.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
const char* Seatbelt::kProfileNoInternet = kSBXProfileNoInternet;
const char* Seatbelt::kProfileNoNetwork = kSBXProfileNoNetwork;
const char* Seatbelt::kProfileNoWrite = kSBXProfileNoWrite;
const char* Seatbelt::kProfileNoWriteExceptTemporary =
    kSBXProfileNoWriteExceptTemporary;
const char* Seatbelt::kProfilePureComputation = kSBXProfilePureComputation;
#pragma clang diagnostic pop

// static
int Seatbelt::Init(const char* profile, uint64_t flags, char** errorbuf) {
// OS X deprecated these functions, but did not provide a suitable replacement,
// so ignore the deprecation warning.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
  return ::sandbox_init(profile, flags, errorbuf);
#pragma clang diagnostic pop
}

// static
int Seatbelt::InitWithParams(const char* profile,
                             uint64_t flags,
                             const char* const parameters[],
                             char** errorbuf) {
  return ::sandbox_init_with_parameters(profile, flags, parameters, errorbuf);
}

// static
void Seatbelt::FreeError(char* errorbuf) {
// OS X deprecated these functions, but did not provide a suitable replacement,
// so ignore the deprecation warning.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
  return ::sandbox_free_error(errorbuf);
#pragma clang diagnostic pop
}

// static
bool Seatbelt::IsSandboxed() {
  return ::sandbox_check(getpid(), NULL, 0);
}

}  // namespace sandbox
<gh_stars>1000+
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.netbeans.modules.payara.tooling.server.parser;

import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.logging.Level;
import org.netbeans.modules.payara.tooling.logging.Logger;
import org.netbeans.modules.payara.tooling.server.parser.TreeParser.Path;
import org.xml.sax.Attributes;
import org.xml.sax.SAXException;

/**
 * Reads configuration of network listeners.
 * For each listener returns one {@link HttpData} object that contains
 * port number, protocol and information whether this protocol is secured.
 * <p/>
 * @author <NAME>, <NAME>
 */
public class NetworkListenerReader extends TargetConfigReader
        implements XMLReader {

    ////////////////////////////////////////////////////////////////////////////
    // Class attributes                                                        //
    ////////////////////////////////////////////////////////////////////////////

    /** Logger instance for this class. */
    private static final Logger LOGGER
            = new Logger(NetworkListenerReader.class);

    public static final String DEFAULT_PATH =
            "/domain/configs/config/network-config/network-listeners/network-listener";

    private String path;

    private Map<String, HttpData> result;

    public NetworkListenerReader(String targetConfigName) {
        this(DEFAULT_PATH, targetConfigName);
    }

    public NetworkListenerReader(String path, String targetConfigName) {
        super(targetConfigName);
        this.path = path;
        this.result = new HashMap<String, HttpData>();
    }

    @Override
    public void readAttributes(String qname, Attributes attributes)
            throws SAXException {
        final String METHOD = "readAttributes";
        /*
        <network-listeners>
          <thread-pool max-thread-pool-size="20" min-thread-pool-size="2"
                  thread-pool-id="http-thread-pool"
                  max-queue-size="4096"></thread-pool>
          <network-listener port="8080" protocol="http-listener-1"
                  transport="tcp" name="http-listener-1"
                  thread-pool="http-thread-pool"></network-listener>
          <network-listener port="8181" enabled="false"
                  protocol="http-listener-2" transport="tcp"
                  name="http-listener-2"
                  thread-pool="http-thread-pool"></network-listener>
          <network-listener port="4848" protocol="admin-listener"
                  transport="tcp" name="admin-listener"
                  thread-pool="http-thread-pool"></network-listener>
        </network-listeners>
        */
        if (readData) {
            try {
                String id = attributes.getValue("name");
                if (id != null && id.length() > 0) {
                    if (attributes.getValue("port").startsWith("$")) {
                        // ignore these template entries
                        return;
                    }
                    int port = Integer.parseInt(attributes.getValue("port"));
                    boolean secure = "true".equals(attributes.getValue(
                            "security-enabled"));
                    boolean enabled = !"false".equals(attributes.getValue(
                            "enabled"));
                    LOGGER.log(Level.INFO, METHOD, "port", new Object[] {
                        Integer.toString(port), Boolean.toString(enabled),
                        Boolean.toString(secure)});
                    if (enabled) {
                        HttpData data = new HttpData(id, port, secure);
                        LOGGER.log(Level.INFO, METHOD, "add", data);
                        result.put(id, data);
                    }
                } else {
                    LOGGER.log(Level.INFO, METHOD, "noName");
                }
            } catch (NumberFormatException ex) {
                LOGGER.log(Level.SEVERE, METHOD, "numberFormat", ex);
            }
        }
    }

    @Override
    public List<TreeParser.Path> getPathsToListen() {
        LinkedList<TreeParser.Path> paths = new LinkedList<TreeParser.Path>();
        paths.add(new Path(path, this));
        paths.add(new Path(CONFIG_PATH, new TargetConfigMarker()));
        return paths;
    }

    public Map<String, HttpData> getResult() {
        return result;
    }
}
<reponame>chromium/chromium
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef IOS_CHROME_BROWSER_CRASH_REPORT_BREADCRUMBS_BREADCRUMB_MANAGER_BROWSER_AGENT_H_
#define IOS_CHROME_BROWSER_CRASH_REPORT_BREADCRUMBS_BREADCRUMB_MANAGER_BROWSER_AGENT_H_

#include <memory>
#include <string>

#include "base/scoped_observation.h"
#include "components/breadcrumbs/core/breadcrumb_manager_browser_agent.h"
#include "ios/chrome/browser/main/browser_observer.h"
#include "ios/chrome/browser/main/browser_user_data.h"
#include "ios/chrome/browser/overlays/public/overlay_presenter.h"
#include "ios/chrome/browser/overlays/public/overlay_presenter_observer.h"
#include "ios/chrome/browser/web_state_list/web_state_list_observer.h"

class Browser;
class WebStateList;

// Name of Overlay initial presentation event.
extern const char kBreadcrumbOverlay[];
// Appended to |kBreadcrumbOverlay| event if overlay was re-activated rather
// than presented for the first time (f.e. the user has switched to a tab with
// an overlay).
extern const char kBreadcrumbOverlayActivated[];
// Appended to |kBreadcrumbOverlay| event if overlay is Http Authentication.
extern const char kBreadcrumbOverlayHttpAuth[];
// Appended to |kBreadcrumbOverlay| event if overlay is generic app dialog.
extern const char kBreadcrumbOverlayAlert[];
// Appended to |kBreadcrumbOverlay| event if overlay is app launch
// confirmation.
extern const char kBreadcrumbOverlayAppLaunch[];
// Appended to |kBreadcrumbOverlay| event if overlay is JavaScript alert.
extern const char kBreadcrumbOverlayJsAlert[];
// Appended to |kBreadcrumbOverlay| event if overlay is JavaScript confirm.
extern const char kBreadcrumbOverlayJsConfirm[];
// Appended to |kBreadcrumbOverlay| event if overlay is JavaScript prompt.
extern const char kBreadcrumbOverlayJsPrompt[];

class BreadcrumbManagerBrowserAgent
    : public breadcrumbs::BreadcrumbManagerBrowserAgent,
      BrowserObserver,
      public OverlayPresenterObserver,
      public BrowserUserData<BreadcrumbManagerBrowserAgent>,
      WebStateListObserver {
 public:
  BreadcrumbManagerBrowserAgent(const BreadcrumbManagerBrowserAgent&) = delete;
  BreadcrumbManagerBrowserAgent& operator=(
      const BreadcrumbManagerBrowserAgent&) = delete;

  ~BreadcrumbManagerBrowserAgent() override;

 private:
  explicit BreadcrumbManagerBrowserAgent(Browser* browser);
  friend class BrowserUserData<BreadcrumbManagerBrowserAgent>;
  BROWSER_USER_DATA_KEY_DECL();

  // breadcrumbs::BreadcrumbManagerBrowserAgent:
  void PlatformLogEvent(const std::string& event) override;

  // BrowserObserver:
  void BrowserDestroyed(Browser* browser) override;

  // WebStateListObserver:
  void WebStateInsertedAt(WebStateList* web_state_list,
                          web::WebState* web_state,
                          int index,
                          bool activating) override;
  void WebStateMoved(WebStateList* web_state_list,
                     web::WebState* web_state,
                     int from_index,
                     int to_index) override;
  void WebStateReplacedAt(WebStateList* web_state_list,
                          web::WebState* old_web_state,
                          web::WebState* new_web_state,
                          int index) override;
  void WillCloseWebStateAt(WebStateList* web_state_list,
                           web::WebState* web_state,
                           int index,
                           bool user_action) override;
  void WebStateActivatedAt(WebStateList* web_state_list,
                           web::WebState* old_web_state,
                           web::WebState* new_web_state,
                           int active_index,
                           ActiveWebStateChangeReason reason) override;
  void WillBeginBatchOperation(WebStateList* web_state_list) override;
  void BatchOperationEnded(WebStateList* web_state_list) override;

  // OverlayPresenterObserver:
  void WillShowOverlay(OverlayPresenter* presenter,
                       OverlayRequest* request,
                       bool initial_presentation) override;
  void OverlayPresenterDestroyed(OverlayPresenter* presenter) override;

  Browser* browser_ = nullptr;

  // Keeps track of WebState mutation count to avoid logging every event.
  // Created in WillBeginBatchOperation and destroyed in BatchOperationEnded.
  // Final mutation count is logged in BatchOperationEnded.
  struct BatchOperation {
    // Number of WebState objects inserted between WillBeginBatchOperation and
    // BatchOperationEnded callbacks.
    int insertion_count = 0;
    // Number of WebState objects closed between WillBeginBatchOperation and
    // BatchOperationEnded callbacks.
    int close_count = 0;
  };
  std::unique_ptr<BatchOperation> batch_operation_;

  // Observes overlays presentation.
  base::ScopedObservation<OverlayPresenter, OverlayPresenterObserver>
      overlay_observation_{this};
};

#endif  // IOS_CHROME_BROWSER_CRASH_REPORT_BREADCRUMBS_BREADCRUMB_MANAGER_BROWSER_AGENT_H_
<gh_stars>1000+
/*
 * Copyright 2014, Haiku Inc. All Rights Reserved.
 * Distributed under the terms of the MIT License.
 */
#ifndef _USB_MSC_H
#define _USB_MSC_H

// (Partial) USB Class Definitions for Mass Storage Devices (MSC), version 1.0
// Reference: http://www.usb.org/developers/devclass_docs/usbmassbulk_10.pdf

#define USB_MASS_STORAGE_DEVICE_CLASS			0x08

#define USB_MASSBULK_CBW_SIGNATURE				0x43425355
#define USB_MASSBULK_CBW_DATA_OUTPUT			0x00
#define USB_MASSBULK_CBW_DATA_INPUT				0x80

#define USB_MASSBULK_CSW_SIGNATURE				0x53425355
#define USB_MASSBULK_CSW_STATUS_COMMAND_PASSED	0x00
#define USB_MASSBULK_CSW_STATUS_COMMAND_FAILED	0x01
#define USB_MASSBULK_CSW_STATUS_PHASE_ERROR		0x02

#define USB_MASSBULK_REQUEST_MASS_STORAGE_RESET	0xff
#define USB_MASSBULK_REQUEST_GET_MAX_LUN		0xfe

typedef struct {
	uint32	signature;
	uint32	tag;
	uint32	data_transfer_length;
	uint8	flags;
	uint8	lun;
	uint8	command_block_length;
	uint8	command_block[16];
} _PACKED usb_massbulk_command_block_wrapper;

typedef struct {
	uint32	signature;
	uint32	tag;
	uint32	data_residue;
	uint8	status;
} _PACKED usb_massbulk_command_status_wrapper;

#endif
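/*
 * Illustrative sketch (not part of the Haiku header): populate a command
 * block wrapper for a SCSI TEST UNIT READY (opcode 0x00). Assumes <string.h>
 * is available for memset(); the tag value is an arbitrary correlation id.
 */
static void
usb_msc_fill_test_unit_ready(usb_massbulk_command_block_wrapper *cbw)
{
	memset(cbw, 0, sizeof(*cbw));
	cbw->signature = USB_MASSBULK_CBW_SIGNATURE;
	cbw->tag = 0x12345678;					/* arbitrary tag */
	cbw->data_transfer_length = 0;			/* no data stage */
	cbw->flags = USB_MASSBULK_CBW_DATA_OUTPUT;
	cbw->lun = 0;
	cbw->command_block_length = 6;			/* TEST UNIT READY has a 6-byte CDB */
	cbw->command_block[0] = 0x00;			/* SCSI TEST UNIT READY opcode */
}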
<gh_stars>100-1000
{
  "schema_version": "1.2.0",
  "id": "GHSA-r23x-gq8q-5637",
  "modified": "2021-11-23T00:00:53Z",
  "published": "2021-11-23T00:00:53Z",
  "aliases": [
    "CVE-2021-38146"
  ],
  "details": "The File Download API in Wipro Holmes Orchestrator 20.4.1 (20.4.1_02_11_2020) allows remote attackers to read arbitrary files via absolute path traversal in the SearchString JSON field in /home/download POST data.",
  "severity": [],
  "affected": [],
  "references": [
    {
      "type": "ADVISORY",
      "url": "https://nvd.nist.gov/vuln/detail/CVE-2021-38146"
    },
    {
      "type": "WEB",
      "url": "https://www.wipro.com/holmes/"
    },
    {
      "type": "WEB",
      "url": "http://packetstormsecurity.com/files/164970/Wipro-Holmes-Orchestrator-20.4.1-Arbitrary-File-Download.html"
    }
  ],
  "database_specific": {
    "cwe_ids": [],
    "severity": null,
    "github_reviewed": false
  }
}
// Generated by generate_test_data.py using TFL version 2.6.0 as reference.
#pragma once
#include <stdint.h>

const q7_t svdf_2_output_ref[15] = {-53, 45,  27, -24, -53, 26, -82, -38,
                                    11,  -85, 94, -16, -32, 31, 4};
<filename>A/Anticholinergic_adjective.json
{
    "word": "Anticholinergic",
    "definitions": [
        "(chiefly of a drug) inhibiting the physiological action of acetylcholine, especially as a neurotransmitter."
    ],
    "parts-of-speech": "Adjective"
}
# This module is automatically generated by autogen.sh. DO NOT EDIT.

from . import _IBM


class _Analytics(_IBM):
    _type = "analytics"
    _icon_dir = "resources/ibm/analytics"


class Analytics(_Analytics):
    _icon = "analytics.png"


class DataIntegration(_Analytics):
    _icon = "data-integration.png"


class DataRepositories(_Analytics):
    _icon = "data-repositories.png"


class DeviceAnalytics(_Analytics):
    _icon = "device-analytics.png"


class StreamingComputing(_Analytics):
    _icon = "streaming-computing.png"


# Aliases
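# Usage sketch (illustrative, not part of the generated module): these node
# classes are meant for the `diagrams` package (https://diagrams.mingrammer.com),
# and rendering assumes Graphviz is installed; diagram/label names are made up.
#
# from diagrams import Diagram
# from diagrams.ibm.analytics import Analytics, DataIntegration
#
# with Diagram("IBM Analytics Flow", show=False):
#     DataIntegration("ingest") >> Analytics("analyze")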
/*-
 * <<
 * UAVStack
 * ==
 * Copyright (C) 2016 - 2017 UAVStack
 * ==
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * >>
 */

package com.creditease.uav.threadanalysis.client.action;

import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import com.creditease.agent.helpers.DataConvertHelper;
import com.creditease.agent.helpers.IOHelper;
import com.creditease.agent.helpers.JSONHelper;
import com.creditease.agent.helpers.RuntimeHelper;
import com.creditease.agent.helpers.StringHelper;
import com.creditease.agent.http.api.UAVHttpMessage;
import com.creditease.agent.spi.AbstractBaseAction;
import com.creditease.agent.spi.ActionContext;
import com.creditease.agent.spi.IActionEngine;
import com.creditease.agent.spi.IConfigurationManager;

public class ThreadAnalysisAction extends AbstractBaseAction {

    private static final String SERVICE_POSTFIX = "/com.creditease.uav/server?action=runSupporter";

    // execution-time map: key is the process id, value is the execution time;
    // used to make sure no more than one request is issued within the limited time window
    private static volatile Map<String, Long> timeIntervalMap = new ConcurrentHashMap<String, Long>();

    private String dumpFileDirectory;

    private long timeInterval = 30000L; // 30s

    public ThreadAnalysisAction(String cName, String feature, IActionEngine engine) {
        super(cName, feature, engine);
        // location of the thread-analysis files; created if it does not exist.
        // There is only one MA but possibly multiple users, so for permission
        // reasons the folder is made readable and writable by other users.
        dumpFileDirectory = getConfigManager().getContext(IConfigurationManager.METADATAPATH) + "thread.analysis";
        try {
            IOHelper.createFolder(dumpFileDirectory);
            RuntimeHelper.exec(10000, "/bin/sh", "-c", "chmod 777 " + dumpFileDirectory);
        }
        catch (Exception ignore) {
            // ignore
        }
    }

    @Override
    public void doAction(ActionContext context) throws Exception {

        try {
            UAVHttpMessage data = (UAVHttpMessage) context.getParam("msg");

            if (!controlConcurrency(data)) {
                data.putResponse("rs", "ERR");
                data.putResponse("msg", "ERR:THREAD DUMP IS RUNNING");
                return;
            }

            String user = data.getRequest("user");
            if (StringHelper.isEmpty(user)) {
                user = "UNKNOWN";
            }

            String url = data.getRequest("server") + SERVICE_POSTFIX;
            if (!url.startsWith("http")) {
                url = "http://" + url;
            }

            String param = data.getRequest("actparam");

            @SuppressWarnings("unchecked")
            Map<String, Object> paramMap = JSONHelper.toObject(param, Map.class);

            @SuppressWarnings("unchecked")
            List<Object> paramsList = (List<Object>) paramMap.get("param");
            paramsList.add(this.dumpFileDirectory);
            paramMap.put("param", paramsList);

            ActionContext ac = new ActionContext();
            ac.putParam("user", user);
            ac.putParam("url", url);
            ac.putParam("paramMap", paramMap);

            if ("true".equals(data.getRequest("multiple"))) {
                ac.putParam("multiple", true);
                int duration = DataConvertHelper.toInt(data.getRequest("duration"), 0);
                int interval = DataConvertHelper.toInt(data.getRequest("interval"), 5);
                int times = duration / interval + 1;
                ac.putParam("times", times);
                ac.putParam("suspendTime", interval * 1000);
            }
            else {
                ac.putParam("multiple", false);
                ac.putParam("times", 1);
                ac.putParam("suspendTime", 0);
            }

            ac = getActionEngineMgr().getActionEngine("JTAActionEngine").execute("DumpThreadAction", ac);

            String ret = (String) ac.getParam("msg");

            if (ret.contains("ERR:")) {
                data.putResponse("rs", "ERR");
                data.putResponse("msg", ret);
            }
            else {
                data.putResponse("rs", "OK");
                data.putResponse("msg", ret);
            }
        }
        catch (Exception e) {
            log.err(this, "do thread analysis FAILED.", e);
            throw e;
        }
    }

    @Override
    public String getSuccessNextActionId() {

        return null;
    }

    @Override
    public String getFailureNextActionId() {

        return null;
    }

    @Override
    public String getExceptionNextActionId() {

        return null;
    }

    /**
     * controlConcurrency
     *
     * @param pid
     * @param exectime
     * @return
     */
    private boolean controlConcurrency(UAVHttpMessage data) {

        String server = data.getRequest("server");
        long exectime = System.currentTimeMillis();
        long duration = 1000L * DataConvertHelper.toInt(data.getRequest("duration"), 0);

        // initial
        if (!timeIntervalMap.containsKey(server)) {
            synchronized (timeIntervalMap) {
                if (!timeIntervalMap.containsKey(server)) {
                    // record the server and its execution time in the map
                    timeIntervalMap.put(server, exectime + duration);
                    return true;
                }
            }
        }

        // only one can entrance
        if ((exectime - timeIntervalMap.get(server)) > timeInterval) {
            synchronized (timeIntervalMap) {
                if ((exectime - timeIntervalMap.get(server)) > timeInterval) {
                    // record the server and its execution time in the map
                    timeIntervalMap.put(server, exectime + duration);
                    return true;
                }
            }
        }

        // thread analysis is running, abandon
        return false;
    }
}
<filename>pkg/processor/runtime/nodejs/js/package.json
{
  "name": "@nuclio/nodejs-wrapper",
  "version": "1.0.0",
  "description": "Nuclio NodeJS runtime wrapper",
  "main": "wrapper.js",
  "scripts": {
    "test": "mocha wrapper.test.js",
    "lint": "eslint *.js --fix"
  },
  "author": "nuclio",
  "license": "MIT",
  "homepage": "https://github.com/nuclio/nuclio#readme",
  "dependencies": {},
  "devDependencies": {
    "eslint": "^7.12.1",
    "eslint-config-standard": "^16.0.1",
    "eslint-plugin-import": "^2.22.1",
    "eslint-plugin-node": "^11.1.0",
    "eslint-plugin-promise": "^4.2.1",
    "mocha": "^8.2.0",
    "rewire": "^5.0.0"
  }
}
<reponame>douglasrizzo/ELF
/**
 * Copyright (c) 2017-present, Facebook, Inc.
 * All rights reserved.

 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include "cf_rule_actor.h"

bool CFRuleActor::FlagActByState(const GameEnv &env, const vector<int>& state, AssignedCmds *assigned_cmds) {
    const auto &enemy_troops = _preload.EnemyTroops();
    const auto &my_troops = _preload.MyTroops();

    if (state[FLAGSTATE_GET_FLAG] == 1) {
        if (!enemy_troops[FLAG].empty()) {
            // flag is idle, get it
            UnitId flag_id = enemy_troops[FLAG][0]->GetId();
            for (const Unit *u : my_troops[FLAG_ATHLETE]) {
                store_cmd(u, CmdBPtr(new CmdGetFlag(INVALID, flag_id)), assigned_cmds);
            }
        }
    }
    if (state[FLAGSTATE_ATTACK_FLAG] == 1) {
        for (const Unit *eu : enemy_troops[FLAG_ATHLETE]) {
            if (eu->HasFlag()) {
                for (const Unit *u : my_troops[FLAG_ATHLETE]) {
                    store_cmd(u, _A(eu->GetId()), assigned_cmds);
                }
            }
        }
    }
    if (state[FLAGSTATE_ESCORT_FLAG] == 1) {
        for (const Unit *u : my_troops[FLAG_ATHLETE]) {
            if (u->HasFlag()) {
                // try to send the flag back to base
                store_cmd(u, CmdBPtr(new CmdEscortFlagToBase(INVALID)), assigned_cmds);
            }
        }
    }
    if (state[FLAGSTATE_PROTECT_FLAG] == 1) {
        UnitId our_flag_athlete_id = INVALID;
        for (const Unit *u : my_troops[FLAG_ATHLETE]) {
            if (u->HasFlag()) {
                our_flag_athlete_id = u->GetId();
            }
        }
        // others, protect our flag athlete
        const Unit* our_flag_athlete = env.GetUnit(our_flag_athlete_id);
        if (our_flag_athlete != nullptr) {
            UnitId damage_from = our_flag_athlete->GetProperty().GetLastDamageFrom();
            const Unit *source = env.GetUnit(damage_from);
            if (source != nullptr) {
                for (const Unit *u : my_troops[FLAG_ATHLETE]) {
                    if (!u->HasFlag()) {
                        store_cmd(u, _A(damage_from), assigned_cmds);
                    }
                }
            }
        }
    }
    /*
    if (state[FLAGSTATE_MOVE] == 1) {
        // move towards center of the map
        for (const Unit *u : my_troops[FLAG_ATHLETE]) {
            store_cmd(u, _M(PointF(9.5, 9.5)), assigned_cmds);
        }
    }
    if (state[FLAGSTATE_ATTACK] == 1) {
        // don't care about flag, just attack enemy
        if (!enemy_troops[FLAG_ATHLETE].empty()) {
            UnitId target = enemy_troops[FLAG_ATHLETE][0]->GetId();
            for (const Unit *u : my_troops[FLAG_ATHLETE]) {
                store_cmd(u, _A(target), assigned_cmds);
            }
        }
    }*/
    return true;
}

bool CFRuleActor::GetFlagActSimpleState(const GameEnv &env, vector<int>* state) {
    vector<int> &_state = *state;
    const auto &enemy_troops = _preload.EnemyTroops();
    const auto &my_troops = _preload.MyTroops();

    if (! enemy_troops[FLAG].empty()) {
        _state[FLAGSTATE_GET_FLAG] = 1;
    } else {
        UnitId our_flag_athlete_id = INVALID;
        for (const Unit *u : my_troops[FLAG_ATHLETE]) {
            if (u->HasFlag()) {
                our_flag_athlete_id = u->GetId();
            }
        }
        // others, protect our flag athlete
        const Unit* our_flag_athlete = env.GetUnit(our_flag_athlete_id);
        if (our_flag_athlete != nullptr) {
            _state[FLAGSTATE_ESCORT_FLAG] = 1;
            _state[FLAGSTATE_PROTECT_FLAG] = 1;
        } else {
            _state[FLAGSTATE_ATTACK_FLAG] = 1;
        }
    }
    return true;
}
1,907
354
from pathlib import Path
import tempfile

import cog


class Predictor(cog.Predictor):
    def setup(self):
        self.foo = "foo"

    @cog.input("text", type=str)
    @cog.input("path", type=Path)
    def predict(self, text, path):
        with open(path) as f:
            output = self.foo + text + f.read()
        tmp = tempfile.NamedTemporaryFile(suffix=".txt")
        tmp.close()
        tmp_path = Path(tmp.name)
        with tmp_path.open("w") as f:
            f.write(output)
        return tmp_path
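
# --- Illustrative sketch (not part of the original file) ---
# Driving the predictor above directly, the way a unit test might; real
# deployments go through the Cog runtime ("cog predict").  The temporary
# directory and input text are invented for the example, and we assume the
# legacy cog.Predictor API allows the methods to be called as plain methods.
def run_once():
    import tempfile
    work_dir = Path(tempfile.mkdtemp())
    input_path = work_dir / "input.txt"
    input_path.write_text("bar")

    predictor = Predictor()
    predictor.setup()                                # loads the "model" state
    out_path = predictor.predict(text="-", path=input_path)
    return Path(out_path).read_text()                # -> "foo-bar"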
244
5,169
{
  "name": "LGPlaceholderView",
  "version": "1.0.0",
  "platforms": {
    "ios": "6.0"
  },
  "license": "MIT",
  "homepage": "https://github.com/Friend-LGA/LGPlaceholderView",
  "authors": {
    "<NAME>": "<EMAIL>"
  },
  "source": {
    "git": "https://github.com/Friend-LGA/LGPlaceholderView.git",
    "tag": "1.0.0"
  },
  "summary": "View covers everything inside view controller, and shows some alert text, progress bar or other view, when you need to hide content",
  "description": "View covers everything inside the view controller and shows some alert text, a progress bar, or another view when you need to hide content. For example, when you push a view controller and want to load some data from a server, LGPlaceholderView can show an activity indicator to the user while everything is prepared.",
  "requires_arc": true,
  "source_files": "LGPlaceholderView/*.{h,m}"
}
274
475
package com.appspot.com.empaemperium.resources;

import java.util.List;

import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.Consumes;

import com.google.gson.Gson;

@Path("/toplist")
public class TopListGsonResources {

    // private constants
    private static final int RETURN_IN_PAGE = 10;

    @GET
    @Produces("text/plain")
    public String getTopList() {
        return getTopList(1);
    }

    @GET
    @Produces("text/plain")
    @Consumes("text/plain")
    @Path("{id}")
    public String getTopList(@PathParam("id") String id) {
        return getTopList(Utilities.ConvertStringToInt(id));
    }

    private String getTopList(int id) {
        // check incoming value
        if (id <= 0)
            id = 1;

        int toId = (id * RETURN_IN_PAGE) - 1;
        int fromId = toId - (RETURN_IN_PAGE - 1);

        // get objects
        String query = "select from " + ObjectSimple.class.getName()
                + " range " + fromId + "," + toId;
        List<ObjectSimple> objects = Utilities.Datastore.getObjectsSimpleFromDS(query);

        return new Gson().toJson(objects);
    }
}
465
412
/*
Copyright 2015 Google Inc. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "libgraphdb/graphdbp.h"

/**
 * @brief Install a libcl-style logging interface.
 *
 * This isn't necessary -- in fact, one doesn't have to link
 * against libcl to link with libgraphdb -- but if you do happen
 * to be using the libcl framework, this is how you connect to it.
 *
 * If you call this function, you need to link against libcl.a.
 *
 * @param graphdb handle created with graphdb_create()
 * @param cl handle created with cl_create()
 *
 * Once the call completes, libgraphdb stops logging via its
 * builtin mechanism (or via any previously installed cl handle),
 * and starts logging via the cl handle's vlog function.
 *
 * The log vector can be changed at any time, as long as the
 * handle is valid.  If multiple threads are using the same graphdb
 * handle, it is up to the caller to make sure that they don't
 * interfere with each other.
 */
void graphdb_set_logging(graphdb_handle* graphdb, struct cl_handle* cl) {
  if (!GRAPHDB_IS_HANDLE(graphdb)) return;

  graphdb->graphdb_cl = cl;
  graphdb->graphdb_vlog = cl_vlog; /* this pulls in libcl.a */
}
516
1,812
#pragma once

#include <vector>
#include <string>
#include <fstream>
#include "Eigen/Dense"

class cFileUtil {
public:
    static FILE* OpenFile(const std::string& file_name, const char* mode);
    static FILE* OpenFile(const char* file_name, const char* mode);
    static void CloseFile(FILE*& f);
    static void ClearFile(const std::string& file_name);
    static void CreateFile(const std::string& file_name);
    static void DeleteFile(const char* file_name);
    static std::string RemoveExtension(const std::string& filename);
    static void DeleteFile(const std::string& filename);
    static long int GetFileSize(const std::string& filename);
    static std::string GetExtension(const std::string& filename);
    static std::string GetFilename(const std::string& path);
    static void FilterFilesByExtension(std::vector<std::string>& files, const std::string& ext);
    static bool ExistsFile(const std::string& file_name);
    static void FindLine(std::ifstream& f_stream, int line);
    static std::string ReadTextFile(const std::string& path);
    // static bool ReadArray(FILE* f, const std::string& tag_beg, const std::string& tag_end, std::vector<double>& out_buffer);
    static bool ReadTable(const std::string& filename, std::vector<std::vector<double>>& out_buffer);
    static bool ReadMatrix(const std::string& filename, Eigen::MatrixXd& out_mat);
    static bool WriteMatrix(const Eigen::MatrixXd& mat, const std::string& out_filename);
    static bool AppendText(const std::string& str, const std::string& out_filename);

private:
    static std::string ReadTextFile(FILE* f);
};
491
1,899
#include <opencv2/opencv.hpp>

// opencv legacy includes
#include <opencv2/imgproc/types_c.h>
#include <opencv2/imgproc/imgproc_c.h>

#include "MEImage.hpp"
#include "MEDefs.hpp"

#if CV_MAJOR_VERSION >= 2 && CV_MAJOR_VERSION <= 3

//using namespace bgslibrary::algorithms::lbp_mrf;

#define ME_CAST_TO_IPLIMAGE(image_ptr) ((IplImage*)image_ptr)
#define ME_RELEASE_IPLIMAGE(image_ptr) \
  cvReleaseImage((IplImage**)&image_ptr); \
  image_ptr = NULL;

namespace bgslibrary {
namespace algorithms {
namespace lbp_mrf {

// RGB to YUV transform
const float RGBtoYUVMatrix[3][3] =
  { {  0.299,  0.587,  0.114 },
    { -0.147, -0.289,  0.436 },
    {  0.615, -0.515, -0.100 } };

// RGB to YIQ transform
const float RGBtoYIQMatrix[3][3] =
  { { 0.299,  0.587,  0.114 },
    { 0.596, -0.274, -0.322 },
    { 0.212, -0.523,  0.311 } };

MEImage::MEImage(int width, int height, int layers) : cvImg(NULL) {
  _Init(width, height, layers);
}

MEImage::MEImage(const MEImage& other) : cvImg(NULL) {
  _Copy(other);
}

MEImage::~MEImage() {
  if (ME_CAST_TO_IPLIMAGE(cvImg)) {
    ME_RELEASE_IPLIMAGE(cvImg);
  }
}

void MEImage::Clear() {
  cvSetZero(ME_CAST_TO_IPLIMAGE(cvImg));
}

void MEImage::GetLayer(MEImage& new_layer, int layer_number) const {
  int LayerNumber = layer_number;

  if ((new_layer.GetWidth() != ME_CAST_TO_IPLIMAGE(cvImg)->width) ||
      (new_layer.GetHeight() != ME_CAST_TO_IPLIMAGE(cvImg)->height) ||
      (new_layer.GetLayers() != 1)) {
    new_layer.Realloc(ME_CAST_TO_IPLIMAGE(cvImg)->width, ME_CAST_TO_IPLIMAGE(cvImg)->height, 1);
  }
  if (ME_CAST_TO_IPLIMAGE(cvImg)->nChannels < LayerNumber) {
    printf("The given layer number is too large (%d > %d)\n", LayerNumber, ME_CAST_TO_IPLIMAGE(cvImg)->nChannels);
    LayerNumber = ME_CAST_TO_IPLIMAGE(cvImg)->nChannels;
  }
  if (LayerNumber <= 0) {
    printf("The given layer number is too small (%d <= 0)\n", LayerNumber);
    LayerNumber = 1;
  }
  cvSetImageCOI(ME_CAST_TO_IPLIMAGE(cvImg), LayerNumber);
  cvCopy(ME_CAST_TO_IPLIMAGE(cvImg), (IplImage*)new_layer.GetIplImage(), NULL);
  cvSetImageCOI(ME_CAST_TO_IPLIMAGE(cvImg), 0);
}

void MEImage::SetLayer(MEImage& layer, int layer_number) {
  int LayerNumber = layer_number;

  if (layer.GetWidth() != ME_CAST_TO_IPLIMAGE(cvImg)->width ||
      layer.GetHeight() != ME_CAST_TO_IPLIMAGE(cvImg)->height) {
    printf("The dimensions of the layer and destination image is different (%dx%d <> %dx%d)\n",
           layer.GetWidth(), layer.GetHeight(),
           ME_CAST_TO_IPLIMAGE(cvImg)->width, ME_CAST_TO_IPLIMAGE(cvImg)->height);
    return;
  }
  if (ME_CAST_TO_IPLIMAGE(cvImg)->nChannels < LayerNumber) {
    printf("The given layer number is too large (%d > %d)\n", LayerNumber, ME_CAST_TO_IPLIMAGE(cvImg)->nChannels);
    LayerNumber = ME_CAST_TO_IPLIMAGE(cvImg)->nChannels;
  }
  if (LayerNumber <= 0) {
    printf("The given layer number is too small (%d <= 0)\n", LayerNumber);
    LayerNumber = 1;
  }
  if (layer.GetLayers() != 1) {
    printf("The layer image has not one color channel (1 != %d)\n", layer.GetLayers());
    return;
  }
  cvSetImageCOI(ME_CAST_TO_IPLIMAGE(cvImg), LayerNumber);
  cvCopy((IplImage*)layer.GetIplImage(), ME_CAST_TO_IPLIMAGE(cvImg), NULL);
  cvSetImageCOI(ME_CAST_TO_IPLIMAGE(cvImg), 0);
}

void MEImage::CopyImageData(unsigned char* data) {
  memcpy(ME_CAST_TO_IPLIMAGE(cvImg)->imageData, data,
         ME_CAST_TO_IPLIMAGE(cvImg)->width*ME_CAST_TO_IPLIMAGE(cvImg)->height*ME_CAST_TO_IPLIMAGE(cvImg)->nChannels);
}

void* MEImage::GetIplImage() const {
  return (void*)ME_CAST_TO_IPLIMAGE(cvImg);
}

void MEImage::SetIplImage(void* image) {
  if (ME_CAST_TO_IPLIMAGE(cvImg)) {
    ME_RELEASE_IPLIMAGE(cvImg);
  }
  cvImg = cvCloneImage((IplImage*)image);
  // Correct the origin of the image if needed
  if (ME_CAST_TO_IPLIMAGE(cvImg)->origin == 1) {
    MirrorVertical();
    ME_CAST_TO_IPLIMAGE(cvImg)->origin = 0;
  }
}

bool MEImage::operator==(const MEImage& image) {
  return Equal(image);
}

bool MEImage::operator!=(const MEImage& image) {
  return !operator==(image);
}

MEImage& MEImage::operator=(const MEImage& other_image) {
  if (&other_image == this)
    return *this;

  _Copy(other_image);
  return *this;
}

int MEImage::GetWidth() const {
  return ME_CAST_TO_IPLIMAGE(cvImg) ? ME_CAST_TO_IPLIMAGE(cvImg)->width : 0;
}

int MEImage::GetRowWidth() const {
  return ME_CAST_TO_IPLIMAGE(cvImg) ? ME_CAST_TO_IPLIMAGE(cvImg)->widthStep : 0;
}

int MEImage::GetHeight() const {
  return ME_CAST_TO_IPLIMAGE(cvImg) ? ME_CAST_TO_IPLIMAGE(cvImg)->height : 0;
}

int MEImage::GetLayers() const {
  return ME_CAST_TO_IPLIMAGE(cvImg) ? ME_CAST_TO_IPLIMAGE(cvImg)->nChannels : 0;
}

int MEImage::GetPixelDataNumber() const {
  return ME_CAST_TO_IPLIMAGE(cvImg) ? GetWidth()*GetHeight()*GetLayers() : 0;
}

unsigned char* MEImage::GetImageData() const {
  return ME_CAST_TO_IPLIMAGE(cvImg) ? (unsigned char*)ME_CAST_TO_IPLIMAGE(cvImg)->imageData : NULL;
}

void MEImage::SetData(unsigned char* image_data, int width, int height, int channels) {
  _Init(width, height, channels);

  for (int y = height - 1; y >= 0; --y) {
    int Start = GetRowWidth()*y;
    int Start2 = width*channels*y;

    memcpy(&ME_CAST_TO_IPLIMAGE(cvImg)->imageData[Start], &image_data[Start2], width*channels);
  }
}

float MEImage::GetRatio() const {
  return ME_CAST_TO_IPLIMAGE(cvImg) ?
      (float)ME_CAST_TO_IPLIMAGE(cvImg)->height / (float)ME_CAST_TO_IPLIMAGE(cvImg)->width : 0.0;
}

void MEImage::Realloc(int width, int height) {
  Realloc(width, height, ME_CAST_TO_IPLIMAGE(cvImg)->nChannels);
}

void MEImage::Realloc(int width, int height, int layers) {
  _Init(width, height, layers);
}

void MEImage::Resize(int new_width, int new_height) {
  if (new_height < 1) {
    printf("Invalid new height: %d < 1\n", new_height);
    return;
  }
  if (new_width < 1) {
    printf("Invalid new width: %d < 1\n", new_width);
    return;
  }

  IplImage* TempImg = cvCreateImage(cvSize(new_width, new_height), 8, ME_CAST_TO_IPLIMAGE(cvImg)->nChannels);
  cvResize(ME_CAST_TO_IPLIMAGE(cvImg), TempImg, CV_INTER_NN);
  ME_RELEASE_IPLIMAGE(cvImg);
  cvImg = TempImg;
}

void MEImage::ResizeScaleX(int new_width) {
  if (new_width < 1) {
    printf("Invalid new width: %d < 1\n", new_width);
    return;
  }
  Resize(new_width, (int)((float)new_width*GetRatio()));
}

void MEImage::ResizeScaleY(int new_height) {
  if (new_height < 1) {
    printf("Invalid new height: %d < 1\n", new_height);
    return;
  }
  Resize((int)((float)new_height * 1 / GetRatio()), new_height);
}

void MEImage::MirrorHorizontal() {
  cvFlip(ME_CAST_TO_IPLIMAGE(cvImg), NULL, 1);
}

void MEImage::MirrorVertical() {
  cvFlip(ME_CAST_TO_IPLIMAGE(cvImg), NULL, 0);
}

void MEImage::Crop(int x1, int y1, int x2, int y2) {
  int NewX1 = x1;
  int NewY1 = y1;
  int NewX2 = x2;
  int NewY2 = y2;

  // Clamp the corner points to the image area
  NewX1 = (NewX1 < 0) ? 0 : NewX1;
  NewX1 = (NewX1 > ME_CAST_TO_IPLIMAGE(cvImg)->width) ? ME_CAST_TO_IPLIMAGE(cvImg)->width : NewX1;
  NewY1 = (NewY1 < 0) ? 0 : NewY1;
  NewY1 = (NewY1 > ME_CAST_TO_IPLIMAGE(cvImg)->height) ? ME_CAST_TO_IPLIMAGE(cvImg)->height : NewY1;
  NewX2 = (NewX2 < 0) ? 0 : NewX2;
  NewX2 = (NewX2 > ME_CAST_TO_IPLIMAGE(cvImg)->width) ? ME_CAST_TO_IPLIMAGE(cvImg)->width : NewX2;
  NewY2 = (NewY2 < 0) ? 0 : NewY2;
  NewY2 = (NewY2 > ME_CAST_TO_IPLIMAGE(cvImg)->height) ? ME_CAST_TO_IPLIMAGE(cvImg)->height : NewY2;

  if ((NewX2 - NewX1) <= 0) {
    printf("Invalid new width: %d <= 0\n", NewX2 - NewX1);
    return;
  }
  if ((NewY2 - NewY1) <= 0) {
    printf("Invalid new height: %d <= 0\n", NewY2 - NewY1);
    return;
  }

  IplImage* TempImg = cvCreateImage(cvSize(NewX2 - NewX1, NewY2 - NewY1), 8, ME_CAST_TO_IPLIMAGE(cvImg)->nChannels);
  cvSetImageROI(ME_CAST_TO_IPLIMAGE(cvImg), cvRect(NewX1, NewY1, NewX2 - NewX1, NewY2 - NewY1));
  cvCopy(ME_CAST_TO_IPLIMAGE(cvImg), TempImg);
  ME_RELEASE_IPLIMAGE(cvImg);
  cvImg = TempImg;
}

void MEImage::CopyImageInside(int x, int y, MEImage& source_image) {
  int NewX = x;
  int NewY = y;
  int PasteLengthX = source_image.GetWidth();
  int PasteLengthY = source_image.GetHeight();

  // Bring the source image to the same channel count
  if (ME_CAST_TO_IPLIMAGE(cvImg)->nChannels != source_image.GetLayers()) {
    if (source_image.GetLayers() == 1 && ME_CAST_TO_IPLIMAGE(cvImg)->nChannels == 3) {
      source_image.ConvertGrayscaleToRGB();
    }
    if (ME_CAST_TO_IPLIMAGE(cvImg)->nChannels == 1 && source_image.GetLayers() == 3) {
      source_image.ConvertToGrayscale(g_OpenCV);
    }
  }

  if (NewX < 0)
    NewX = 0;
  if (NewX > ME_CAST_TO_IPLIMAGE(cvImg)->width)
    NewX = ME_CAST_TO_IPLIMAGE(cvImg)->width;
  if (NewY < 0)
    NewY = 0;
  if (NewY > ME_CAST_TO_IPLIMAGE(cvImg)->height)
    NewY = ME_CAST_TO_IPLIMAGE(cvImg)->height;
  if (NewX + PasteLengthX > ME_CAST_TO_IPLIMAGE(cvImg)->width)
    PasteLengthX = ME_CAST_TO_IPLIMAGE(cvImg)->width - NewX;
  if (NewY + PasteLengthY > ME_CAST_TO_IPLIMAGE(cvImg)->height)
    PasteLengthY = ME_CAST_TO_IPLIMAGE(cvImg)->height - NewY;

  if (PasteLengthX != source_image.GetWidth() || PasteLengthY != source_image.GetHeight()) {
    source_image.Resize(PasteLengthX, PasteLengthY);
  }

  cvSetImageROI(ME_CAST_TO_IPLIMAGE(cvImg), cvRect(NewX, NewY, PasteLengthX, PasteLengthY));
  cvCopy((IplImage*)source_image.GetIplImage(), ME_CAST_TO_IPLIMAGE(cvImg));
  cvResetImageROI(ME_CAST_TO_IPLIMAGE(cvImg));
}

void MEImage::Erode(int iterations) {
  IplImage* TempImg = cvCreateImage(cvSize(ME_CAST_TO_IPLIMAGE(cvImg)->width, ME_CAST_TO_IPLIMAGE(cvImg)->height),
                                    8, ME_CAST_TO_IPLIMAGE(cvImg)->nChannels);

  cvErode(ME_CAST_TO_IPLIMAGE(cvImg), TempImg, NULL, iterations);
  ME_RELEASE_IPLIMAGE(cvImg);
  cvImg = TempImg;
}

void MEImage::Dilate(int iterations) {
  IplImage* TempImg = cvCreateImage(cvSize(ME_CAST_TO_IPLIMAGE(cvImg)->width, ME_CAST_TO_IPLIMAGE(cvImg)->height),
                                    8, ME_CAST_TO_IPLIMAGE(cvImg)->nChannels);

  cvDilate(ME_CAST_TO_IPLIMAGE(cvImg), TempImg, NULL, iterations);
  ME_RELEASE_IPLIMAGE(cvImg);
  cvImg = TempImg;
}

void MEImage::Smooth() {
  SmoothAdvanced(s_Median, 3);
}

void MEImage::SmoothAdvanced(SmoothType filtermode, int filtersize) {
  IplImage* TempImg = cvCreateImage(cvSize(ME_CAST_TO_IPLIMAGE(cvImg)->width, ME_CAST_TO_IPLIMAGE(cvImg)->height),
                                    8, ME_CAST_TO_IPLIMAGE(cvImg)->nChannels);

  switch (filtermode) {
    case s_Blur:
      cvSmooth(ME_CAST_TO_IPLIMAGE(cvImg), TempImg, CV_BLUR, filtersize, filtersize, 0);
      break;
    case s_Median:
      cvSmooth(ME_CAST_TO_IPLIMAGE(cvImg), TempImg, CV_MEDIAN, filtersize, 0, 0);
      break;
    case s_Gaussian:
      cvSmooth(ME_CAST_TO_IPLIMAGE(cvImg), TempImg, CV_GAUSSIAN, filtersize, filtersize, 0);
      break;
    default:
      cvSmooth(ME_CAST_TO_IPLIMAGE(cvImg), TempImg, CV_MEDIAN, filtersize, 0, 0);
      break;
  }
  ME_RELEASE_IPLIMAGE(cvImg);
  cvImg = TempImg;
}

void MEImage::Canny() {
  if (ME_CAST_TO_IPLIMAGE(cvImg)->nChannels > 1) {
    ConvertToGrayscale(g_OpenCV);
  }

  IplImage* TempImg = cvCreateImage(cvSize(ME_CAST_TO_IPLIMAGE(cvImg)->width, ME_CAST_TO_IPLIMAGE(cvImg)->height),
                                    8, ME_CAST_TO_IPLIMAGE(cvImg)->nChannels);

  cvCanny(ME_CAST_TO_IPLIMAGE(cvImg), TempImg, 800, 1100, 5);
  ME_RELEASE_IPLIMAGE(cvImg);
  cvImg = TempImg;
}

void MEImage::Laplace() {
  if (ME_CAST_TO_IPLIMAGE(cvImg)->nChannels != 1) {
    ConvertToGrayscale(g_OpenCV);
  }

  IplImage* TempImg = cvCreateImage(cvSize(ME_CAST_TO_IPLIMAGE(cvImg)->width, ME_CAST_TO_IPLIMAGE(cvImg)->height),
                                    IPL_DEPTH_16S, 1);

  cvLaplace(ME_CAST_TO_IPLIMAGE(cvImg), TempImg, 3);
  cvConvertScale(TempImg, ME_CAST_TO_IPLIMAGE(cvImg), 1, 0);
  // Release the temporary 16-bit image; the result lives in cvImg
  cvReleaseImage(&TempImg);
}

void MEImage::Quantize(int levels) {
  if (levels <= 0) {
    printf("Level number is too small (%d <= 0)\n", levels);
    return;
  }
  if (levels > 256) {
    printf("Level number is too large (%d > 256)\n", levels);
    return;
  }

  unsigned char* ImageData = (unsigned char*)ME_CAST_TO_IPLIMAGE(cvImg)->imageData;

  for (int i = ME_CAST_TO_IPLIMAGE(cvImg)->widthStep*ME_CAST_TO_IPLIMAGE(cvImg)->height - 1; i >= 0; --i) {
    ImageData[i] = ImageData[i] / (256 / levels)*(256 / levels);
  }
}

void MEImage::Threshold(int threshold_limit) {
  if (threshold_limit < 0) {
    printf("Threshold number is too small (%d < 0)\n", threshold_limit);
    return;
  }
  if (threshold_limit > 255) {
    printf("Threshold number is too large (%d > 255)\n", threshold_limit);
    return;
  }

  unsigned char* ImageData = (unsigned char*)ME_CAST_TO_IPLIMAGE(cvImg)->imageData;

  for (int i = ME_CAST_TO_IPLIMAGE(cvImg)->widthStep*ME_CAST_TO_IPLIMAGE(cvImg)->height - 1; i >= 0; --i) {
    if (ImageData[i] < threshold_limit) {
      ImageData[i] = 0;
    }
  }
}

void MEImage::AdaptiveThreshold() {
  if (ME_CAST_TO_IPLIMAGE(cvImg)->nChannels != 1) {
    ConvertToGrayscale(g_OpenCV);
  }

  IplImage* TempImg = cvCreateImage(cvSize(ME_CAST_TO_IPLIMAGE(cvImg)->width, ME_CAST_TO_IPLIMAGE(cvImg)->height),
                                    8, ME_CAST_TO_IPLIMAGE(cvImg)->nChannels);

  cvAdaptiveThreshold(ME_CAST_TO_IPLIMAGE(cvImg), TempImg, 25, CV_ADAPTIVE_THRESH_GAUSSIAN_C, CV_THRESH_BINARY, 7, -7);
  ME_RELEASE_IPLIMAGE(cvImg);
  cvImg = TempImg;
}

void MEImage::ThresholdByMask(MEImage& mask_image) {
  if (mask_image.GetWidth() != ME_CAST_TO_IPLIMAGE(cvImg)->width ||
      mask_image.GetHeight() != ME_CAST_TO_IPLIMAGE(cvImg)->height) {
    printf("Image properties are different\n");
    return;
  }
  if (mask_image.GetLayers() != 3 && ME_CAST_TO_IPLIMAGE(cvImg)->nChannels == 3) {
    mask_image.ConvertGrayscaleToRGB();
  }

  unsigned char* ImageData = (unsigned char*)ME_CAST_TO_IPLIMAGE(cvImg)->imageData;
  unsigned char* MaskImageData = mask_image.GetImageData();

  for (int i = ME_CAST_TO_IPLIMAGE(cvImg)->widthStep*ME_CAST_TO_IPLIMAGE(cvImg)->height - 1; i >= 0; --i) {
    if (MaskImageData[i] == 0) {
      ImageData[i] = 0;
    }
  }
}

void MEImage::ColorSpace(ColorSpaceConvertType mode) {
  if (ME_CAST_TO_IPLIMAGE(cvImg)->nChannels == 1) {
    printf("No sense to convert: source image is greyscale\n");
    ConvertGrayscaleToRGB();
  }

  // Conversions that map directly to an OpenCV color conversion code
  int CvtCode = -1;
  switch (mode) {
    case csc_RGBtoXYZCIED65: CvtCode = CV_RGB2XYZ; break;
    case csc_XYZCIED65toRGB: CvtCode = CV_XYZ2RGB; break;
    case csc_RGBtoHSV:       CvtCode = CV_RGB2HSV; break;
    case csc_HSVtoRGB:       CvtCode = CV_HSV2RGB; break;
    case csc_RGBtoHLS:       CvtCode = CV_RGB2HLS; break;
    case csc_HLStoRGB:       CvtCode = CV_HLS2RGB; break;
    case csc_RGBtoCIELab:    CvtCode = CV_RGB2Lab; break;
    case csc_CIELabtoRGB:    CvtCode = CV_Lab2RGB; break;
    case csc_RGBtoCIELuv:    CvtCode = CV_RGB2Luv; break;
    case csc_CIELuvtoRGB:    CvtCode = CV_Luv2RGB; break;
    default: break;
  }
  if (CvtCode != -1) {
    IplImage* TempImg = cvCreateImage(cvSize(ME_CAST_TO_IPLIMAGE(cvImg)->width, ME_CAST_TO_IPLIMAGE(cvImg)->height),
                                      8, ME_CAST_TO_IPLIMAGE(cvImg)->nChannels);

    cvCvtColor(ME_CAST_TO_IPLIMAGE(cvImg), TempImg, CvtCode);
    ME_RELEASE_IPLIMAGE(cvImg);
    cvImg = TempImg;
    return;
  }

  // Conversions computed by hand
  switch (mode) {
    case csc_RGBtoYUV:
      ComputeColorSpace(csc_RGBtoYUV);
      break;
    case csc_RGBtoYIQ:
      ComputeColorSpace(csc_RGBtoYIQ);
      break;
    case csc_RGBtorgI: {
      unsigned char* ImageData = (unsigned char*)ME_CAST_TO_IPLIMAGE(cvImg)->imageData;
      int WidthStep = ME_CAST_TO_IPLIMAGE(cvImg)->widthStep;
      int RowStart = 0;

      for (int y = ME_CAST_TO_IPLIMAGE(cvImg)->height - 1; y >= 0; --y) {
        for (int x = (ME_CAST_TO_IPLIMAGE(cvImg)->width - 1) * 3; x >= 0; x -= 3) {
          int I = (int)ImageData[RowStart + x] + (int)ImageData[RowStart + x + 1] + (int)ImageData[RowStart + x + 2];
          int r = (int)((float)ImageData[RowStart + x] / I * 255);
          int g = (int)((float)ImageData[RowStart + x + 1] / I * 255);

          ImageData[RowStart + x] = (unsigned char)r;
          ImageData[RowStart + x + 1] = (unsigned char)g;
          ImageData[RowStart + x + 2] = (unsigned char)(I / 3);
        }
        RowStart += WidthStep;
      }
      break;
    }
    default:
      break;
  }
}

void MEImage::ConvertToGrayscale(GrayscaleType grayscale_mode) {
  if (ME_CAST_TO_IPLIMAGE(cvImg)->nChannels == 1) {
    printf("Image is already grayscale\n");
    return;
  }

  IplImage* TempImg = NULL;
  unsigned char* ImgData = (unsigned char*)ME_CAST_TO_IPLIMAGE(cvImg)->imageData;
  unsigned char* ImageData = NULL;

  switch (grayscale_mode) {
    case g_Average:
      TempImg = cvCreateImage(cvSize(ME_CAST_TO_IPLIMAGE(cvImg)->width, ME_CAST_TO_IPLIMAGE(cvImg)->height), 8, 1);
      ImageData = (unsigned char*)TempImg->imageData;

      for (int i = ME_CAST_TO_IPLIMAGE(cvImg)->widthStep*ME_CAST_TO_IPLIMAGE(cvImg)->height - 3; i >= 0; i -= 3) {
        ImageData[i / 3] = (ImgData[i] + ImgData[i + 1] + ImgData[i + 2]) / 3;
      }
      ME_RELEASE_IPLIMAGE(cvImg);
      cvImg = TempImg;
      break;
    case g_OpenCV:
      TempImg = cvCreateImage(cvSize(ME_CAST_TO_IPLIMAGE(cvImg)->width, ME_CAST_TO_IPLIMAGE(cvImg)->height), 8, 1);

      cvCvtColor(ME_CAST_TO_IPLIMAGE(cvImg), TempImg, CV_RGB2GRAY);
      ME_RELEASE_IPLIMAGE(cvImg);
      cvImg = TempImg;
      break;
    default:
      break;
  }
}

void MEImage::ConvertGrayscaleToRGB() {
  if (ME_CAST_TO_IPLIMAGE(cvImg)->nChannels != 1) {
    return;
  }

  IplImage* TempImg = cvCreateImage(cvSize(ME_CAST_TO_IPLIMAGE(cvImg)->width, ME_CAST_TO_IPLIMAGE(cvImg)->height), 8, 3);

  cvCvtColor(ME_CAST_TO_IPLIMAGE(cvImg), TempImg, CV_GRAY2RGB);
  ME_RELEASE_IPLIMAGE(cvImg);
  cvImg = TempImg;
}

void MEImage::ConvertBGRToRGB() {
  if (ME_CAST_TO_IPLIMAGE(cvImg)->nChannels != 3) {
    return;
  }
  cvCvtColor(ME_CAST_TO_IPLIMAGE(cvImg), ME_CAST_TO_IPLIMAGE(cvImg), CV_RGB2BGR);
}

void MEImage::LBP(LBPType mode) {
  if (ME_CAST_TO_IPLIMAGE(cvImg)->nChannels > 1) {
    ConvertToGrayscale(g_OpenCV);
  }

  unsigned char* ImageData = (unsigned char*)ME_CAST_TO_IPLIMAGE(cvImg)->imageData;
  IplImage* TempImg = cvCreateImage(cvSize(ME_CAST_TO_IPLIMAGE(cvImg)->width, ME_CAST_TO_IPLIMAGE(cvImg)->height), 8, 1);
  unsigned char* TempImgData = (unsigned char*)TempImg->imageData;
  int WidthStep = ME_CAST_TO_IPLIMAGE(cvImg)->widthStep;
  int WidthStep_2 = ME_CAST_TO_IPLIMAGE(cvImg)->widthStep * 2;

  cvSetZero(TempImg);

  switch (mode) {
    case lbp_Normal:
      // Classic 8-neighbour LBP code
      for (int i = ME_CAST_TO_IPLIMAGE(cvImg)->widthStep*(ME_CAST_TO_IPLIMAGE(cvImg)->height - 2) - 1;
           i >= ME_CAST_TO_IPLIMAGE(cvImg)->widthStep + 1; --i) {
        TempImgData[i] =
          (ImageData[i] <= ImageData[i - WidthStep - 1]) +
          ((ImageData[i] <= ImageData[i - WidthStep]) * 2) +
          ((ImageData[i] <= ImageData[i - WidthStep + 1]) * 4) +
          ((ImageData[i] <= ImageData[i - 1]) * 8) +
          ((ImageData[i] <= ImageData[i + 1]) * 16) +
          ((ImageData[i] <= ImageData[i + WidthStep - 1]) * 32) +
          ((ImageData[i] <= ImageData[i + WidthStep]) * 64) +
          ((ImageData[i] <= ImageData[i + WidthStep + 1]) * 128);
      }
      break;
    case lbp_Special:
      // LBP over averaged 2x2 / 1x2 neighbourhoods at radius two
      for (int i = ME_CAST_TO_IPLIMAGE(cvImg)->widthStep*(ME_CAST_TO_IPLIMAGE(cvImg)->height - 3) - 2;
           i >= ME_CAST_TO_IPLIMAGE(cvImg)->widthStep * 2 + 2; --i) {
        int CenterPixel = (ImageData[i + 1] + ImageData[i - 1] +
                           ImageData[i - WidthStep] + ImageData[i + WidthStep]) / 4;

        TempImgData[i] =
          ((CenterPixel <= (ImageData[i - WidthStep_2 - 2] + ImageData[i - WidthStep_2 - 1] +
                            ImageData[i - WidthStep - 2] + ImageData[i - WidthStep - 1]) / 4)) +
          ((CenterPixel <= (ImageData[i - WidthStep] + ImageData[i - WidthStep_2]) / 2) * 2) +
          ((CenterPixel <= ((ImageData[i - WidthStep_2 + 2] + ImageData[i - WidthStep_2 + 1] +
                             ImageData[i - WidthStep + 2] + ImageData[i - WidthStep + 1]) / 4)) * 4) +
          ((CenterPixel <= (ImageData[i - 1] + ImageData[i - 2]) / 2) * 8) +
          ((CenterPixel <= (ImageData[i + 1] + ImageData[i + 2]) / 2) * 16) +
          ((CenterPixel <= ((ImageData[i + WidthStep_2 - 2] + ImageData[i + WidthStep_2 - 1] +
                             ImageData[i + WidthStep - 2] + ImageData[i + WidthStep - 1]) / 4)) * 32) +
          ((CenterPixel <= (ImageData[i + WidthStep] + ImageData[i + WidthStep_2]) / 2) * 64) +
          ((CenterPixel <= ((ImageData[i + WidthStep_2 + 2] + ImageData[i + WidthStep_2 + 1] +
                             ImageData[i + WidthStep + 2] + ImageData[i + WidthStep + 1]) / 4)) * 128);
      }
      break;
    default:
      break;
  }
  ME_RELEASE_IPLIMAGE(cvImg);
  cvImg = TempImg;
}

void MEImage::Binarize(int threshold) {
  unsigned char* ImageData = (unsigned char*)ME_CAST_TO_IPLIMAGE(cvImg)->imageData;

  for (int i = ME_CAST_TO_IPLIMAGE(cvImg)->height*ME_CAST_TO_IPLIMAGE(cvImg)->widthStep - 1; i >= 0; --i) {
    if (ImageData[i] >= threshold) {
      ImageData[i] = 255;
    } else {
      ImageData[i] = 0;
    }
  }
}

void MEImage::Subtract(MEImage& source, SubtractModeType mode) {
  if (source.GetWidth() != ME_CAST_TO_IPLIMAGE(cvImg)->width ||
      source.GetHeight() != ME_CAST_TO_IPLIMAGE(cvImg)->height ||
      source.GetLayers() != ME_CAST_TO_IPLIMAGE(cvImg)->nChannels) {
    printf("Image properties are different.\n");
    return;
  }

  unsigned char* ImageData = (unsigned char*)ME_CAST_TO_IPLIMAGE(cvImg)->imageData;
  unsigned char* DstData = source.GetImageData();
  int WidthStep = ME_CAST_TO_IPLIMAGE(cvImg)->widthStep;
  int RowStart = 0;

  switch (mode) {
    case sub_Normal:
      // Saturating subtraction: clamp negative results to zero
      for (int y = ME_CAST_TO_IPLIMAGE(cvImg)->height - 1; y >= 0; --y) {
        for (int x = ME_CAST_TO_IPLIMAGE(cvImg)->width*ME_CAST_TO_IPLIMAGE(cvImg)->nChannels - 1; x >= 0; --x) {
          ImageData[RowStart + x] =
            ImageData[RowStart + x] - DstData[RowStart + x] < 0 ?
            0 : ImageData[RowStart + x] - DstData[RowStart + x];
        }
        RowStart += WidthStep;
      }
      break;
    case sub_Absolut:
      // Absolute difference of the two images
      for (int y = ME_CAST_TO_IPLIMAGE(cvImg)->height - 1; y >= 0; --y) {
        for (int x = ME_CAST_TO_IPLIMAGE(cvImg)->width*ME_CAST_TO_IPLIMAGE(cvImg)->nChannels - 1; x >= 0; --x) {
          ImageData[RowStart + x] =
            ImageData[RowStart + x] - DstData[RowStart + x] < 0 ?
            -ImageData[RowStart + x] + DstData[RowStart + x] :
            ImageData[RowStart + x] - DstData[RowStart + x];
        }
        RowStart += WidthStep;
      }
      break;
    default:
      break;
  }
}

void MEImage::Multiple(MEImage& source, MultiplicationType mode) {
  if (source.GetWidth() != ME_CAST_TO_IPLIMAGE(cvImg)->width ||
      source.GetHeight() != ME_CAST_TO_IPLIMAGE(cvImg)->height ||
      source.GetLayers() != ME_CAST_TO_IPLIMAGE(cvImg)->nChannels) {
    printf("Image properties are different.\n");
    return;
  }

  float Result = 0.0;
  IplImage* TempImg = NULL;
  unsigned char* ImageData = NULL;
  unsigned char* ImageData2 = NULL;
  unsigned char* ImageData3 = NULL;
  unsigned char* DstData = NULL;

  switch (mode) {
    case m_Normal:
      Result = 0;
      ImageData = (unsigned char*)ME_CAST_TO_IPLIMAGE(cvImg)->imageData;
      DstData = source.GetImageData();

      for (int i = ME_CAST_TO_IPLIMAGE(cvImg)->height*ME_CAST_TO_IPLIMAGE(cvImg)->widthStep - 1; i >= 0; --i) {
        if ((ImageData[i] >= 128) && (DstData[i] >= 128)) {
          Result = (float)ImageData[i] / 128 * (float)DstData[i] / 128;
          if (Result >= 1) {
            ImageData[i] = 255;
          } else {
            ImageData[i] = 0;
          }
        } else {
          ImageData[i] = 0;
        }
      }
      break;
    case m_Neighbourhood:
      TempImg = cvCreateImage(cvSize(ME_CAST_TO_IPLIMAGE(cvImg)->width, ME_CAST_TO_IPLIMAGE(cvImg)->height),
                              8, ME_CAST_TO_IPLIMAGE(cvImg)->nChannels);
      // Start from an all-zero result image
      cvSetZero(TempImg);
      ImageData2 = (unsigned char*)ME_CAST_TO_IPLIMAGE(cvImg)->imageData;
      DstData = source.GetImageData();
      ImageData3 = (unsigned char*)TempImg->imageData;

      for (int y = ME_CAST_TO_IPLIMAGE(cvImg)->height - 1; y >= 0; --y)
        for (int x = ME_CAST_TO_IPLIMAGE(cvImg)->width - 1; x >= 0; --x)
          for (int l = ME_CAST_TO_IPLIMAGE(cvImg)->nChannels - 1; l >= 0; --l) {
            if (((DstData[y*ME_CAST_TO_IPLIMAGE(cvImg)->width*ME_CAST_TO_IPLIMAGE(cvImg)->nChannels + x*ME_CAST_TO_IPLIMAGE(cvImg)->nChannels + l] == 255) ||
                 (ImageData2[y*ME_CAST_TO_IPLIMAGE(cvImg)->width*ME_CAST_TO_IPLIMAGE(cvImg)->nChannels + x*ME_CAST_TO_IPLIMAGE(cvImg)->nChannels + l] == 255)) &&
                (NeighbourhoodCounter(x - 2, y - 2, n_5x5) > 3) &&
                (source.NeighbourhoodCounter(x - 2, y - 2, n_5x5) > 3)) {
              ImageData3[y*ME_CAST_TO_IPLIMAGE(cvImg)->width*ME_CAST_TO_IPLIMAGE(cvImg)->nChannels + x*ME_CAST_TO_IPLIMAGE(cvImg)->nChannels + l] = 255;
            }
          }
      ME_RELEASE_IPLIMAGE(cvImg);
      cvImg = TempImg;
      break;
    default:
      break;
  }
}

void MEImage::Addition(MEImage& source, AdditionType mode) {
  if (source.GetWidth() != ME_CAST_TO_IPLIMAGE(cvImg)->width ||
      source.GetHeight() != ME_CAST_TO_IPLIMAGE(cvImg)->height ||
      source.GetLayers() != ME_CAST_TO_IPLIMAGE(cvImg)->nChannels) {
    printf("Image properties are different.\n");
    return;
  }

  unsigned char* ImageData = (unsigned char*)ME_CAST_TO_IPLIMAGE(cvImg)->imageData;
  unsigned char* DstData = source.GetImageData();

  switch (mode) {
    case a_Average:
      for (int i = ME_CAST_TO_IPLIMAGE(cvImg)->height*ME_CAST_TO_IPLIMAGE(cvImg)->widthStep - 1; i >= 0; --i) {
        ImageData[i] = (ImageData[i] + DstData[i]) / 2;
      }
      break;
    case a_Union:
      for (int i = ME_CAST_TO_IPLIMAGE(cvImg)->height*ME_CAST_TO_IPLIMAGE(cvImg)->widthStep - 1; i >= 0; --i) {
        if (DstData[i] > ImageData[i]) {
          ImageData[i] = DstData[i];
        }
      }
      break;
    default:
      break;
  }
}

void MEImage::EliminateSinglePixels() {
  IplImage* TempImg = cvCreateImage(cvSize(ME_CAST_TO_IPLIMAGE(cvImg)->width, ME_CAST_TO_IPLIMAGE(cvImg)->height),
                                    8, ME_CAST_TO_IPLIMAGE(cvImg)->nChannels);
  unsigned char* ImageData = (unsigned char*)ME_CAST_TO_IPLIMAGE(cvImg)->imageData;
  unsigned char* DstData = (unsigned char*)TempImg->imageData;
  int sum = 0;
  int xy = 0;
  int ywidth = ME_CAST_TO_IPLIMAGE(cvImg)->widthStep;

  for (int y = ME_CAST_TO_IPLIMAGE(cvImg)->height - 1; y >= 0; --y)
    for (int x = ME_CAST_TO_IPLIMAGE(cvImg)->width - 1; x >= 0; --x) {
      xy = y*ywidth + x*ME_CAST_TO_IPLIMAGE(cvImg)->nChannels;

      for (int l = ME_CAST_TO_IPLIMAGE(cvImg)->nChannels - 1; l >= 0; --l) {
        if ((ImageData[xy + l] > 0) && (x > 0) && (y > 0) &&
            (x < ME_CAST_TO_IPLIMAGE(cvImg)->width - 1) && (y < ME_CAST_TO_IPLIMAGE(cvImg)->height - 1)) {
          // Count the set pixels in the 8-neighbourhood
          sum = (ImageData[xy - ywidth - ME_CAST_TO_IPLIMAGE(cvImg)->nChannels + l] > 0) +
                (ImageData[xy - ywidth + l] > 0) +
                (ImageData[xy - ywidth + ME_CAST_TO_IPLIMAGE(cvImg)->nChannels + l] > 0) +
                (ImageData[xy - ME_CAST_TO_IPLIMAGE(cvImg)->nChannels + l] > 0) +
                (ImageData[xy + ME_CAST_TO_IPLIMAGE(cvImg)->nChannels + l] > 0) +
                (ImageData[xy + ywidth - ME_CAST_TO_IPLIMAGE(cvImg)->nChannels + l] > 0) +
                (ImageData[xy + ywidth + l] > 0) +
                (ImageData[xy + ywidth + ME_CAST_TO_IPLIMAGE(cvImg)->nChannels + l] > 0);
          if (sum > 3) {
            DstData[xy + l] = 255;
          } else {
            DstData[xy + l] = 0;
          }
        } else {
          DstData[xy + l] = 0;
        }
      }
    }
  ME_RELEASE_IPLIMAGE(cvImg);
  cvImg = TempImg;
}

float MEImage::DifferenceAreas(MEImage& reference, int difference) const {
  if (reference.GetWidth() != GetWidth() ||
      reference.GetHeight() != GetHeight() ||
      reference.GetLayers() != GetLayers()) {
    printf("Image dimensions or channels are different\n");
    return -1.0;
  }

  float PixelDiff = 0.0;
  int Pixels = 0;
  unsigned char* OrigImgData = (unsigned char*)ME_CAST_TO_IPLIMAGE(cvImg)->imageData;
  unsigned char* RefImgData = reference.GetImageData();
  int WidthStep = ME_CAST_TO_IPLIMAGE(cvImg)->widthStep;
  int RowStart = 0;

  for (int y = ME_CAST_TO_IPLIMAGE(cvImg)->height - 1; y >= 0; --y) {
    for (int x = ME_CAST_TO_IPLIMAGE(cvImg)->width*ME_CAST_TO_IPLIMAGE(cvImg)->nChannels - 1; x >= 0; --x) {
      if (abs(OrigImgData[RowStart + x] - RefImgData[RowStart + x]) > difference)
        Pixels++;
    }
    RowStart += WidthStep;
  }

  PixelDiff = (float)Pixels / (ME_CAST_TO_IPLIMAGE(cvImg)->height*ME_CAST_TO_IPLIMAGE(cvImg)->widthStep) * 100;
  return PixelDiff;
}

int MEImage::AverageDifference(MEImage& reference) const {
  if (reference.GetWidth() != GetWidth() ||
      reference.GetHeight() != GetHeight() ||
      reference.GetLayers() != GetLayers()) {
    printf("Image dimensions or channels are different\n");
    return -1;
  }

  int Difference = 0;
  unsigned char* OrigImgData = (unsigned char*)ME_CAST_TO_IPLIMAGE(cvImg)->imageData;
  unsigned char* RefImgData = reference.GetImageData();
  int WidthStep = ME_CAST_TO_IPLIMAGE(cvImg)->widthStep;
  int RowStart = 0;

  for (int y = ME_CAST_TO_IPLIMAGE(cvImg)->height - 1; y >= 0; --y) {
    for (int x = ME_CAST_TO_IPLIMAGE(cvImg)->width*ME_CAST_TO_IPLIMAGE(cvImg)->nChannels - 1; x >= 0; --x) {
      Difference += abs(OrigImgData[RowStart + x] - RefImgData[RowStart + x]);
    }
    RowStart += WidthStep;
  }

  Difference = Difference / (ME_CAST_TO_IPLIMAGE(cvImg)->height*ME_CAST_TO_IPLIMAGE(cvImg)->widthStep);
  return Difference;
}

void MEImage::Minimum(MEImage& image) {
  if (image.GetWidth() != ME_CAST_TO_IPLIMAGE(cvImg)->width ||
      image.GetHeight() != ME_CAST_TO_IPLIMAGE(cvImg)->height ||
      image.GetLayers() != ME_CAST_TO_IPLIMAGE(cvImg)->nChannels) {
    printf("Image properties are different\n");
    return;
  }

  unsigned char* ImageData = (unsigned char*)ME_CAST_TO_IPLIMAGE(cvImg)->imageData;
  unsigned char* SecData = image.GetImageData();
  int WidthStep = ME_CAST_TO_IPLIMAGE(cvImg)->widthStep;
  int RowStart = 0;

  for (int y = ME_CAST_TO_IPLIMAGE(cvImg)->height - 1; y >= 0; --y) {
    for (int x = ME_CAST_TO_IPLIMAGE(cvImg)->width*ME_CAST_TO_IPLIMAGE(cvImg)->nChannels - 1; x >= 0; --x) {
      ImageData[RowStart + x] =
        ImageData[RowStart + x] > SecData[RowStart + x] ? SecData[RowStart + x] : ImageData[RowStart + x];
    }
    RowStart += WidthStep;
  }
}

float MEImage::AverageBrightnessLevel() const {
  unsigned char* ImageData = (unsigned char*)ME_CAST_TO_IPLIMAGE(cvImg)->imageData;
  int WidthStep = ME_CAST_TO_IPLIMAGE(cvImg)->widthStep;
  int RowStart = 0;
  int BrightnessLevel = 0;

  for (int y = ME_CAST_TO_IPLIMAGE(cvImg)->height - 1; y >= 0; --y) {
    for (int x = ME_CAST_TO_IPLIMAGE(cvImg)->width*ME_CAST_TO_IPLIMAGE(cvImg)->nChannels - 1; x >= 0; --x) {
      BrightnessLevel += (int)ImageData[RowStart + x];
    }
    RowStart += WidthStep;
  }
  return (float)BrightnessLevel / (GetWidth()*GetHeight()*GetLayers());
}

bool MEImage::Equal(const MEImage& reference) const {
  return Equal(reference, 1);
}

bool MEImage::Equal(const MEImage& reference, int maxabsdiff) const {
  if (reference.GetWidth() != ME_CAST_TO_IPLIMAGE(cvImg)->width ||
      reference.GetHeight() != ME_CAST_TO_IPLIMAGE(cvImg)->height ||
      reference.GetLayers() != ME_CAST_TO_IPLIMAGE(cvImg)->nChannels) {
    printf("Image properties are different\n");
    return false;
  }

  unsigned char* ImageData = (unsigned char*)ME_CAST_TO_IPLIMAGE(cvImg)->imageData;
  unsigned char* RefData = reference.GetImageData();
  int WidthStep = ME_CAST_TO_IPLIMAGE(cvImg)->widthStep;
  int RowStart = 0;

  for (int y = ME_CAST_TO_IPLIMAGE(cvImg)->height - 1; y >= 0; --y) {
    for (int x = ME_CAST_TO_IPLIMAGE(cvImg)->width*ME_CAST_TO_IPLIMAGE(cvImg)->nChannels - 1; x >= 0; --x) {
      if (abs(ImageData[RowStart + x] - RefData[RowStart + x]) >= maxabsdiff) {
        return false;
      }
    }
    RowStart += WidthStep;
  }
  return true;
}

unsigned char MEImage::GrayscalePixel(int x, int y) const {
  int NewX = x;
  int NewY = y;

  NewX = NewX < 0 ? 0 : NewX;
  NewX = NewX > ME_CAST_TO_IPLIMAGE(cvImg)->width - 1 ? ME_CAST_TO_IPLIMAGE(cvImg)->width - 1 : NewX;
  NewY = NewY < 0 ? 0 : NewY;
  NewY = NewY > ME_CAST_TO_IPLIMAGE(cvImg)->height - 1 ? ME_CAST_TO_IPLIMAGE(cvImg)->height - 1 : NewY;

  float Sum = 0;
  unsigned char* ImageData = (unsigned char*)ME_CAST_TO_IPLIMAGE(cvImg)->imageData;

  for (int l = 0; l < ME_CAST_TO_IPLIMAGE(cvImg)->nChannels; l++) {
    Sum = Sum + (int)ImageData[NewY*ME_CAST_TO_IPLIMAGE(cvImg)->width*ME_CAST_TO_IPLIMAGE(cvImg)->nChannels + NewX*ME_CAST_TO_IPLIMAGE(cvImg)->nChannels + l];
  }
  Sum = Sum / ME_CAST_TO_IPLIMAGE(cvImg)->nChannels;
  return (unsigned char)(Sum);
}

int MEImage::NeighbourhoodCounter(int startx, int starty, NeighbourhoodType neighbourhood) const {
  int IterX = 0;
  int IterY = 0;
  int Counter = 0;

  // Determine the iteration numbers
  switch (neighbourhood) {
    case n_2x2:
      IterX = 2;
      IterY = 2;
      break;
    case n_3x3:
      IterX = 3;
      IterY = 3;
      break;
    case n_3x2:
      IterX = 2;
      IterY = 3;
      break;
    case n_5x5:
      IterX = 5;
      IterY = 5;
      break;
    case n_7x7:
      IterX = 7;
      IterY = 7;
      break;
    default:
      IterX = 3;
      IterY = 3;
      break;
  }

  int NewStartX = startx;
  int NewStartY = starty;

  NewStartX = startx < 0 ? 0 : startx;
  NewStartX = startx >= ME_CAST_TO_IPLIMAGE(cvImg)->width - IterX ? ME_CAST_TO_IPLIMAGE(cvImg)->width - IterX - 1 : startx;
  NewStartY = starty < 0 ? 0 : starty;
  NewStartY = starty >= ME_CAST_TO_IPLIMAGE(cvImg)->height - IterY ? ME_CAST_TO_IPLIMAGE(cvImg)->height - IterY - 1 : starty;

  int Value = 0;
  unsigned char* ImageData = (unsigned char*)ME_CAST_TO_IPLIMAGE(cvImg)->imageData;

  for (int x = NewStartX; x < NewStartX + IterX; x++)
    for (int y = NewStartY; y < NewStartY + IterY; y++) {
      Value = ((int)ImageData[y*ME_CAST_TO_IPLIMAGE(cvImg)->width*ME_CAST_TO_IPLIMAGE(cvImg)->nChannels + x*ME_CAST_TO_IPLIMAGE(cvImg)->nChannels] +
               (int)ImageData[y*ME_CAST_TO_IPLIMAGE(cvImg)->width*ME_CAST_TO_IPLIMAGE(cvImg)->nChannels + x*ME_CAST_TO_IPLIMAGE(cvImg)->nChannels + 1] +
               (int)ImageData[y*ME_CAST_TO_IPLIMAGE(cvImg)->width*ME_CAST_TO_IPLIMAGE(cvImg)->nChannels + x*ME_CAST_TO_IPLIMAGE(cvImg)->nChannels + 2]) / 3;
      if (Value == 255) {
        Counter++;
      }
    }
  return Counter;
}

void MEImage::GradientVector(bool smooth, int x, int y, int mask_size, int& result_x, int& result_y) {
  int Results[8];
  int DiagonalMaskSize = (int)((float)mask_size / sqrtf(2));

  if (ME_CAST_TO_IPLIMAGE(cvImg)->nChannels > 1) {
    ConvertToGrayscale(g_OpenCV);
  }
  if (smooth) {
    SmoothAdvanced(s_Gaussian, mask_size * 3 - (mask_size * 3 - 1) % 2);
  }

  // Differences towards the eight compass directions (N, NE, E, SE, S, SW, W, NW)
  Results[0] = (int)GrayscalePixel(x, y) - (int)GrayscalePixel(x, y - mask_size);
  Results[1] = (int)GrayscalePixel(x, y) - (int)GrayscalePixel(x + DiagonalMaskSize, y - DiagonalMaskSize);
  Results[2] = (int)GrayscalePixel(x, y) - (int)GrayscalePixel(x + mask_size, y);
  Results[3] = (int)GrayscalePixel(x, y) - (int)GrayscalePixel(x + DiagonalMaskSize, y + DiagonalMaskSize);
  Results[4] = (int)GrayscalePixel(x, y) - (int)GrayscalePixel(x, y + mask_size);
  Results[5] = (int)GrayscalePixel(x, y) - (int)GrayscalePixel(x - DiagonalMaskSize, y + DiagonalMaskSize);
  Results[6] = (int)GrayscalePixel(x, y) - (int)GrayscalePixel(x - mask_size, y);
  Results[7] = (int)GrayscalePixel(x, y) - (int)GrayscalePixel(x - DiagonalMaskSize, y - DiagonalMaskSize);

  result_x = (DiagonalMaskSize*Results[1] + mask_size*Results[2] + DiagonalMaskSize*Results[3] -
              DiagonalMaskSize*Results[5] - mask_size*Results[6] + DiagonalMaskSize*Results[7]) / 256;
  result_y = (-mask_size*Results[0] - DiagonalMaskSize*Results[1] + DiagonalMaskSize*Results[3] +
              mask_size*Results[4] + DiagonalMaskSize*Results[5] - DiagonalMaskSize*Results[7]) / 256;
}

void MEImage::GradientVisualize(int vector_x, int vector_y) {
  if (vector_x <= 0) {
    printf("vectorx: wrong parameter (%d <= 0)\n", vector_x);
    return;
  }
  if (vector_y <= 0) {
    printf("vectory: wrong parameter (%d <= 0)\n", vector_y);
    return;
  }
  if (ME_CAST_TO_IPLIMAGE(cvImg)->nChannels > 1) {
    ConvertToGrayscale(g_OpenCV);
  }

  int masksize = (ME_CAST_TO_IPLIMAGE(cvImg)->width < ME_CAST_TO_IPLIMAGE(cvImg)->height) ?
      ME_CAST_TO_IPLIMAGE(cvImg)->width / (vector_x + 1) :
      ME_CAST_TO_IPLIMAGE(cvImg)->height / (vector_y + 1);

  SmoothAdvanced(s_Gaussian, masksize * 2 - 1);

  for (int i = 1; i < vector_x; i++)
    for (int i1 = 1; i1 < vector_y; i1++) {
      int Resultx = 0, Resulty = 0;
      int x = (int)(((float)ME_CAST_TO_IPLIMAGE(cvImg)->width*i / (vector_x)));
      int y = (int)(((float)ME_CAST_TO_IPLIMAGE(cvImg)->height*i1 / (vector_y)));

      GradientVector(false, x, y, (int)(0.707*masksize), Resultx, Resulty);

      CvPoint Point1;
      CvPoint Point2;

      Point1.x = x - Resultx / 2;
      Point1.y = y - Resulty / 2;
      Point2.x = x + Resultx / 2;
      Point2.y = y + Resulty / 2;

      cvLine(ME_CAST_TO_IPLIMAGE(cvImg), Point1, Point2, CV_RGB(255, 255, 255), 1, 8);
    }
}

bool MEImage::_Copy(const MEImage& other_image) {
  if (&other_image == this)
    return true;

  if (ME_CAST_TO_IPLIMAGE(cvImg)) {
    ME_RELEASE_IPLIMAGE(cvImg);
  }
  cvImg = cvCloneImage((IplImage*)other_image.GetIplImage());
  return true;
}

void MEImage::_Init(int width, int height, int layers) {
  if (width < 1) {
    printf("Given width for the new image is too small (%d <= 0)\n", width);
    return;
  }
  if (height < 1) {
    printf("Given height for the new image is too small (%d <= 0)\n", height);
    return;
  }
  if ((layers != 1) && (layers != 3)) {
    printf("Only one or three (%d != 1 or 3) layer allowed\n", layers);
    return;
  }
  if (ME_CAST_TO_IPLIMAGE(cvImg)) {
    ME_RELEASE_IPLIMAGE(cvImg);
  }
  cvImg = cvCreateImage(cvSize(width, height), 8, layers);
}

void MEImage::ComputeColorSpace(ColorSpaceConvertType mode) {
  if (ME_CAST_TO_IPLIMAGE(cvImg)->nChannels != 3) {
    printf("Image has to have three color channels (%d != 3)\n", ME_CAST_TO_IPLIMAGE(cvImg)->nChannels);
    return;
  }

  IplImage* TempImg = cvCreateImage(cvSize(ME_CAST_TO_IPLIMAGE(cvImg)->width, ME_CAST_TO_IPLIMAGE(cvImg)->height),
                                    8, ME_CAST_TO_IPLIMAGE(cvImg)->nChannels);

  // Select the 3x3 transform for the requested conversion
  float TransformMatrix[3][3] = { { 0.0 } };

  for (int i = 0; i < 3; i++)
    for (int i1 = 0; i1 < 3; i1++) {
      if (mode == csc_RGBtoYUV)
        TransformMatrix[i][i1] = RGBtoYUVMatrix[i][i1];
      if (mode == csc_RGBtoYIQ)
        TransformMatrix[i][i1] = RGBtoYIQMatrix[i][i1];
    }

  float x = 0.0;
  float y = 0.0;
  float z = 0.0;
  float xmin = 0.0;
  float xmax = 0.0;
  float ymin = 0.0;
  float ymax = 0.0;
  float zmin = 0.0;
  float zmax = 0.0;

  if (mode == csc_RGBtoYUV) {
    xmin = 0.0;
    xmax = 255.0;
    ymin = -111.18;
    ymax = 111.18;
    zmin = -156.825;
    zmax = 156.825;
  }
  if (mode == csc_RGBtoYIQ) {
    xmin = 0.0;
    xmax = 255.0;
    ymin = -151.98;
    ymax = 151.98;
    zmin = -133.365;
    zmax = 133.365;
  }

  unsigned char* SrcData = (unsigned char*)ME_CAST_TO_IPLIMAGE(cvImg)->imageData;
  unsigned char* DstData = (unsigned char*)TempImg->imageData;

  for (int i = ME_CAST_TO_IPLIMAGE(cvImg)->widthStep*ME_CAST_TO_IPLIMAGE(cvImg)->height - 3; i >= 0; i -= 3) {
    x = (float)SrcData[i] * TransformMatrix[0][0] + (float)SrcData[i + 1] * TransformMatrix[0][1] + (float)SrcData[i + 2] * TransformMatrix[0][2];
    y = (float)SrcData[i] * TransformMatrix[1][0] + (float)SrcData[i + 1] * TransformMatrix[1][1] + (float)SrcData[i + 2] * TransformMatrix[1][2];
    z = (float)SrcData[i] * TransformMatrix[2][0] + (float)SrcData[i + 1] * TransformMatrix[2][1] + (float)SrcData[i + 2] * TransformMatrix[2][2];

    // Scale each channel into 0..255, guarding against a zero-width range
    x = (xmax - xmin) == 0.0 ? 255.0 : (x - xmin) / (xmax - xmin)*255.0;
    y = (ymax - ymin) == 0.0 ? 255.0 : (y - ymin) / (ymax - ymin)*255.0;
    z = (zmax - zmin) == 0.0 ? 255.0 : (z - zmin) / (zmax - zmin)*255.0;

    DstData[i] = (unsigned char)MEBound(0, (int)x, 255);
    DstData[i + 1] = (unsigned char)MEBound(0, (int)y, 255);
    DstData[i + 2] = (unsigned char)MEBound(0, (int)z, 255);
  }
  ME_RELEASE_IPLIMAGE(cvImg);
  cvImg = TempImg;
}

} // namespace lbp_mrf
} // namespace algorithms
} // namespace bgslibrary

#endif
27,438
2,690
# Copyright ClusterHQ Inc.  See LICENSE file for details.

"""
Testing infrastructure for integration tests.
"""

from ..testtools import require_cluster, create_dataset
from ...testtools import AsyncTestCase, random_name


def make_dataset_integration_testcase(image_name, volume_path, internal_port,
                                      insert_data, assert_inserted):
    """
    Create a ``TestCase`` that tests a particular container can
    successfully use Flocker datasets as volumes.

    :param unicode image_name: The image to run.
    :param FilePath volume_path: The path within the container where a
        volume should be mounted.
    :param int internal_port: The port the container listens on.
    :param insert_data: Callable that given test instance, host and port,
        connects using an appropriate client and inserts some data.
        Should return ``Deferred`` that fires on success.
    :param assert_inserted: Callable that given test instance, host and
        port asserts that data was inserted by ``insert_data``.  Should
        return ``Deferred`` that fires on success.

    :return: ``TestCase`` subclass.
    """
    class IntegrationTests(AsyncTestCase):
        """
        Test that the given application can start and restart with Flocker
        datasets as volumes.
        """
        def _start_container(self, name, dataset_id, external_port, cluster,
                             cleanup=True):
            """
            Start a container with a volume.

            :param unicode name: The container name.
            :param UUID dataset_id: The dataset ID.
            :param int external_port: External port to expose on the
                container.
            :param cluster: The ``Cluster``.
            :param bool cleanup: If true, delete container when test is
                over.

            :return: ``Deferred`` that fires when the container has been
                started.
            """
            app = {
                u"name": name,
                u"node_uuid": cluster.nodes[0].uuid,
                u"image": image_name,
                u"ports": [{u"internal": internal_port,
                            u"external": external_port}],
                u'restart_policy': {u'name': u'never'},
                u"volumes": [{u"dataset_id": unicode(dataset_id),
                              u"mountpoint": volume_path.path}],
            }
            created = cluster.create_container(app)
            if cleanup:
                created.addCallback(lambda _: self.addCleanup(
                    cluster.remove_container, name))
            return created

        # TODO: this test doesn't actually require the container agent; it
        # just uses it to do the setup.  It should be ported to the Docker
        # API.
        @require_cluster(1, require_container_agent=True)
        def test_start(self, cluster):
            """
            The specified application can be started with a Docker dataset
            configured as its volume.

            This ensures a newly created dataset meets the requirements of
            the application being tested.  For example, some Docker
            containers can require a completely empty volume, or one that
            is writeable by non-root users, etc..
            """
            host = cluster.nodes[0].public_address
            port = 12345
            creating_dataset = create_dataset(self, cluster)
            creating_dataset.addCallback(
                lambda dataset: self._start_container(random_name(self),
                                                      dataset.dataset_id,
                                                      port, cluster))
            creating_dataset.addCallback(
                lambda _: insert_data(self, host, port))
            creating_dataset.addCallback(
                lambda _: assert_inserted(self, host, port))
            return creating_dataset

        # TODO: this test doesn't actually require the container agent; it
        # just uses it to do the setup.  It should be ported to the Docker
        # API.
        @require_cluster(1, require_container_agent=True)
        def test_restart(self, cluster):
            """
            The specified application can be started with a Docker dataset
            configured as its volume that has already been used by the same
            application previously.
            """
            host = cluster.nodes[0].public_address
            port = 12345
            another_port = 12366

            first_container = random_name(self)

            creating_dataset = create_dataset(self, cluster)

            def created(dataset):
                started = self._start_container(first_container,
                                                dataset.dataset_id,
                                                port, cluster,
                                                cleanup=False)
                started.addCallback(
                    lambda _: insert_data(self, host, port))
                restarting = started.addCallback(
                    lambda _: cluster.remove_container(first_container))
                restarting.addCallback(
                    lambda _: self._start_container(random_name(self),
                                                    dataset.dataset_id,
                                                    another_port, cluster))
                return restarting
            creating_dataset.addCallback(created)
            creating_dataset.addCallback(
                lambda _: assert_inserted(self, host, another_port))
            return creating_dataset

    return IntegrationTests
2,560
9,684
import os.path

import pytest

jinja2 = pytest.importorskip("jinja2")

from dask.utils import format_bytes
from dask.widgets import FILTERS, TEMPLATE_PATHS, get_environment, get_template


@pytest.fixture(autouse=True)
def setup_testing():
    TEMPLATE_PATHS.append(
        os.path.join(os.path.dirname(os.path.abspath(__file__)), "templates")
    )
    FILTERS["custom_filter"] = lambda x: "baz"


def test_widgets():
    template = get_template("example.html.j2")
    assert isinstance(template, jinja2.Template)
    rendered = template.render(foo="bar")
    assert "Hello bar" in rendered


def test_environment():
    environment = get_environment()
    assert isinstance(environment, jinja2.Environment)


def test_unknown_template():
    with pytest.raises(jinja2.TemplateNotFound) as e:
        get_template("does_not_exist.html.j2")

    # The error should contain all the registered template directories to help
    # the user understand where jinja2 is looking, including the one we
    # registered in the fixture.
    assert os.path.dirname(os.path.abspath(__file__)) in str(e)


def test_filters():
    template = get_template("bytes.html.j2")
    assert format_bytes in FILTERS.values()
    assert format_bytes(2e9) in template.render(foo=2e9)

    template = get_template("custom_filter.html.j2")
    assert "baz" in template.render(foo=None)
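
# --- Illustrative sketch (not part of the original test file) ---
# The fixture above expects a "templates" directory next to this file.
# Minimal template contents consistent with the assertions could look like
# the ones written below; they are reconstructed from the tests themselves,
# not copied from the dask source tree.
def write_example_templates(target_dir):
    import pathlib

    target = pathlib.Path(target_dir)
    target.mkdir(parents=True, exist_ok=True)
    (target / "example.html.j2").write_text("Hello {{ foo }}")
    (target / "bytes.html.j2").write_text("{{ foo | format_bytes }}")
    (target / "custom_filter.html.j2").write_text("{{ foo | custom_filter }}")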
502
4,065
package com.zzhoujay.richtext.ig;

import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.graphics.Movie;

import com.zzhoujay.richtext.ImageHolder;
import com.zzhoujay.richtext.drawable.GifDrawable;
import com.zzhoujay.richtext.ext.ImageKit;

import java.io.BufferedInputStream;
import java.io.IOException;
import java.io.InputStream;

/**
 * Created by zhou on 2017/2/21.
 * Image decoding helper: decodes a source of type T either as a plain bitmap
 * or as a GIF, depending on the holder flags and the actual image bytes.
 */
abstract class SourceDecode<T> {

    static SourceDecode<byte[]> BASE64_SOURCE_DECODE = new SourceDecode<byte[]>() {

        @Override
        void decodeSize(byte[] bytes, BitmapFactory.Options options) {
            BitmapFactory.decodeByteArray(bytes, 0, bytes.length, options);
        }

        @Override
        public ImageWrapper decodeAsBitmap(byte[] bytes, BitmapFactory.Options options) {
            return ImageWrapper.createAsBitmap(BitmapFactory.decodeByteArray(bytes, 0, bytes.length, options));
        }

        @Override
        ImageWrapper decodeAsGif(byte[] bytes, BitmapFactory.Options options) {
            return ImageWrapper.createAsGif(new GifDrawable(Movie.decodeByteArray(bytes, 0, bytes.length), options.outHeight, options.outWidth));
        }

        @Override
        boolean isGif(byte[] bytes, BitmapFactory.Options options) {
            return ImageKit.isGif(bytes);
        }
    };

    static SourceDecode<String> LOCAL_FILE_SOURCE_DECODE = new SourceDecode<String>() {

        @Override
        void decodeSize(String s, BitmapFactory.Options options) {
            BitmapFactory.decodeFile(s, options);
        }

        @Override
        public ImageWrapper decodeAsBitmap(String s, BitmapFactory.Options options) {
            return ImageWrapper.createAsBitmap(BitmapFactory.decodeFile(s, options));
        }

        @Override
        ImageWrapper decodeAsGif(String s, BitmapFactory.Options options) {
            return ImageWrapper.createAsGif(new GifDrawable(Movie.decodeFile(s), options.outHeight, options.outWidth));
        }

        @Override
        boolean isGif(String s, BitmapFactory.Options options) {
            return ImageKit.isGif(s);
        }
    };

    static SourceDecode<InputStream> INPUT_STREAM_DECODE = new SourceDecode<InputStream>() {

        private static final int MARK_POSITION = 1024 * 1024;

        @Override
        void decodeSize(InputStream inputStream, BitmapFactory.Options options) {
            BufferedInputStream stream;
            if (inputStream instanceof BufferedInputStream) {
                stream = (BufferedInputStream) inputStream;
            } else {
                stream = new BufferedInputStream(inputStream);
            }
            // When only the bounds are read, mark the stream first so it can
            // be rewound and decoded again later.
            if (options.inJustDecodeBounds) {
                stream.mark(MARK_POSITION);
            }
            BitmapFactory.decodeStream(stream, null, options);
            if (options.inJustDecodeBounds) {
                try {
                    stream.reset();
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
        }

        @Override
        public ImageWrapper decodeAsBitmap(InputStream inputStream, BitmapFactory.Options options) {
            BufferedInputStream stream;
            if (inputStream instanceof BufferedInputStream) {
                stream = (BufferedInputStream) inputStream;
            } else {
                stream = new BufferedInputStream(inputStream);
            }
            if (options.inJustDecodeBounds) {
                stream.mark(MARK_POSITION);
            }
            Bitmap bitmap = BitmapFactory.decodeStream(stream, null, options);
            if (options.inJustDecodeBounds) {
                try {
                    stream.reset();
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
            return ImageWrapper.createAsBitmap(bitmap);
        }

        @Override
        ImageWrapper decodeAsGif(InputStream inputStream, BitmapFactory.Options options) {
            return ImageWrapper.createAsGif(new GifDrawable(Movie.decodeStream(inputStream), options.outHeight, options.outWidth));
        }

        @Override
        boolean isGif(InputStream inputStream, BitmapFactory.Options options) {
            return ImageKit.isGif(inputStream);
        }
    };

    ImageWrapper decode(ImageHolder holder, T t, BitmapFactory.Options options) {
        // GIFs are only decoded as such when auto-play is requested;
        // otherwise the first frame is decoded as a plain bitmap.
        if (holder.isAutoPlay() && (holder.isGif() || isGif(t, options))) {
            holder.setIsGif(true);
            return decodeAsGif(t, options);
        } else {
            return decodeAsBitmap(t, options);
        }
    }

    abstract boolean isGif(T t, BitmapFactory.Options options);

    abstract void decodeSize(T t, BitmapFactory.Options options);

    abstract ImageWrapper decodeAsBitmap(T t, BitmapFactory.Options options);

    abstract ImageWrapper decodeAsGif(T t, BitmapFactory.Options options);
}
2,174
1,853
package org.dromara.hmily.config.consul;

import com.google.common.net.HostAndPort;
import com.orbitz.consul.Consul;
import org.dromara.hmily.common.utils.StringUtils;
import org.dromara.hmily.config.api.ConfigEnv;
import org.dromara.hmily.config.api.ConfigScan;
import org.dromara.hmily.config.api.entity.HmilyConfig;
import org.dromara.hmily.config.api.event.EventConsumer;
import org.dromara.hmily.config.api.event.ModifyData;
import org.dromara.hmily.config.loader.ConfigLoader;
import org.dromara.hmily.config.loader.ServerConfigLoader;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import sun.misc.IOUtils;

import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ExecutionException;

/**
 * @author lilang
 * @date 2020-09-23 23:39
 **/
public class ConsulRealTest {

    private ConsulConfig config;

    private Consul client;

    private ConsulConfigLoader consulConfigLoader;

    @Before
    public void setUp() throws Exception {
        config = buildConfig();
        client = Consul.builder().withHostAndPort(HostAndPort.fromString(config.getHostAndPort())).build().newClient();
        consulConfigLoader = new ConsulConfigLoader();
    }

    private Collection<HostAndPort> buildHostAndPortList(String hostAndPorts) {
        if (StringUtils.isNoneBlank(hostAndPorts)) {
            String[] hostAndPortArray = hostAndPorts.split(",");
            List<HostAndPort> hostAndPortList = new ArrayList<>();
            for (String hostAndPort : hostAndPortArray) {
                hostAndPortList.add(HostAndPort.fromString(hostAndPort));
            }
            return hostAndPortList;
        } else {
            return Collections.emptyList();
        }
    }

    @Test
    public void testPull() throws IOException, ExecutionException, InterruptedException {
        InputStream resourceAsStream = getClass().getResourceAsStream("/hmily-consul.yml");
        int available = resourceAsStream.available();
        byte[] bytes = IOUtils.readFully(resourceAsStream, available, false);
        client.keyValueClient().putValue(config.getKey(), new String(bytes));
        ConsulClient consulClient = ConsulClient.getInstance(buildConfig());
        InputStream is = consulClient.pull(config);
        byte[] remoteConfig = IOUtils.readFully(is, available, false);
        Assert.assertArrayEquals(bytes, remoteConfig);
        client.keyValueClient().deleteKey(config.getKey());
    }

    @Test
    public void testLoad() throws InterruptedException, IOException, ExecutionException {
        ConfigScan.scan();
        ConfigEnv.getInstance().addEvent(new EventConsumer<ModifyData>() {
            @Override
            public void accept(ModifyData data) {
                System.out.println(data);
            }

            @Override
            public String regex() {
                return "hmily.config.*";
            }
        });
        ServerConfigLoader loader = new ServerConfigLoader();
        loader.load(ConfigLoader.Context::new, ((context, config1) -> {
            System.out.println("config:---->" + config1);
            if (config1 != null) {
                consulConfigLoader.load(context, (context1, cfg) -> {
                    System.out.println("config: ==> " + cfg);
                });
            }
        }));
        Thread.sleep(5000);
        changeRemoteData();
    }

    private void changeRemoteData() throws IOException, ExecutionException, InterruptedException {
        InputStream resourceAsStream = getClass().getResourceAsStream("/hmily-consul-update.yml");
        int available = resourceAsStream.available();
        byte[] bytes = IOUtils.readFully(resourceAsStream, available, false);
        client.keyValueClient().putValue(config.getKey(), new String(bytes));
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
        Assert.assertEquals("xiaoyu1", ConfigEnv.getInstance().getConfig(HmilyConfig.class).getAppName());
        Assert.assertEquals("kryo1", ConfigEnv.getInstance().getConfig(HmilyConfig.class).getSerializer());
        Assert.assertEquals("threadLocal1", ConfigEnv.getInstance().getConfig(HmilyConfig.class).getContextTransmittalMode());

        resourceAsStream = getClass().getResourceAsStream("/hmily-consul.yml");
        available = resourceAsStream.available();
        bytes = IOUtils.readFully(resourceAsStream, available, false);
        client.keyValueClient().putValue(config.getKey(), new String(bytes));
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
        Assert.assertEquals("xiaoyu", ConfigEnv.getInstance().getConfig(HmilyConfig.class).getAppName());
        Assert.assertEquals("kryo", ConfigEnv.getInstance().getConfig(HmilyConfig.class).getSerializer());
        Assert.assertEquals("threadLocal", ConfigEnv.getInstance().getConfig(HmilyConfig.class).getContextTransmittalMode());

        client.keyValueClient().deleteKey(config.getKey());
        // Deleting the key is currently effectively a no-op: the configuration in effect is still the one from before the deletion.
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
        Assert.assertEquals("xiaoyu", ConfigEnv.getInstance().getConfig(HmilyConfig.class).getAppName());
        Assert.assertEquals("kryo", ConfigEnv.getInstance().getConfig(HmilyConfig.class).getSerializer());
        Assert.assertEquals("threadLocal", ConfigEnv.getInstance().getConfig(HmilyConfig.class).getContextTransmittalMode());

        // test add after delete
        resourceAsStream = getClass().getResourceAsStream("/hmily-consul-update.yml");
        available = resourceAsStream.available();
        bytes = IOUtils.readFully(resourceAsStream, available, false);
        client.keyValueClient().putValue(config.getKey(), new String(bytes));
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
        Assert.assertEquals("xiaoyu1", ConfigEnv.getInstance().getConfig(HmilyConfig.class).getAppName());
        Assert.assertEquals("kryo1", ConfigEnv.getInstance().getConfig(HmilyConfig.class).getSerializer());
        Assert.assertEquals("threadLocal1", ConfigEnv.getInstance().getConfig(HmilyConfig.class).getContextTransmittalMode());
    }

    private ConsulConfig buildConfig() {
        ConsulConfig consulConfig = new ConsulConfig();
        consulConfig.setHostAndPort("localhost:8500");
        consulConfig.setKey("test");
        consulConfig.setFileExtension("yml");
        return consulConfig;
    }
}
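For reference, a minimal sketch of what the /hmily-consul.yml fixture read above might contain. Only the three values come from the assertions in changeRemoteData(); the key layout is an assumption, and hmily-consul-update.yml would carry the same keys with the "1"-suffixed values.

# Hypothetical layout of the /hmily-consul.yml test fixture; the nesting is
# assumed, only the values are taken from the test's assertions.
hmily:
  appName: xiaoyu
  serializer: kryo
  contextTransmittalMode: threadLocal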
2,733
678
/**
 * This header is generated by class-dump-z 0.2b.
 *
 * Source: /System/Library/PrivateFrameworks/IMCore.framework/Frameworks/IMDAppleServices.framework/IMDAppleServices
 */

@protocol IMDAppleEmailInterfaceListener
@optional
- (void)center:(id)center foundEmail:(id)email vettingToken:(id)token forRegistrationInfo:(id)registrationInfo;
@end
103
23,901
<gh_stars>1000+
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Random Controller that proposes random topologies."""

import pyglove as pg

from es_enas.controllers import base_controller


class RandomController(base_controller.BaseController):
  """Random Search Controller."""

  def __init__(self, dna_spec, batch_size, **kwargs):
    """Initialization. See base class for more details."""
    super().__init__(dna_spec, batch_size)
    del kwargs

  def propose_dna(self):
    """Proposes a topology dna using stored template.

    Args:
      None.

    Returns:
      dna: A proposed dna.
    """
    return pg.random_dna(self._dna_spec)

  def collect_rewards_and_train(self, reward_vector, dna_list):
    """Collects rewards and sends them to the replay buffer.

    Args:
      reward_vector: list of reward floats.
      dna_list: list of dna's from the proposal function.

    Returns:
      None.
    """
    del reward_vector
    del dna_list
    pass
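A brief usage sketch of the controller above (not part of the original file). The two-field search space is an illustrative assumption; pg.Dict, pg.oneof, pg.dna_spec, and pg.random_dna are standard PyGlove APIs, and the constructor arguments follow the BaseController signature shown above.

import pyglove as pg

from es_enas.controllers import random_controller

# Toy search space (hypothetical): pick one activation and one layer width.
search_space = pg.Dict(
    activation=pg.oneof(['relu', 'tanh', 'swish']),
    width=pg.oneof([64, 128, 256]))
dna_spec = pg.dna_spec(search_space)

controller = random_controller.RandomController(dna_spec, batch_size=4)
dna_batch = [controller.propose_dna() for _ in range(4)]  # independent random draws

# Random search uses no reward feedback, so this call is deliberately a no-op.
controller.collect_rewards_and_train([0.0] * len(dna_batch), dna_batch)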
502
850
# -*- coding: utf-8 -*- from __future__ import absolute_import, division, print_function import tensorflow as tf import tensorflow.contrib.slim as slim class ResNetPytorchBackbone(object): def __init__(self, cfgs): self.cfgs = cfgs self._weights_dict = {} self.freeze_blocks_node_index = {'resnext50_32x4d': 356, 'resnet50': 356, 'resnext101_32x8d': 662, 'resnet34': 243} self.is_training = False self.scope_name = 'resnet50' def load_weights(self, weight_file): import numpy as np if weight_file is None: return try: weights_dict = np.load(weight_file, allow_pickle=True).item() except: weights_dict = np.load(weight_file, allow_pickle=True, encoding='bytes').item() return weights_dict def resnext50_32x4d(self, inputs, weight_file=None): feature_dict = {} self._weights_dict = self.load_weights(weight_file) # inputs = tf.placeholder(tf.float32, shape=(None, 224, 224, 3), name='inputs') node321_pad = tf.pad(inputs, paddings=[[0, 0], [3, 3], [3, 3], [0, 0]]) node321 = self.convolution(node321_pad, group=1, strides=[2, 2], padding='VALID', name='node321') node322 = self.batch_normalization(node321, variance_epsilon=9.999999747378752e-06, name='node322') node323 = tf.nn.relu(node322, name='node323') node324_pad = tf.pad(node323, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]], constant_values=float('-Inf')) node324 = tf.nn.max_pool(node324_pad, [1, 3, 3, 1], [1, 2, 2, 1], padding='VALID', name='node324') feature_dict["C1"] = node324 node325 = self.convolution(node324, group=1, strides=[1, 1], padding='VALID', name='node325') node333 = self.convolution(node324, group=1, strides=[1, 1], padding='VALID', name='node333') node326 = self.batch_normalization(node325, variance_epsilon=9.999999747378752e-06, name='node326') node334 = self.batch_normalization(node333, variance_epsilon=9.999999747378752e-06, name='node334') node327 = tf.nn.relu(node326, name='node327') node328_pad = tf.pad(node327, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node328 = self.convolution(node328_pad, group=32, strides=[1, 1], padding='VALID', name='node328') node329 = self.batch_normalization(node328, variance_epsilon=9.999999747378752e-06, name='node329') node330 = tf.nn.relu(node329, name='node330') node331 = self.convolution(node330, group=1, strides=[1, 1], padding='VALID', name='node331') node332 = self.batch_normalization(node331, variance_epsilon=9.999999747378752e-06, name='node332') node335 = node332 + node334 node336 = tf.nn.relu(node335, name='node336') node337 = self.convolution(node336, group=1, strides=[1, 1], padding='VALID', name='node337') node338 = self.batch_normalization(node337, variance_epsilon=9.999999747378752e-06, name='node338') node339 = tf.nn.relu(node338, name='node339') node340_pad = tf.pad(node339, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node340 = self.convolution(node340_pad, group=32, strides=[1, 1], padding='VALID', name='node340') node341 = self.batch_normalization(node340, variance_epsilon=9.999999747378752e-06, name='node341') node342 = tf.nn.relu(node341, name='node342') node343 = self.convolution(node342, group=1, strides=[1, 1], padding='VALID', name='node343') node344 = self.batch_normalization(node343, variance_epsilon=9.999999747378752e-06, name='node344') node345 = node344 + node336 node346 = tf.nn.relu(node345, name='node346') node347 = self.convolution(node346, group=1, strides=[1, 1], padding='VALID', name='node347') node348 = self.batch_normalization(node347, variance_epsilon=9.999999747378752e-06, name='node348') node349 = tf.nn.relu(node348, name='node349') node350_pad = tf.pad(node349, 
paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node350 = self.convolution(node350_pad, group=32, strides=[1, 1], padding='VALID', name='node350') node351 = self.batch_normalization(node350, variance_epsilon=9.999999747378752e-06, name='node351') node352 = tf.nn.relu(node351, name='node352') node353 = self.convolution(node352, group=1, strides=[1, 1], padding='VALID', name='node353') node354 = self.batch_normalization(node353, variance_epsilon=9.999999747378752e-06, name='node354') node355 = node354 + node346 node356 = tf.nn.relu(node355, name='node356') feature_dict['C2'] = node356 node357 = self.convolution(node356, group=1, strides=[1, 1], padding='VALID', name='node357') node365 = self.convolution(node356, group=1, strides=[2, 2], padding='VALID', name='node365') node358 = self.batch_normalization(node357, variance_epsilon=9.999999747378752e-06, name='node358') node366 = self.batch_normalization(node365, variance_epsilon=9.999999747378752e-06, name='node366') node359 = tf.nn.relu(node358, name='node359') node360_pad = tf.pad(node359, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node360 = self.convolution(node360_pad, group=32, strides=[2, 2], padding='VALID', name='node360') node361 = self.batch_normalization(node360, variance_epsilon=9.999999747378752e-06, name='node361') node362 = tf.nn.relu(node361, name='node362') node363 = self.convolution(node362, group=1, strides=[1, 1], padding='VALID', name='node363') node364 = self.batch_normalization(node363, variance_epsilon=9.999999747378752e-06, name='node364') node367 = node364 + node366 node368 = tf.nn.relu(node367, name='node368') node369 = self.convolution(node368, group=1, strides=[1, 1], padding='VALID', name='node369') node370 = self.batch_normalization(node369, variance_epsilon=9.999999747378752e-06, name='node370') node371 = tf.nn.relu(node370, name='node371') node372_pad = tf.pad(node371, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node372 = self.convolution(node372_pad, group=32, strides=[1, 1], padding='VALID', name='node372') node373 = self.batch_normalization(node372, variance_epsilon=9.999999747378752e-06, name='node373') node374 = tf.nn.relu(node373, name='node374') node375 = self.convolution(node374, group=1, strides=[1, 1], padding='VALID', name='node375') node376 = self.batch_normalization(node375, variance_epsilon=9.999999747378752e-06, name='node376') node377 = node376 + node368 node378 = tf.nn.relu(node377, name='node378') node379 = self.convolution(node378, group=1, strides=[1, 1], padding='VALID', name='node379') node380 = self.batch_normalization(node379, variance_epsilon=9.999999747378752e-06, name='node380') node381 = tf.nn.relu(node380, name='node381') node382_pad = tf.pad(node381, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node382 = self.convolution(node382_pad, group=32, strides=[1, 1], padding='VALID', name='node382') node383 = self.batch_normalization(node382, variance_epsilon=9.999999747378752e-06, name='node383') node384 = tf.nn.relu(node383, name='node384') node385 = self.convolution(node384, group=1, strides=[1, 1], padding='VALID', name='node385') node386 = self.batch_normalization(node385, variance_epsilon=9.999999747378752e-06, name='node386') node387 = node386 + node378 node388 = tf.nn.relu(node387, name='node388') node389 = self.convolution(node388, group=1, strides=[1, 1], padding='VALID', name='node389') node390 = self.batch_normalization(node389, variance_epsilon=9.999999747378752e-06, name='node390') node391 = tf.nn.relu(node390, name='node391') node392_pad = tf.pad(node391, paddings=[[0, 0], [1, 1], 
[1, 1], [0, 0]]) node392 = self.convolution(node392_pad, group=32, strides=[1, 1], padding='VALID', name='node392') node393 = self.batch_normalization(node392, variance_epsilon=9.999999747378752e-06, name='node393') node394 = tf.nn.relu(node393, name='node394') node395 = self.convolution(node394, group=1, strides=[1, 1], padding='VALID', name='node395') node396 = self.batch_normalization(node395, variance_epsilon=9.999999747378752e-06, name='node396') node397 = node396 + node388 node398 = tf.nn.relu(node397, name='node398') feature_dict['C3'] = node398 node399 = self.convolution(node398, group=1, strides=[1, 1], padding='VALID', name='node399') node407 = self.convolution(node398, group=1, strides=[2, 2], padding='VALID', name='node407') node400 = self.batch_normalization(node399, variance_epsilon=9.999999747378752e-06, name='node400') node408 = self.batch_normalization(node407, variance_epsilon=9.999999747378752e-06, name='node408') node401 = tf.nn.relu(node400, name='node401') node402_pad = tf.pad(node401, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node402 = self.convolution(node402_pad, group=32, strides=[2, 2], padding='VALID', name='node402') node403 = self.batch_normalization(node402, variance_epsilon=9.999999747378752e-06, name='node403') node404 = tf.nn.relu(node403, name='node404') node405 = self.convolution(node404, group=1, strides=[1, 1], padding='VALID', name='node405') node406 = self.batch_normalization(node405, variance_epsilon=9.999999747378752e-06, name='node406') node409 = node406 + node408 node410 = tf.nn.relu(node409, name='node410') node411 = self.convolution(node410, group=1, strides=[1, 1], padding='VALID', name='node411') node412 = self.batch_normalization(node411, variance_epsilon=9.999999747378752e-06, name='node412') node413 = tf.nn.relu(node412, name='node413') node414_pad = tf.pad(node413, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node414 = self.convolution(node414_pad, group=32, strides=[1, 1], padding='VALID', name='node414') node415 = self.batch_normalization(node414, variance_epsilon=9.999999747378752e-06, name='node415') node416 = tf.nn.relu(node415, name='node416') node417 = self.convolution(node416, group=1, strides=[1, 1], padding='VALID', name='node417') node418 = self.batch_normalization(node417, variance_epsilon=9.999999747378752e-06, name='node418') node419 = node418 + node410 node420 = tf.nn.relu(node419, name='node420') node421 = self.convolution(node420, group=1, strides=[1, 1], padding='VALID', name='node421') node422 = self.batch_normalization(node421, variance_epsilon=9.999999747378752e-06, name='node422') node423 = tf.nn.relu(node422, name='node423') node424_pad = tf.pad(node423, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node424 = self.convolution(node424_pad, group=32, strides=[1, 1], padding='VALID', name='node424') node425 = self.batch_normalization(node424, variance_epsilon=9.999999747378752e-06, name='node425') node426 = tf.nn.relu(node425, name='node426') node427 = self.convolution(node426, group=1, strides=[1, 1], padding='VALID', name='node427') node428 = self.batch_normalization(node427, variance_epsilon=9.999999747378752e-06, name='node428') node429 = node428 + node420 node430 = tf.nn.relu(node429, name='node430') node431 = self.convolution(node430, group=1, strides=[1, 1], padding='VALID', name='node431') node432 = self.batch_normalization(node431, variance_epsilon=9.999999747378752e-06, name='node432') node433 = tf.nn.relu(node432, name='node433') node434_pad = tf.pad(node433, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node434 = 
self.convolution(node434_pad, group=32, strides=[1, 1], padding='VALID', name='node434') node435 = self.batch_normalization(node434, variance_epsilon=9.999999747378752e-06, name='node435') node436 = tf.nn.relu(node435, name='node436') node437 = self.convolution(node436, group=1, strides=[1, 1], padding='VALID', name='node437') node438 = self.batch_normalization(node437, variance_epsilon=9.999999747378752e-06, name='node438') node439 = node438 + node430 node440 = tf.nn.relu(node439, name='node440') node441 = self.convolution(node440, group=1, strides=[1, 1], padding='VALID', name='node441') node442 = self.batch_normalization(node441, variance_epsilon=9.999999747378752e-06, name='node442') node443 = tf.nn.relu(node442, name='node443') node444_pad = tf.pad(node443, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node444 = self.convolution(node444_pad, group=32, strides=[1, 1], padding='VALID', name='node444') node445 = self.batch_normalization(node444, variance_epsilon=9.999999747378752e-06, name='node445') node446 = tf.nn.relu(node445, name='node446') node447 = self.convolution(node446, group=1, strides=[1, 1], padding='VALID', name='node447') node448 = self.batch_normalization(node447, variance_epsilon=9.999999747378752e-06, name='node448') node449 = node448 + node440 node450 = tf.nn.relu(node449, name='node450') node451 = self.convolution(node450, group=1, strides=[1, 1], padding='VALID', name='node451') node452 = self.batch_normalization(node451, variance_epsilon=9.999999747378752e-06, name='node452') node453 = tf.nn.relu(node452, name='node453') node454_pad = tf.pad(node453, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node454 = self.convolution(node454_pad, group=32, strides=[1, 1], padding='VALID', name='node454') node455 = self.batch_normalization(node454, variance_epsilon=9.999999747378752e-06, name='node455') node456 = tf.nn.relu(node455, name='node456') node457 = self.convolution(node456, group=1, strides=[1, 1], padding='VALID', name='node457') node458 = self.batch_normalization(node457, variance_epsilon=9.999999747378752e-06, name='node458') node459 = node458 + node450 node460 = tf.nn.relu(node459, name='node460') feature_dict['C4'] = node460 node461 = self.convolution(node460, group=1, strides=[1, 1], padding='VALID', name='node461') node469 = self.convolution(node460, group=1, strides=[2, 2], padding='VALID', name='node469') node462 = self.batch_normalization(node461, variance_epsilon=9.999999747378752e-06, name='node462') node470 = self.batch_normalization(node469, variance_epsilon=9.999999747378752e-06, name='node470') node463 = tf.nn.relu(node462, name='node463') node464_pad = tf.pad(node463, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node464 = self.convolution(node464_pad, group=32, strides=[2, 2], padding='VALID', name='node464') node465 = self.batch_normalization(node464, variance_epsilon=9.999999747378752e-06, name='node465') node466 = tf.nn.relu(node465, name='node466') node467 = self.convolution(node466, group=1, strides=[1, 1], padding='VALID', name='node467') node468 = self.batch_normalization(node467, variance_epsilon=9.999999747378752e-06, name='node468') node471 = node468 + node470 node472 = tf.nn.relu(node471, name='node472') node473 = self.convolution(node472, group=1, strides=[1, 1], padding='VALID', name='node473') node474 = self.batch_normalization(node473, variance_epsilon=9.999999747378752e-06, name='node474') node475 = tf.nn.relu(node474, name='node475') node476_pad = tf.pad(node475, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node476 = 
self.convolution(node476_pad, group=32, strides=[1, 1], padding='VALID', name='node476') node477 = self.batch_normalization(node476, variance_epsilon=9.999999747378752e-06, name='node477') node478 = tf.nn.relu(node477, name='node478') node479 = self.convolution(node478, group=1, strides=[1, 1], padding='VALID', name='node479') node480 = self.batch_normalization(node479, variance_epsilon=9.999999747378752e-06, name='node480') node481 = node480 + node472 node482 = tf.nn.relu(node481, name='node482') node483 = self.convolution(node482, group=1, strides=[1, 1], padding='VALID', name='node483') node484 = self.batch_normalization(node483, variance_epsilon=9.999999747378752e-06, name='node484') node485 = tf.nn.relu(node484, name='node485') node486_pad = tf.pad(node485, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node486 = self.convolution(node486_pad, group=32, strides=[1, 1], padding='VALID', name='node486') node487 = self.batch_normalization(node486, variance_epsilon=9.999999747378752e-06, name='node487') node488 = tf.nn.relu(node487, name='node488') node489 = self.convolution(node488, group=1, strides=[1, 1], padding='VALID', name='node489') node490 = self.batch_normalization(node489, variance_epsilon=9.999999747378752e-06, name='node490') node491 = node490 + node482 node492 = tf.nn.relu(node491, name='node492') feature_dict['C5'] = node492 # node493 = tf.nn.avg_pool(node492, [1, 7, 7, 1], [1, 1, 1, 1], padding='VALID', name='node493') # node494 = tf.contrib.layers.flatten(node493) # node495_flatten = tf.contrib.layers.flatten(node494) # node495 = tf.layers.dense(node495_flatten, 1000, # kernel_initializer=tf.constant_initializer(_weights_dict['node495']['weights']), # bias_initializer=tf.constant_initializer(_weights_dict['node495']['bias']), # use_bias=True) return feature_dict def resnext101_32x8d(self, inputs, weight_file=None): feature_dict = {} global _weights_dict self._weights_dict = self.load_weights(weight_file) # inputs = tf.placeholder(tf.float32, shape=(None, 224, 224, 3), name='inputs') node627_pad = tf.pad(inputs, paddings=[[0, 0], [3, 3], [3, 3], [0, 0]]) node627 = self.convolution(node627_pad, group=1, strides=[2, 2], padding='VALID', name='node627') node628 = self.batch_normalization(node627, variance_epsilon=9.999999747378752e-06, name='node628') node629 = tf.nn.relu(node628, name='node629') node630_pad = tf.pad(node629, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]], constant_values=float('-Inf')) node630 = tf.nn.max_pool(node630_pad, [1, 3, 3, 1], [1, 2, 2, 1], padding='VALID', name='node630') feature_dict['C1'] = node630 node631 = self.convolution(node630, group=1, strides=[1, 1], padding='VALID', name='node631') node639 = self.convolution(node630, group=1, strides=[1, 1], padding='VALID', name='node639') node632 = self.batch_normalization(node631, variance_epsilon=9.999999747378752e-06, name='node632') node640 = self.batch_normalization(node639, variance_epsilon=9.999999747378752e-06, name='node640') node633 = tf.nn.relu(node632, name='node633') node634_pad = tf.pad(node633, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node634 = self.convolution(node634_pad, group=32, strides=[1, 1], padding='VALID', name='node634') node635 = self.batch_normalization(node634, variance_epsilon=9.999999747378752e-06, name='node635') node636 = tf.nn.relu(node635, name='node636') node637 = self.convolution(node636, group=1, strides=[1, 1], padding='VALID', name='node637') node638 = self.batch_normalization(node637, variance_epsilon=9.999999747378752e-06, name='node638') node641 = node638 + node640 
node642 = tf.nn.relu(node641, name='node642') node643 = self.convolution(node642, group=1, strides=[1, 1], padding='VALID', name='node643') node644 = self.batch_normalization(node643, variance_epsilon=9.999999747378752e-06, name='node644') node645 = tf.nn.relu(node644, name='node645') node646_pad = tf.pad(node645, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node646 = self.convolution(node646_pad, group=32, strides=[1, 1], padding='VALID', name='node646') node647 = self.batch_normalization(node646, variance_epsilon=9.999999747378752e-06, name='node647') node648 = tf.nn.relu(node647, name='node648') node649 = self.convolution(node648, group=1, strides=[1, 1], padding='VALID', name='node649') node650 = self.batch_normalization(node649, variance_epsilon=9.999999747378752e-06, name='node650') node651 = node650 + node642 node652 = tf.nn.relu(node651, name='node652') node653 = self.convolution(node652, group=1, strides=[1, 1], padding='VALID', name='node653') node654 = self.batch_normalization(node653, variance_epsilon=9.999999747378752e-06, name='node654') node655 = tf.nn.relu(node654, name='node655') node656_pad = tf.pad(node655, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node656 = self.convolution(node656_pad, group=32, strides=[1, 1], padding='VALID', name='node656') node657 = self.batch_normalization(node656, variance_epsilon=9.999999747378752e-06, name='node657') node658 = tf.nn.relu(node657, name='node658') node659 = self.convolution(node658, group=1, strides=[1, 1], padding='VALID', name='node659') node660 = self.batch_normalization(node659, variance_epsilon=9.999999747378752e-06, name='node660') node661 = node660 + node652 node662 = tf.nn.relu(node661, name='node662') feature_dict['C2'] = node662 node663 = self.convolution(node662, group=1, strides=[1, 1], padding='VALID', name='node663') node671 = self.convolution(node662, group=1, strides=[2, 2], padding='VALID', name='node671') node664 = self.batch_normalization(node663, variance_epsilon=9.999999747378752e-06, name='node664') node672 = self.batch_normalization(node671, variance_epsilon=9.999999747378752e-06, name='node672') node665 = tf.nn.relu(node664, name='node665') node666_pad = tf.pad(node665, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node666 = self.convolution(node666_pad, group=32, strides=[2, 2], padding='VALID', name='node666') node667 = self.batch_normalization(node666, variance_epsilon=9.999999747378752e-06, name='node667') node668 = tf.nn.relu(node667, name='node668') node669 = self.convolution(node668, group=1, strides=[1, 1], padding='VALID', name='node669') node670 = self.batch_normalization(node669, variance_epsilon=9.999999747378752e-06, name='node670') node673 = node670 + node672 node674 = tf.nn.relu(node673, name='node674') node675 = self.convolution(node674, group=1, strides=[1, 1], padding='VALID', name='node675') node676 = self.batch_normalization(node675, variance_epsilon=9.999999747378752e-06, name='node676') node677 = tf.nn.relu(node676, name='node677') node678_pad = tf.pad(node677, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node678 = self.convolution(node678_pad, group=32, strides=[1, 1], padding='VALID', name='node678') node679 = self.batch_normalization(node678, variance_epsilon=9.999999747378752e-06, name='node679') node680 = tf.nn.relu(node679, name='node680') node681 = self.convolution(node680, group=1, strides=[1, 1], padding='VALID', name='node681') node682 = self.batch_normalization(node681, variance_epsilon=9.999999747378752e-06, name='node682') node683 = node682 + node674 node684 = 
tf.nn.relu(node683, name='node684') node685 = self.convolution(node684, group=1, strides=[1, 1], padding='VALID', name='node685') node686 = self.batch_normalization(node685, variance_epsilon=9.999999747378752e-06, name='node686') node687 = tf.nn.relu(node686, name='node687') node688_pad = tf.pad(node687, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node688 = self.convolution(node688_pad, group=32, strides=[1, 1], padding='VALID', name='node688') node689 = self.batch_normalization(node688, variance_epsilon=9.999999747378752e-06, name='node689') node690 = tf.nn.relu(node689, name='node690') node691 = self.convolution(node690, group=1, strides=[1, 1], padding='VALID', name='node691') node692 = self.batch_normalization(node691, variance_epsilon=9.999999747378752e-06, name='node692') node693 = node692 + node684 node694 = tf.nn.relu(node693, name='node694') node695 = self.convolution(node694, group=1, strides=[1, 1], padding='VALID', name='node695') node696 = self.batch_normalization(node695, variance_epsilon=9.999999747378752e-06, name='node696') node697 = tf.nn.relu(node696, name='node697') node698_pad = tf.pad(node697, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node698 = self.convolution(node698_pad, group=32, strides=[1, 1], padding='VALID', name='node698') node699 = self.batch_normalization(node698, variance_epsilon=9.999999747378752e-06, name='node699') node700 = tf.nn.relu(node699, name='node700') node701 = self.convolution(node700, group=1, strides=[1, 1], padding='VALID', name='node701') node702 = self.batch_normalization(node701, variance_epsilon=9.999999747378752e-06, name='node702') node703 = node702 + node694 node704 = tf.nn.relu(node703, name='node704') feature_dict['C3'] = node704 node705 = self.convolution(node704, group=1, strides=[1, 1], padding='VALID', name='node705') node713 = self.convolution(node704, group=1, strides=[2, 2], padding='VALID', name='node713') node706 = self.batch_normalization(node705, variance_epsilon=9.999999747378752e-06, name='node706') node714 = self.batch_normalization(node713, variance_epsilon=9.999999747378752e-06, name='node714') node707 = tf.nn.relu(node706, name='node707') node708_pad = tf.pad(node707, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node708 = self.convolution(node708_pad, group=32, strides=[2, 2], padding='VALID', name='node708') node709 = self.batch_normalization(node708, variance_epsilon=9.999999747378752e-06, name='node709') node710 = tf.nn.relu(node709, name='node710') node711 = self.convolution(node710, group=1, strides=[1, 1], padding='VALID', name='node711') node712 = self.batch_normalization(node711, variance_epsilon=9.999999747378752e-06, name='node712') node715 = node712 + node714 node716 = tf.nn.relu(node715, name='node716') node717 = self.convolution(node716, group=1, strides=[1, 1], padding='VALID', name='node717') node718 = self.batch_normalization(node717, variance_epsilon=9.999999747378752e-06, name='node718') node719 = tf.nn.relu(node718, name='node719') node720_pad = tf.pad(node719, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node720 = self.convolution(node720_pad, group=32, strides=[1, 1], padding='VALID', name='node720') node721 = self.batch_normalization(node720, variance_epsilon=9.999999747378752e-06, name='node721') node722 = tf.nn.relu(node721, name='node722') node723 = self.convolution(node722, group=1, strides=[1, 1], padding='VALID', name='node723') node724 = self.batch_normalization(node723, variance_epsilon=9.999999747378752e-06, name='node724') node725 = node724 + node716 node726 = tf.nn.relu(node725, 
name='node726') node727 = self.convolution(node726, group=1, strides=[1, 1], padding='VALID', name='node727') node728 = self.batch_normalization(node727, variance_epsilon=9.999999747378752e-06, name='node728') node729 = tf.nn.relu(node728, name='node729') node730_pad = tf.pad(node729, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node730 = self.convolution(node730_pad, group=32, strides=[1, 1], padding='VALID', name='node730') node731 = self.batch_normalization(node730, variance_epsilon=9.999999747378752e-06, name='node731') node732 = tf.nn.relu(node731, name='node732') node733 = self.convolution(node732, group=1, strides=[1, 1], padding='VALID', name='node733') node734 = self.batch_normalization(node733, variance_epsilon=9.999999747378752e-06, name='node734') node735 = node734 + node726 node736 = tf.nn.relu(node735, name='node736') node737 = self.convolution(node736, group=1, strides=[1, 1], padding='VALID', name='node737') node738 = self.batch_normalization(node737, variance_epsilon=9.999999747378752e-06, name='node738') node739 = tf.nn.relu(node738, name='node739') node740_pad = tf.pad(node739, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node740 = self.convolution(node740_pad, group=32, strides=[1, 1], padding='VALID', name='node740') node741 = self.batch_normalization(node740, variance_epsilon=9.999999747378752e-06, name='node741') node742 = tf.nn.relu(node741, name='node742') node743 = self.convolution(node742, group=1, strides=[1, 1], padding='VALID', name='node743') node744 = self.batch_normalization(node743, variance_epsilon=9.999999747378752e-06, name='node744') node745 = node744 + node736 node746 = tf.nn.relu(node745, name='node746') node747 = self.convolution(node746, group=1, strides=[1, 1], padding='VALID', name='node747') node748 = self.batch_normalization(node747, variance_epsilon=9.999999747378752e-06, name='node748') node749 = tf.nn.relu(node748, name='node749') node750_pad = tf.pad(node749, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node750 = self.convolution(node750_pad, group=32, strides=[1, 1], padding='VALID', name='node750') node751 = self.batch_normalization(node750, variance_epsilon=9.999999747378752e-06, name='node751') node752 = tf.nn.relu(node751, name='node752') node753 = self.convolution(node752, group=1, strides=[1, 1], padding='VALID', name='node753') node754 = self.batch_normalization(node753, variance_epsilon=9.999999747378752e-06, name='node754') node755 = node754 + node746 node756 = tf.nn.relu(node755, name='node756') node757 = self.convolution(node756, group=1, strides=[1, 1], padding='VALID', name='node757') node758 = self.batch_normalization(node757, variance_epsilon=9.999999747378752e-06, name='node758') node759 = tf.nn.relu(node758, name='node759') node760_pad = tf.pad(node759, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node760 = self.convolution(node760_pad, group=32, strides=[1, 1], padding='VALID', name='node760') node761 = self.batch_normalization(node760, variance_epsilon=9.999999747378752e-06, name='node761') node762 = tf.nn.relu(node761, name='node762') node763 = self.convolution(node762, group=1, strides=[1, 1], padding='VALID', name='node763') node764 = self.batch_normalization(node763, variance_epsilon=9.999999747378752e-06, name='node764') node765 = node764 + node756 node766 = tf.nn.relu(node765, name='node766') node767 = self.convolution(node766, group=1, strides=[1, 1], padding='VALID', name='node767') node768 = self.batch_normalization(node767, variance_epsilon=9.999999747378752e-06, name='node768') node769 = tf.nn.relu(node768, 
name='node769') node770_pad = tf.pad(node769, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node770 = self.convolution(node770_pad, group=32, strides=[1, 1], padding='VALID', name='node770') node771 = self.batch_normalization(node770, variance_epsilon=9.999999747378752e-06, name='node771') node772 = tf.nn.relu(node771, name='node772') node773 = self.convolution(node772, group=1, strides=[1, 1], padding='VALID', name='node773') node774 = self.batch_normalization(node773, variance_epsilon=9.999999747378752e-06, name='node774') node775 = node774 + node766 node776 = tf.nn.relu(node775, name='node776') node777 = self.convolution(node776, group=1, strides=[1, 1], padding='VALID', name='node777') node778 = self.batch_normalization(node777, variance_epsilon=9.999999747378752e-06, name='node778') node779 = tf.nn.relu(node778, name='node779') node780_pad = tf.pad(node779, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node780 = self.convolution(node780_pad, group=32, strides=[1, 1], padding='VALID', name='node780') node781 = self.batch_normalization(node780, variance_epsilon=9.999999747378752e-06, name='node781') node782 = tf.nn.relu(node781, name='node782') node783 = self.convolution(node782, group=1, strides=[1, 1], padding='VALID', name='node783') node784 = self.batch_normalization(node783, variance_epsilon=9.999999747378752e-06, name='node784') node785 = node784 + node776 node786 = tf.nn.relu(node785, name='node786') node787 = self.convolution(node786, group=1, strides=[1, 1], padding='VALID', name='node787') node788 = self.batch_normalization(node787, variance_epsilon=9.999999747378752e-06, name='node788') node789 = tf.nn.relu(node788, name='node789') node790_pad = tf.pad(node789, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node790 = self.convolution(node790_pad, group=32, strides=[1, 1], padding='VALID', name='node790') node791 = self.batch_normalization(node790, variance_epsilon=9.999999747378752e-06, name='node791') node792 = tf.nn.relu(node791, name='node792') node793 = self.convolution(node792, group=1, strides=[1, 1], padding='VALID', name='node793') node794 = self.batch_normalization(node793, variance_epsilon=9.999999747378752e-06, name='node794') node795 = node794 + node786 node796 = tf.nn.relu(node795, name='node796') node797 = self.convolution(node796, group=1, strides=[1, 1], padding='VALID', name='node797') node798 = self.batch_normalization(node797, variance_epsilon=9.999999747378752e-06, name='node798') node799 = tf.nn.relu(node798, name='node799') node800_pad = tf.pad(node799, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node800 = self.convolution(node800_pad, group=32, strides=[1, 1], padding='VALID', name='node800') node801 = self.batch_normalization(node800, variance_epsilon=9.999999747378752e-06, name='node801') node802 = tf.nn.relu(node801, name='node802') node803 = self.convolution(node802, group=1, strides=[1, 1], padding='VALID', name='node803') node804 = self.batch_normalization(node803, variance_epsilon=9.999999747378752e-06, name='node804') node805 = node804 + node796 node806 = tf.nn.relu(node805, name='node806') node807 = self.convolution(node806, group=1, strides=[1, 1], padding='VALID', name='node807') node808 = self.batch_normalization(node807, variance_epsilon=9.999999747378752e-06, name='node808') node809 = tf.nn.relu(node808, name='node809') node810_pad = tf.pad(node809, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node810 = self.convolution(node810_pad, group=32, strides=[1, 1], padding='VALID', name='node810') node811 = self.batch_normalization(node810, 
variance_epsilon=9.999999747378752e-06, name='node811') node812 = tf.nn.relu(node811, name='node812') node813 = self.convolution(node812, group=1, strides=[1, 1], padding='VALID', name='node813') node814 = self.batch_normalization(node813, variance_epsilon=9.999999747378752e-06, name='node814') node815 = node814 + node806 node816 = tf.nn.relu(node815, name='node816') node817 = self.convolution(node816, group=1, strides=[1, 1], padding='VALID', name='node817') node818 = self.batch_normalization(node817, variance_epsilon=9.999999747378752e-06, name='node818') node819 = tf.nn.relu(node818, name='node819') node820_pad = tf.pad(node819, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node820 = self.convolution(node820_pad, group=32, strides=[1, 1], padding='VALID', name='node820') node821 = self.batch_normalization(node820, variance_epsilon=9.999999747378752e-06, name='node821') node822 = tf.nn.relu(node821, name='node822') node823 = self.convolution(node822, group=1, strides=[1, 1], padding='VALID', name='node823') node824 = self.batch_normalization(node823, variance_epsilon=9.999999747378752e-06, name='node824') node825 = node824 + node816 node826 = tf.nn.relu(node825, name='node826') node827 = self.convolution(node826, group=1, strides=[1, 1], padding='VALID', name='node827') node828 = self.batch_normalization(node827, variance_epsilon=9.999999747378752e-06, name='node828') node829 = tf.nn.relu(node828, name='node829') node830_pad = tf.pad(node829, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node830 = self.convolution(node830_pad, group=32, strides=[1, 1], padding='VALID', name='node830') node831 = self.batch_normalization(node830, variance_epsilon=9.999999747378752e-06, name='node831') node832 = tf.nn.relu(node831, name='node832') node833 = self.convolution(node832, group=1, strides=[1, 1], padding='VALID', name='node833') node834 = self.batch_normalization(node833, variance_epsilon=9.999999747378752e-06, name='node834') node835 = node834 + node826 node836 = tf.nn.relu(node835, name='node836') node837 = self.convolution(node836, group=1, strides=[1, 1], padding='VALID', name='node837') node838 = self.batch_normalization(node837, variance_epsilon=9.999999747378752e-06, name='node838') node839 = tf.nn.relu(node838, name='node839') node840_pad = tf.pad(node839, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node840 = self.convolution(node840_pad, group=32, strides=[1, 1], padding='VALID', name='node840') node841 = self.batch_normalization(node840, variance_epsilon=9.999999747378752e-06, name='node841') node842 = tf.nn.relu(node841, name='node842') node843 = self.convolution(node842, group=1, strides=[1, 1], padding='VALID', name='node843') node844 = self.batch_normalization(node843, variance_epsilon=9.999999747378752e-06, name='node844') node845 = node844 + node836 node846 = tf.nn.relu(node845, name='node846') node847 = self.convolution(node846, group=1, strides=[1, 1], padding='VALID', name='node847') node848 = self.batch_normalization(node847, variance_epsilon=9.999999747378752e-06, name='node848') node849 = tf.nn.relu(node848, name='node849') node850_pad = tf.pad(node849, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node850 = self.convolution(node850_pad, group=32, strides=[1, 1], padding='VALID', name='node850') node851 = self.batch_normalization(node850, variance_epsilon=9.999999747378752e-06, name='node851') node852 = tf.nn.relu(node851, name='node852') node853 = self.convolution(node852, group=1, strides=[1, 1], padding='VALID', name='node853') node854 = self.batch_normalization(node853, 
variance_epsilon=9.999999747378752e-06, name='node854') node855 = node854 + node846 node856 = tf.nn.relu(node855, name='node856') node857 = self.convolution(node856, group=1, strides=[1, 1], padding='VALID', name='node857') node858 = self.batch_normalization(node857, variance_epsilon=9.999999747378752e-06, name='node858') node859 = tf.nn.relu(node858, name='node859') node860_pad = tf.pad(node859, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node860 = self.convolution(node860_pad, group=32, strides=[1, 1], padding='VALID', name='node860') node861 = self.batch_normalization(node860, variance_epsilon=9.999999747378752e-06, name='node861') node862 = tf.nn.relu(node861, name='node862') node863 = self.convolution(node862, group=1, strides=[1, 1], padding='VALID', name='node863') node864 = self.batch_normalization(node863, variance_epsilon=9.999999747378752e-06, name='node864') node865 = node864 + node856 node866 = tf.nn.relu(node865, name='node866') node867 = self.convolution(node866, group=1, strides=[1, 1], padding='VALID', name='node867') node868 = self.batch_normalization(node867, variance_epsilon=9.999999747378752e-06, name='node868') node869 = tf.nn.relu(node868, name='node869') node870_pad = tf.pad(node869, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node870 = self.convolution(node870_pad, group=32, strides=[1, 1], padding='VALID', name='node870') node871 = self.batch_normalization(node870, variance_epsilon=9.999999747378752e-06, name='node871') node872 = tf.nn.relu(node871, name='node872') node873 = self.convolution(node872, group=1, strides=[1, 1], padding='VALID', name='node873') node874 = self.batch_normalization(node873, variance_epsilon=9.999999747378752e-06, name='node874') node875 = node874 + node866 node876 = tf.nn.relu(node875, name='node876') node877 = self.convolution(node876, group=1, strides=[1, 1], padding='VALID', name='node877') node878 = self.batch_normalization(node877, variance_epsilon=9.999999747378752e-06, name='node878') node879 = tf.nn.relu(node878, name='node879') node880_pad = tf.pad(node879, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node880 = self.convolution(node880_pad, group=32, strides=[1, 1], padding='VALID', name='node880') node881 = self.batch_normalization(node880, variance_epsilon=9.999999747378752e-06, name='node881') node882 = tf.nn.relu(node881, name='node882') node883 = self.convolution(node882, group=1, strides=[1, 1], padding='VALID', name='node883') node884 = self.batch_normalization(node883, variance_epsilon=9.999999747378752e-06, name='node884') node885 = node884 + node876 node886 = tf.nn.relu(node885, name='node886') node887 = self.convolution(node886, group=1, strides=[1, 1], padding='VALID', name='node887') node888 = self.batch_normalization(node887, variance_epsilon=9.999999747378752e-06, name='node888') node889 = tf.nn.relu(node888, name='node889') node890_pad = tf.pad(node889, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node890 = self.convolution(node890_pad, group=32, strides=[1, 1], padding='VALID', name='node890') node891 = self.batch_normalization(node890, variance_epsilon=9.999999747378752e-06, name='node891') node892 = tf.nn.relu(node891, name='node892') node893 = self.convolution(node892, group=1, strides=[1, 1], padding='VALID', name='node893') node894 = self.batch_normalization(node893, variance_epsilon=9.999999747378752e-06, name='node894') node895 = node894 + node886 node896 = tf.nn.relu(node895, name='node896') node897 = self.convolution(node896, group=1, strides=[1, 1], padding='VALID', name='node897') node898 = 
self.batch_normalization(node897, variance_epsilon=9.999999747378752e-06, name='node898') node899 = tf.nn.relu(node898, name='node899') node900_pad = tf.pad(node899, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node900 = self.convolution(node900_pad, group=32, strides=[1, 1], padding='VALID', name='node900') node901 = self.batch_normalization(node900, variance_epsilon=9.999999747378752e-06, name='node901') node902 = tf.nn.relu(node901, name='node902') node903 = self.convolution(node902, group=1, strides=[1, 1], padding='VALID', name='node903') node904 = self.batch_normalization(node903, variance_epsilon=9.999999747378752e-06, name='node904') node905 = node904 + node896 node906 = tf.nn.relu(node905, name='node906') node907 = self.convolution(node906, group=1, strides=[1, 1], padding='VALID', name='node907') node908 = self.batch_normalization(node907, variance_epsilon=9.999999747378752e-06, name='node908') node909 = tf.nn.relu(node908, name='node909') node910_pad = tf.pad(node909, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node910 = self.convolution(node910_pad, group=32, strides=[1, 1], padding='VALID', name='node910') node911 = self.batch_normalization(node910, variance_epsilon=9.999999747378752e-06, name='node911') node912 = tf.nn.relu(node911, name='node912') node913 = self.convolution(node912, group=1, strides=[1, 1], padding='VALID', name='node913') node914 = self.batch_normalization(node913, variance_epsilon=9.999999747378752e-06, name='node914') node915 = node914 + node906 node916 = tf.nn.relu(node915, name='node916') node917 = self.convolution(node916, group=1, strides=[1, 1], padding='VALID', name='node917') node918 = self.batch_normalization(node917, variance_epsilon=9.999999747378752e-06, name='node918') node919 = tf.nn.relu(node918, name='node919') node920_pad = tf.pad(node919, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node920 = self.convolution(node920_pad, group=32, strides=[1, 1], padding='VALID', name='node920') node921 = self.batch_normalization(node920, variance_epsilon=9.999999747378752e-06, name='node921') node922 = tf.nn.relu(node921, name='node922') node923 = self.convolution(node922, group=1, strides=[1, 1], padding='VALID', name='node923') node924 = self.batch_normalization(node923, variance_epsilon=9.999999747378752e-06, name='node924') node925 = node924 + node916 node926 = tf.nn.relu(node925, name='node926') node927 = self.convolution(node926, group=1, strides=[1, 1], padding='VALID', name='node927') node928 = self.batch_normalization(node927, variance_epsilon=9.999999747378752e-06, name='node928') node929 = tf.nn.relu(node928, name='node929') node930_pad = tf.pad(node929, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node930 = self.convolution(node930_pad, group=32, strides=[1, 1], padding='VALID', name='node930') node931 = self.batch_normalization(node930, variance_epsilon=9.999999747378752e-06, name='node931') node932 = tf.nn.relu(node931, name='node932') node933 = self.convolution(node932, group=1, strides=[1, 1], padding='VALID', name='node933') node934 = self.batch_normalization(node933, variance_epsilon=9.999999747378752e-06, name='node934') node935 = node934 + node926 node936 = tf.nn.relu(node935, name='node936') feature_dict['C4'] = node936 node937 = self.convolution(node936, group=1, strides=[1, 1], padding='VALID', name='node937') node945 = self.convolution(node936, group=1, strides=[2, 2], padding='VALID', name='node945') node938 = self.batch_normalization(node937, variance_epsilon=9.999999747378752e-06, name='node938') node946 = 
self.batch_normalization(node945, variance_epsilon=9.999999747378752e-06, name='node946') node939 = tf.nn.relu(node938, name='node939') node940_pad = tf.pad(node939, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node940 = self.convolution(node940_pad, group=32, strides=[2, 2], padding='VALID', name='node940') node941 = self.batch_normalization(node940, variance_epsilon=9.999999747378752e-06, name='node941') node942 = tf.nn.relu(node941, name='node942') node943 = self.convolution(node942, group=1, strides=[1, 1], padding='VALID', name='node943') node944 = self.batch_normalization(node943, variance_epsilon=9.999999747378752e-06, name='node944') node947 = node944 + node946 node948 = tf.nn.relu(node947, name='node948') node949 = self.convolution(node948, group=1, strides=[1, 1], padding='VALID', name='node949') node950 = self.batch_normalization(node949, variance_epsilon=9.999999747378752e-06, name='node950') node951 = tf.nn.relu(node950, name='node951') node952_pad = tf.pad(node951, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node952 = self.convolution(node952_pad, group=32, strides=[1, 1], padding='VALID', name='node952') node953 = self.batch_normalization(node952, variance_epsilon=9.999999747378752e-06, name='node953') node954 = tf.nn.relu(node953, name='node954') node955 = self.convolution(node954, group=1, strides=[1, 1], padding='VALID', name='node955') node956 = self.batch_normalization(node955, variance_epsilon=9.999999747378752e-06, name='node956') node957 = node956 + node948 node958 = tf.nn.relu(node957, name='node958') node959 = self.convolution(node958, group=1, strides=[1, 1], padding='VALID', name='node959') node960 = self.batch_normalization(node959, variance_epsilon=9.999999747378752e-06, name='node960') node961 = tf.nn.relu(node960, name='node961') node962_pad = tf.pad(node961, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node962 = self.convolution(node962_pad, group=32, strides=[1, 1], padding='VALID', name='node962') node963 = self.batch_normalization(node962, variance_epsilon=9.999999747378752e-06, name='node963') node964 = tf.nn.relu(node963, name='node964') node965 = self.convolution(node964, group=1, strides=[1, 1], padding='VALID', name='node965') node966 = self.batch_normalization(node965, variance_epsilon=9.999999747378752e-06, name='node966') node967 = node966 + node958 node968 = tf.nn.relu(node967, name='node968') feature_dict['C5'] = node968 # node969 = tf.nn.avg_pool(node968, [1, 7, 7, 1], [1, 1, 1, 1], padding='VALID', name='node969') # node970 = tf.contrib.layers.flatten(node969) # node971_flatten = tf.contrib.layers.flatten(node970) # node971 = tf.layers.dense(node971_flatten, 1000, # kernel_initializer=tf.constant_initializer(_weights_dict['node971']['weights']), # bias_initializer=tf.constant_initializer(_weights_dict['node971']['bias']), # use_bias=True) return feature_dict def resnet34(self, inputs, weight_file=None): feature_dict = {} global _weights_dict self._weights_dict = self.load_weights(weight_file) # inputs = tf.placeholder(tf.float32, shape=(None, 224, 224, 3), name='inputs') node219_pad = tf.pad(inputs, paddings=[[0, 0], [3, 3], [3, 3], [0, 0]]) node219 = self.convolution(node219_pad, group=1, strides=[2, 2], padding='VALID', name='node219') node220 = self.batch_normalization(node219, variance_epsilon=9.999999747378752e-06, name='node220') node221 = tf.nn.relu(node220, name='node221') node222_pad = tf.pad(node221, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]], constant_values=float('-Inf')) node222 = tf.nn.max_pool(node222_pad, [1, 3, 3, 1], [1, 2, 2, 1], 
padding='VALID', name='node222') feature_dict['C1'] = node222 node223_pad = tf.pad(node222, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node223 = self.convolution(node223_pad, group=1, strides=[1, 1], padding='VALID', name='node223') node224 = self.batch_normalization(node223, variance_epsilon=9.999999747378752e-06, name='node224') node225 = tf.nn.relu(node224, name='node225') node226_pad = tf.pad(node225, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node226 = self.convolution(node226_pad, group=1, strides=[1, 1], padding='VALID', name='node226') node227 = self.batch_normalization(node226, variance_epsilon=9.999999747378752e-06, name='node227') node228 = node227 + node222 node229 = tf.nn.relu(node228, name='node229') node230_pad = tf.pad(node229, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node230 = self.convolution(node230_pad, group=1, strides=[1, 1], padding='VALID', name='node230') node231 = self.batch_normalization(node230, variance_epsilon=9.999999747378752e-06, name='node231') node232 = tf.nn.relu(node231, name='node232') node233_pad = tf.pad(node232, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node233 = self.convolution(node233_pad, group=1, strides=[1, 1], padding='VALID', name='node233') node234 = self.batch_normalization(node233, variance_epsilon=9.999999747378752e-06, name='node234') node235 = node234 + node229 node236 = tf.nn.relu(node235, name='node236') node237_pad = tf.pad(node236, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node237 = self.convolution(node237_pad, group=1, strides=[1, 1], padding='VALID', name='node237') node238 = self.batch_normalization(node237, variance_epsilon=9.999999747378752e-06, name='node238') node239 = tf.nn.relu(node238, name='node239') node240_pad = tf.pad(node239, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node240 = self.convolution(node240_pad, group=1, strides=[1, 1], padding='VALID', name='node240') node241 = self.batch_normalization(node240, variance_epsilon=9.999999747378752e-06, name='node241') node242 = node241 + node236 node243 = tf.nn.relu(node242, name='node243') feature_dict['C1'] = node243 node244_pad = tf.pad(node243, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node244 = self.convolution(node244_pad, group=1, strides=[2, 2], padding='VALID', name='node244') node249 = self.convolution(node243, group=1, strides=[2, 2], padding='VALID', name='node249') node245 = self.batch_normalization(node244, variance_epsilon=9.999999747378752e-06, name='node245') node250 = self.batch_normalization(node249, variance_epsilon=9.999999747378752e-06, name='node250') node246 = tf.nn.relu(node245, name='node246') node247_pad = tf.pad(node246, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node247 = self.convolution(node247_pad, group=1, strides=[1, 1], padding='VALID', name='node247') node248 = self.batch_normalization(node247, variance_epsilon=9.999999747378752e-06, name='node248') node251 = node248 + node250 node252 = tf.nn.relu(node251, name='node252') node253_pad = tf.pad(node252, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node253 = self.convolution(node253_pad, group=1, strides=[1, 1], padding='VALID', name='node253') node254 = self.batch_normalization(node253, variance_epsilon=9.999999747378752e-06, name='node254') node255 = tf.nn.relu(node254, name='node255') node256_pad = tf.pad(node255, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]]) node256 = self.convolution(node256_pad, group=1, strides=[1, 1], padding='VALID', name='node256') node257 = self.batch_normalization(node256, variance_epsilon=9.999999747378752e-06, name='node257') node258 = node257 + node252 
        node259 = tf.nn.relu(node258, name='node259')
        node260_pad = tf.pad(node259, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
        node260 = self.convolution(node260_pad, group=1, strides=[1, 1], padding='VALID', name='node260')
        node261 = self.batch_normalization(node260, variance_epsilon=9.999999747378752e-06, name='node261')
        node262 = tf.nn.relu(node261, name='node262')
        node263_pad = tf.pad(node262, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
        node263 = self.convolution(node263_pad, group=1, strides=[1, 1], padding='VALID', name='node263')
        node264 = self.batch_normalization(node263, variance_epsilon=9.999999747378752e-06, name='node264')
        node265 = node264 + node259
        node266 = tf.nn.relu(node265, name='node266')
        node267_pad = tf.pad(node266, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
        node267 = self.convolution(node267_pad, group=1, strides=[1, 1], padding='VALID', name='node267')
        node268 = self.batch_normalization(node267, variance_epsilon=9.999999747378752e-06, name='node268')
        node269 = tf.nn.relu(node268, name='node269')
        node270_pad = tf.pad(node269, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
        node270 = self.convolution(node270_pad, group=1, strides=[1, 1], padding='VALID', name='node270')
        node271 = self.batch_normalization(node270, variance_epsilon=9.999999747378752e-06, name='node271')
        node272 = node271 + node266
        node273 = tf.nn.relu(node272, name='node273')
        feature_dict['C3'] = node273
        node274_pad = tf.pad(node273, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
        node274 = self.convolution(node274_pad, group=1, strides=[2, 2], padding='VALID', name='node274')
        node279 = self.convolution(node273, group=1, strides=[2, 2], padding='VALID', name='node279')
        node275 = self.batch_normalization(node274, variance_epsilon=9.999999747378752e-06, name='node275')
        node280 = self.batch_normalization(node279, variance_epsilon=9.999999747378752e-06, name='node280')
        node276 = tf.nn.relu(node275, name='node276')
        node277_pad = tf.pad(node276, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
        node277 = self.convolution(node277_pad, group=1, strides=[1, 1], padding='VALID', name='node277')
        node278 = self.batch_normalization(node277, variance_epsilon=9.999999747378752e-06, name='node278')
        node281 = node278 + node280
        node282 = tf.nn.relu(node281, name='node282')
        node283_pad = tf.pad(node282, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
        node283 = self.convolution(node283_pad, group=1, strides=[1, 1], padding='VALID', name='node283')
        node284 = self.batch_normalization(node283, variance_epsilon=9.999999747378752e-06, name='node284')
        node285 = tf.nn.relu(node284, name='node285')
        node286_pad = tf.pad(node285, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
        node286 = self.convolution(node286_pad, group=1, strides=[1, 1], padding='VALID', name='node286')
        node287 = self.batch_normalization(node286, variance_epsilon=9.999999747378752e-06, name='node287')
        node288 = node287 + node282
        node289 = tf.nn.relu(node288, name='node289')
        node290_pad = tf.pad(node289, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
        node290 = self.convolution(node290_pad, group=1, strides=[1, 1], padding='VALID', name='node290')
        node291 = self.batch_normalization(node290, variance_epsilon=9.999999747378752e-06, name='node291')
        node292 = tf.nn.relu(node291, name='node292')
        node293_pad = tf.pad(node292, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
        node293 = self.convolution(node293_pad, group=1, strides=[1, 1], padding='VALID', name='node293')
        node294 = self.batch_normalization(node293, variance_epsilon=9.999999747378752e-06, name='node294')
        node295 = node294 + node289
        node296 = tf.nn.relu(node295, name='node296')
        node297_pad = tf.pad(node296, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
        node297 = self.convolution(node297_pad, group=1, strides=[1, 1], padding='VALID', name='node297')
        node298 = self.batch_normalization(node297, variance_epsilon=9.999999747378752e-06, name='node298')
        node299 = tf.nn.relu(node298, name='node299')
        node300_pad = tf.pad(node299, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
        node300 = self.convolution(node300_pad, group=1, strides=[1, 1], padding='VALID', name='node300')
        node301 = self.batch_normalization(node300, variance_epsilon=9.999999747378752e-06, name='node301')
        node302 = node301 + node296
        node303 = tf.nn.relu(node302, name='node303')
        node304_pad = tf.pad(node303, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
        node304 = self.convolution(node304_pad, group=1, strides=[1, 1], padding='VALID', name='node304')
        node305 = self.batch_normalization(node304, variance_epsilon=9.999999747378752e-06, name='node305')
        node306 = tf.nn.relu(node305, name='node306')
        node307_pad = tf.pad(node306, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
        node307 = self.convolution(node307_pad, group=1, strides=[1, 1], padding='VALID', name='node307')
        node308 = self.batch_normalization(node307, variance_epsilon=9.999999747378752e-06, name='node308')
        node309 = node308 + node303
        node310 = tf.nn.relu(node309, name='node310')
        node311_pad = tf.pad(node310, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
        node311 = self.convolution(node311_pad, group=1, strides=[1, 1], padding='VALID', name='node311')
        node312 = self.batch_normalization(node311, variance_epsilon=9.999999747378752e-06, name='node312')
        node313 = tf.nn.relu(node312, name='node313')
        node314_pad = tf.pad(node313, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
        node314 = self.convolution(node314_pad, group=1, strides=[1, 1], padding='VALID', name='node314')
        node315 = self.batch_normalization(node314, variance_epsilon=9.999999747378752e-06, name='node315')
        node316 = node315 + node310
        node317 = tf.nn.relu(node316, name='node317')
        feature_dict['C4'] = node317
        node318_pad = tf.pad(node317, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
        node318 = self.convolution(node318_pad, group=1, strides=[2, 2], padding='VALID', name='node318')
        node323 = self.convolution(node317, group=1, strides=[2, 2], padding='VALID', name='node323')
        node319 = self.batch_normalization(node318, variance_epsilon=9.999999747378752e-06, name='node319')
        node324 = self.batch_normalization(node323, variance_epsilon=9.999999747378752e-06, name='node324')
        node320 = tf.nn.relu(node319, name='node320')
        node321_pad = tf.pad(node320, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
        node321 = self.convolution(node321_pad, group=1, strides=[1, 1], padding='VALID', name='node321')
        node322 = self.batch_normalization(node321, variance_epsilon=9.999999747378752e-06, name='node322')
        node325 = node322 + node324
        node326 = tf.nn.relu(node325, name='node326')
        node327_pad = tf.pad(node326, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
        node327 = self.convolution(node327_pad, group=1, strides=[1, 1], padding='VALID', name='node327')
        node328 = self.batch_normalization(node327, variance_epsilon=9.999999747378752e-06, name='node328')
        node329 = tf.nn.relu(node328, name='node329')
        node330_pad = tf.pad(node329, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
        node330 = self.convolution(node330_pad, group=1, strides=[1, 1], padding='VALID', name='node330')
        node331 = self.batch_normalization(node330, variance_epsilon=9.999999747378752e-06, name='node331')
        node332 = node331 + node326
        node333 = tf.nn.relu(node332, name='node333')
        node334_pad = tf.pad(node333, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
        node334 = self.convolution(node334_pad, group=1, strides=[1, 1], padding='VALID', name='node334')
        node335 = self.batch_normalization(node334, variance_epsilon=9.999999747378752e-06, name='node335')
        node336 = tf.nn.relu(node335, name='node336')
        node337_pad = tf.pad(node336, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
        node337 = self.convolution(node337_pad, group=1, strides=[1, 1], padding='VALID', name='node337')
        node338 = self.batch_normalization(node337, variance_epsilon=9.999999747378752e-06, name='node338')
        node339 = node338 + node333
        node340 = tf.nn.relu(node339, name='node340')
        feature_dict['C5'] = node340
        # node341 = tf.nn.avg_pool(node340, [1, 7, 7, 1], [1, 1, 1, 1], padding='VALID', name='node341')
        # node342 = tf.contrib.layers.flatten(node341)
        # node343_flatten = tf.contrib.layers.flatten(node342)
        # node343 = tf.layers.dense(node343_flatten, 1000,
        #                           kernel_initializer=tf.constant_initializer(_weights_dict['node343']['weights']),
        #                           bias_initializer=tf.constant_initializer(_weights_dict['node343']['bias']),
        #                           use_bias=True)
        return feature_dict

    def resnet50(self, inputs, weight_file=None):
        feature_dict = {}
        global _weights_dict
        self._weights_dict = self.load_weights(weight_file)
        # inputs = tf.placeholder(tf.float32, shape=(None, 224, 224, 3), name='inputs')
        node321_pad = tf.pad(inputs, paddings=[[0, 0], [3, 3], [3, 3], [0, 0]])
        node321 = self.convolution(node321_pad, group=1, strides=[2, 2], padding='VALID', name='node321')
        node322 = self.batch_normalization(node321, variance_epsilon=9.999999747378752e-06, name='node322')
        node323 = tf.nn.relu(node322, name='node323')
        node324_pad = tf.pad(node323, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]], constant_values=float('-Inf'))
        node324 = tf.nn.max_pool(node324_pad, [1, 3, 3, 1], [1, 2, 2, 1], padding='VALID', name='node324')
        feature_dict['C1'] = node324
        node325 = self.convolution(node324, group=1, strides=[1, 1], padding='VALID', name='node325')
        node333 = self.convolution(node324, group=1, strides=[1, 1], padding='VALID', name='node333')
        node326 = self.batch_normalization(node325, variance_epsilon=9.999999747378752e-06, name='node326')
        node334 = self.batch_normalization(node333, variance_epsilon=9.999999747378752e-06, name='node334')
        node327 = tf.nn.relu(node326, name='node327')
        node328_pad = tf.pad(node327, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
        node328 = self.convolution(node328_pad, group=1, strides=[1, 1], padding='VALID', name='node328')
        node329 = self.batch_normalization(node328, variance_epsilon=9.999999747378752e-06, name='node329')
        node330 = tf.nn.relu(node329, name='node330')
        node331 = self.convolution(node330, group=1, strides=[1, 1], padding='VALID', name='node331')
        node332 = self.batch_normalization(node331, variance_epsilon=9.999999747378752e-06, name='node332')
        node335 = node332 + node334
        node336 = tf.nn.relu(node335, name='node336')
        node337 = self.convolution(node336, group=1, strides=[1, 1], padding='VALID', name='node337')
        node338 = self.batch_normalization(node337, variance_epsilon=9.999999747378752e-06, name='node338')
        node339 = tf.nn.relu(node338, name='node339')
        node340_pad = tf.pad(node339, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
        node340 = self.convolution(node340_pad, group=1, strides=[1, 1], padding='VALID', name='node340')
        node341 = self.batch_normalization(node340, variance_epsilon=9.999999747378752e-06, name='node341')
        node342 = tf.nn.relu(node341, name='node342')
        node343 = self.convolution(node342, group=1, strides=[1, 1], padding='VALID', name='node343')
        node344 = self.batch_normalization(node343, variance_epsilon=9.999999747378752e-06, name='node344')
        node345 = node344 + node336
        node346 = tf.nn.relu(node345, name='node346')
        node347 = self.convolution(node346, group=1, strides=[1, 1], padding='VALID', name='node347')
        node348 = self.batch_normalization(node347, variance_epsilon=9.999999747378752e-06, name='node348')
        node349 = tf.nn.relu(node348, name='node349')
        node350_pad = tf.pad(node349, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
        node350 = self.convolution(node350_pad, group=1, strides=[1, 1], padding='VALID', name='node350')
        node351 = self.batch_normalization(node350, variance_epsilon=9.999999747378752e-06, name='node351')
        node352 = tf.nn.relu(node351, name='node352')
        node353 = self.convolution(node352, group=1, strides=[1, 1], padding='VALID', name='node353')
        node354 = self.batch_normalization(node353, variance_epsilon=9.999999747378752e-06, name='node354')
        node355 = node354 + node346
        node356 = tf.nn.relu(node355, name='node356')
        feature_dict['C2'] = node356
        node357 = self.convolution(node356, group=1, strides=[1, 1], padding='VALID', name='node357')
        node365 = self.convolution(node356, group=1, strides=[2, 2], padding='VALID', name='node365')
        node358 = self.batch_normalization(node357, variance_epsilon=9.999999747378752e-06, name='node358')
        node366 = self.batch_normalization(node365, variance_epsilon=9.999999747378752e-06, name='node366')
        node359 = tf.nn.relu(node358, name='node359')
        node360_pad = tf.pad(node359, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
        node360 = self.convolution(node360_pad, group=1, strides=[2, 2], padding='VALID', name='node360')
        node361 = self.batch_normalization(node360, variance_epsilon=9.999999747378752e-06, name='node361')
        node362 = tf.nn.relu(node361, name='node362')
        node363 = self.convolution(node362, group=1, strides=[1, 1], padding='VALID', name='node363')
        node364 = self.batch_normalization(node363, variance_epsilon=9.999999747378752e-06, name='node364')
        node367 = node364 + node366
        node368 = tf.nn.relu(node367, name='node368')
        node369 = self.convolution(node368, group=1, strides=[1, 1], padding='VALID', name='node369')
        node370 = self.batch_normalization(node369, variance_epsilon=9.999999747378752e-06, name='node370')
        node371 = tf.nn.relu(node370, name='node371')
        node372_pad = tf.pad(node371, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
        node372 = self.convolution(node372_pad, group=1, strides=[1, 1], padding='VALID', name='node372')
        node373 = self.batch_normalization(node372, variance_epsilon=9.999999747378752e-06, name='node373')
        node374 = tf.nn.relu(node373, name='node374')
        node375 = self.convolution(node374, group=1, strides=[1, 1], padding='VALID', name='node375')
        node376 = self.batch_normalization(node375, variance_epsilon=9.999999747378752e-06, name='node376')
        node377 = node376 + node368
        node378 = tf.nn.relu(node377, name='node378')
        node379 = self.convolution(node378, group=1, strides=[1, 1], padding='VALID', name='node379')
        node380 = self.batch_normalization(node379, variance_epsilon=9.999999747378752e-06, name='node380')
        node381 = tf.nn.relu(node380, name='node381')
        node382_pad = tf.pad(node381, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
        node382 = self.convolution(node382_pad, group=1, strides=[1, 1], padding='VALID', name='node382')
        node383 = self.batch_normalization(node382, variance_epsilon=9.999999747378752e-06, name='node383')
        node384 = tf.nn.relu(node383, name='node384')
        node385 = self.convolution(node384, group=1, strides=[1, 1], padding='VALID', name='node385')
        node386 = self.batch_normalization(node385, variance_epsilon=9.999999747378752e-06, name='node386')
        node387 = node386 + node378
        node388 = tf.nn.relu(node387, name='node388')
        node389 = self.convolution(node388, group=1, strides=[1, 1], padding='VALID', name='node389')
        node390 = self.batch_normalization(node389, variance_epsilon=9.999999747378752e-06, name='node390')
        node391 = tf.nn.relu(node390, name='node391')
        node392_pad = tf.pad(node391, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
        node392 = self.convolution(node392_pad, group=1, strides=[1, 1], padding='VALID', name='node392')
        node393 = self.batch_normalization(node392, variance_epsilon=9.999999747378752e-06, name='node393')
        node394 = tf.nn.relu(node393, name='node394')
        node395 = self.convolution(node394, group=1, strides=[1, 1], padding='VALID', name='node395')
        node396 = self.batch_normalization(node395, variance_epsilon=9.999999747378752e-06, name='node396')
        node397 = node396 + node388
        node398 = tf.nn.relu(node397, name='node398')
        feature_dict['C3'] = node398
        node399 = self.convolution(node398, group=1, strides=[1, 1], padding='VALID', name='node399')
        node407 = self.convolution(node398, group=1, strides=[2, 2], padding='VALID', name='node407')
        node400 = self.batch_normalization(node399, variance_epsilon=9.999999747378752e-06, name='node400')
        node408 = self.batch_normalization(node407, variance_epsilon=9.999999747378752e-06, name='node408')
        node401 = tf.nn.relu(node400, name='node401')
        node402_pad = tf.pad(node401, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
        node402 = self.convolution(node402_pad, group=1, strides=[2, 2], padding='VALID', name='node402')
        node403 = self.batch_normalization(node402, variance_epsilon=9.999999747378752e-06, name='node403')
        node404 = tf.nn.relu(node403, name='node404')
        node405 = self.convolution(node404, group=1, strides=[1, 1], padding='VALID', name='node405')
        node406 = self.batch_normalization(node405, variance_epsilon=9.999999747378752e-06, name='node406')
        node409 = node406 + node408
        node410 = tf.nn.relu(node409, name='node410')
        node411 = self.convolution(node410, group=1, strides=[1, 1], padding='VALID', name='node411')
        node412 = self.batch_normalization(node411, variance_epsilon=9.999999747378752e-06, name='node412')
        node413 = tf.nn.relu(node412, name='node413')
        node414_pad = tf.pad(node413, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
        node414 = self.convolution(node414_pad, group=1, strides=[1, 1], padding='VALID', name='node414')
        node415 = self.batch_normalization(node414, variance_epsilon=9.999999747378752e-06, name='node415')
        node416 = tf.nn.relu(node415, name='node416')
        node417 = self.convolution(node416, group=1, strides=[1, 1], padding='VALID', name='node417')
        node418 = self.batch_normalization(node417, variance_epsilon=9.999999747378752e-06, name='node418')
        node419 = node418 + node410
        node420 = tf.nn.relu(node419, name='node420')
        node421 = self.convolution(node420, group=1, strides=[1, 1], padding='VALID', name='node421')
        node422 = self.batch_normalization(node421, variance_epsilon=9.999999747378752e-06, name='node422')
        node423 = tf.nn.relu(node422, name='node423')
        node424_pad = tf.pad(node423, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
        node424 = self.convolution(node424_pad, group=1, strides=[1, 1], padding='VALID', name='node424')
        node425 = self.batch_normalization(node424, variance_epsilon=9.999999747378752e-06, name='node425')
        node426 = tf.nn.relu(node425, name='node426')
        node427 = self.convolution(node426, group=1, strides=[1, 1], padding='VALID', name='node427')
        node428 = self.batch_normalization(node427, variance_epsilon=9.999999747378752e-06, name='node428')
        node429 = node428 + node420
        node430 = tf.nn.relu(node429, name='node430')
        node431 = self.convolution(node430, group=1, strides=[1, 1], padding='VALID', name='node431')
        node432 = self.batch_normalization(node431, variance_epsilon=9.999999747378752e-06, name='node432')
        node433 = tf.nn.relu(node432, name='node433')
        node434_pad = tf.pad(node433, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
        node434 = self.convolution(node434_pad, group=1, strides=[1, 1], padding='VALID', name='node434')
        node435 = self.batch_normalization(node434, variance_epsilon=9.999999747378752e-06, name='node435')
        node436 = tf.nn.relu(node435, name='node436')
        node437 = self.convolution(node436, group=1, strides=[1, 1], padding='VALID', name='node437')
        node438 = self.batch_normalization(node437, variance_epsilon=9.999999747378752e-06, name='node438')
        node439 = node438 + node430
        node440 = tf.nn.relu(node439, name='node440')
        node441 = self.convolution(node440, group=1, strides=[1, 1], padding='VALID', name='node441')
        node442 = self.batch_normalization(node441, variance_epsilon=9.999999747378752e-06, name='node442')
        node443 = tf.nn.relu(node442, name='node443')
        node444_pad = tf.pad(node443, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
        node444 = self.convolution(node444_pad, group=1, strides=[1, 1], padding='VALID', name='node444')
        node445 = self.batch_normalization(node444, variance_epsilon=9.999999747378752e-06, name='node445')
        node446 = tf.nn.relu(node445, name='node446')
        node447 = self.convolution(node446, group=1, strides=[1, 1], padding='VALID', name='node447')
        node448 = self.batch_normalization(node447, variance_epsilon=9.999999747378752e-06, name='node448')
        node449 = node448 + node440
        node450 = tf.nn.relu(node449, name='node450')
        node451 = self.convolution(node450, group=1, strides=[1, 1], padding='VALID', name='node451')
        node452 = self.batch_normalization(node451, variance_epsilon=9.999999747378752e-06, name='node452')
        node453 = tf.nn.relu(node452, name='node453')
        node454_pad = tf.pad(node453, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
        node454 = self.convolution(node454_pad, group=1, strides=[1, 1], padding='VALID', name='node454')
        node455 = self.batch_normalization(node454, variance_epsilon=9.999999747378752e-06, name='node455')
        node456 = tf.nn.relu(node455, name='node456')
        node457 = self.convolution(node456, group=1, strides=[1, 1], padding='VALID', name='node457')
        node458 = self.batch_normalization(node457, variance_epsilon=9.999999747378752e-06, name='node458')
        node459 = node458 + node450
        node460 = tf.nn.relu(node459, name='node460')
        feature_dict['C4'] = node460
        node461 = self.convolution(node460, group=1, strides=[1, 1], padding='VALID', name='node461')
        node469 = self.convolution(node460, group=1, strides=[2, 2], padding='VALID', name='node469')
        node462 = self.batch_normalization(node461, variance_epsilon=9.999999747378752e-06, name='node462')
        node470 = self.batch_normalization(node469, variance_epsilon=9.999999747378752e-06, name='node470')
        node463 = tf.nn.relu(node462, name='node463')
        node464_pad = tf.pad(node463, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
        node464 = self.convolution(node464_pad, group=1, strides=[2, 2], padding='VALID', name='node464')
        node465 = self.batch_normalization(node464, variance_epsilon=9.999999747378752e-06, name='node465')
        node466 = tf.nn.relu(node465, name='node466')
        node467 = self.convolution(node466, group=1, strides=[1, 1], padding='VALID', name='node467')
        node468 = self.batch_normalization(node467, variance_epsilon=9.999999747378752e-06, name='node468')
        node471 = node468 + node470
        node472 = tf.nn.relu(node471, name='node472')
        node473 = self.convolution(node472, group=1, strides=[1, 1], padding='VALID', name='node473')
        node474 = self.batch_normalization(node473, variance_epsilon=9.999999747378752e-06, name='node474')
        node475 = tf.nn.relu(node474, name='node475')
        node476_pad = tf.pad(node475, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
        node476 = self.convolution(node476_pad, group=1, strides=[1, 1], padding='VALID', name='node476')
        node477 = self.batch_normalization(node476, variance_epsilon=9.999999747378752e-06, name='node477')
        node478 = tf.nn.relu(node477, name='node478')
        node479 = self.convolution(node478, group=1, strides=[1, 1], padding='VALID', name='node479')
        node480 = self.batch_normalization(node479, variance_epsilon=9.999999747378752e-06, name='node480')
        node481 = node480 + node472
        node482 = tf.nn.relu(node481, name='node482')
        node483 = self.convolution(node482, group=1, strides=[1, 1], padding='VALID', name='node483')
        node484 = self.batch_normalization(node483, variance_epsilon=9.999999747378752e-06, name='node484')
        node485 = tf.nn.relu(node484, name='node485')
        node486_pad = tf.pad(node485, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
        node486 = self.convolution(node486_pad, group=1, strides=[1, 1], padding='VALID', name='node486')
        node487 = self.batch_normalization(node486, variance_epsilon=9.999999747378752e-06, name='node487')
        node488 = tf.nn.relu(node487, name='node488')
        node489 = self.convolution(node488, group=1, strides=[1, 1], padding='VALID', name='node489')
        node490 = self.batch_normalization(node489, variance_epsilon=9.999999747378752e-06, name='node490')
        node491 = node490 + node482
        node492 = tf.nn.relu(node491, name='node492')
        feature_dict['C5'] = node492
        # node493 = tf.nn.avg_pool(node492, [1, 7, 7, 1], [1, 1, 1, 1], padding='VALID', name='node493')
        # node494 = tf.contrib.layers.flatten(node493)
        # node495_flatten = tf.contrib.layers.flatten(node494)
        # node495 = tf.layers.dense(node495_flatten, 1000,
        #                           kernel_initializer=tf.constant_initializer(_weights_dict['node495']['weights']),
        #                           bias_initializer=tf.constant_initializer(_weights_dict['node495']['bias']),
        #                           use_bias=True)
        return feature_dict

    def convolution(self, inputs, name, group, **kwargs):
        w = tf.Variable(self._weights_dict[name]['weights'],
                        trainable=self.is_training and int(name[4:]) > self.freeze_blocks_node_index[self.scope_name],
                        name=name + "_weight")
        if group == 1:
            layer = tf.nn.convolution(inputs, w, name=name, **kwargs)
        else:
            weight_groups = tf.split(w, num_or_size_splits=group, axis=-1)
            xs = tf.split(inputs, num_or_size_splits=group, axis=-1)
            convolved = [tf.nn.convolution(x, weight, name=name, **kwargs)
                         for (x, weight) in zip(xs, weight_groups)]
            layer = tf.concat(convolved, axis=-1)
        if 'bias' in self._weights_dict[name]:
            b = tf.Variable(self._weights_dict[name]['bias'],
                            trainable=self.is_training and int(name[4:]) > self.freeze_blocks_node_index[self.scope_name],
                            name=name + "_bias")
            layer += b
        return layer

    def batch_normalization(self, inputs, name, **kwargs):
        mean = tf.Variable(self._weights_dict[name]['mean'], name=name + "_mean", trainable=False)
        variance = tf.Variable(self._weights_dict[name]['var'], name=name + "_var", trainable=False)
        offset = tf.Variable(self._weights_dict[name]['bias'], name=name + "_bias",
                             trainable=False) if 'bias' in self._weights_dict[name] else None
        scale = tf.Variable(self._weights_dict[name]['scale'], name=name + "_scale",
                            trainable=False) if 'scale' in self._weights_dict[name] else None
        return tf.nn.batch_normalization(inputs, mean, variance, offset, scale, name=name, **kwargs)

    def resnet_base(self, img_batch, scope_name, is_training=True):
        if not is_training:
            scope_name = 'tower_0/' + scope_name
        with tf.variable_scope(scope_name):
            weight_file = self.cfgs.PRETRAINED_CKPT
            self.is_training = is_training
            self.scope_name = scope_name
            if self.cfgs.NET_NAME == 'resnext50_32x4d':
                feature_dict = self.resnext50_32x4d(img_batch, weight_file)
            elif self.cfgs.NET_NAME == 'resnext101_32x8d':
                feature_dict = self.resnext101_32x8d(img_batch, weight_file)
            elif self.cfgs.NET_NAME == 'resnet34':
                feature_dict = self.resnet34(img_batch, weight_file)
            else:
                feature_dict = self.resnet50(img_batch, weight_file)
        return feature_dict
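resnet_base is the single entry point of this backbone: it picks one of the graph builders by cfgs.NET_NAME and returns the C1-C5 feature pyramid. A minimal driving sketch follows; since the enclosing class, its constructor, and the converted weight file are not shown above, ResNetBackbone, FakeCfgs, and the .npy path are all hypothetical stand-ins.

import tensorflow as tf

class FakeCfgs:  # hypothetical stand-in for the real config object
    NET_NAME = 'resnet50'
    PRETRAINED_CKPT = 'pretrained/resnet50.npy'  # hypothetical weight path

backbone = ResNetBackbone(cfgs=FakeCfgs())  # hypothetical class name and constructor
images = tf.placeholder(tf.float32, shape=(None, 224, 224, 3), name='img_batch')
feature_dict = backbone.resnet_base(images, scope_name='resnet50', is_training=True)
# Five endpoints, coarsest last, suitable for an FPN-style detector head:
for level in ('C1', 'C2', 'C3', 'C4', 'C5'):
    print(level, feature_dict[level])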
36,536
2,587
package cn.hikyson.godeye.core.internal.modules.crash;

import androidx.annotation.Keep;

import java.io.Serializable;
import java.util.Map;

@Keep
public class CrashInfo implements Serializable {
    public String startTime;
    public String crashTime;
    public String crashType;
    public String crashMessage;
    public String processId;
    public String processName;
    public String threadId;
    public String threadName;
    public String nativeCrashCode;
    public String nativeCrashSignal;
    public String nativeCrashBacktrace;
    public String nativeCrashStack;
    public String javaCrashStacktrace;
    public Map<String, String> extras;

    public CrashInfo() {
    }

    @Override
    public String toString() {
        return "CrashInfo{" +
                "startTime='" + startTime + '\'' +
                ", crashTime='" + crashTime + '\'' +
                ", crashType='" + crashType + '\'' +
                ", crashMessage='" + crashMessage + '\'' +
                ", processId='" + processId + '\'' +
                ", processName='" + processName + '\'' +
                ", threadId='" + threadId + '\'' +
                ", threadName='" + threadName + '\'' +
                ", nativeCrashCode='" + nativeCrashCode + '\'' +
                ", nativeCrashSignal='" + nativeCrashSignal + '\'' +
                ", nativeCrashBacktrace='" + nativeCrashBacktrace + '\'' +
                ", nativeCrashStack='" + nativeCrashStack + '\'' +
                ", javaCrashStacktrace='" + javaCrashStacktrace + '\'' +
                ", extras=" + extras +
                '}';
    }
}
687
743
<reponame>vforkliu/GithubApp
package com.anly.githubapp.ui.base;

import android.content.Context;
import android.os.Bundle;
import android.support.annotation.Nullable;
import android.support.v4.app.Fragment;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;

import com.anly.githubapp.common.wrapper.AppLog;
import com.anly.githubapp.di.HasComponent;

/**
 * Created by mingjun on 16/7/16.
 */
public class BaseFragment extends Fragment {

    @Override
    public void onCreate(@Nullable Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        // AppLog.d("onCreate:" + this);
    }

    @Override
    public void onDestroy() {
        super.onDestroy();
        // AppLog.d("onDestroy:" + this);
    }

    @Override
    public void onActivityCreated(@Nullable Bundle savedInstanceState) {
        super.onActivityCreated(savedInstanceState);
        // AppLog.d("onActivityCreated:" + this);
    }

    @Override
    public void onViewCreated(View view, @Nullable Bundle savedInstanceState) {
        super.onViewCreated(view, savedInstanceState);
        // AppLog.d("onViewCreated:" + this);
    }

    @Nullable
    @Override
    public View onCreateView(LayoutInflater inflater, @Nullable ViewGroup container, @Nullable Bundle savedInstanceState) {
        // AppLog.d("onCreateView:" + this);
        return super.onCreateView(inflater, container, savedInstanceState);
    }

    @Override
    public void onDestroyView() {
        super.onDestroyView();
        // AppLog.d("onDestroyView:" + this);
    }

    @Override
    public void onAttach(Context context) {
        super.onAttach(context);
        // AppLog.d("onAttach:" + this);
    }

    @Override
    public void onDetach() {
        super.onDetach();
        // AppLog.d("onDetach:" + this);
    }

    @Override
    public void onStart() {
        super.onStart();
        // AppLog.d("onStart:" + this);
    }

    @Override
    public void onResume() {
        super.onResume();
        // AppLog.d("onResume:" + this);
    }

    @Override
    public void onPause() {
        super.onPause();
        // AppLog.d("onPause:" + this);
    }

    @Override
    public void onStop() {
        super.onStop();
        // AppLog.d("onStop:" + this);
    }

    /**
     * Gets a component for dependency injection by its type.
     */
    @SuppressWarnings("unchecked")
    protected <C> C getComponent(Class<C> componentType) {
        return componentType.cast(((HasComponent<C>) getActivity()).getComponent());
    }
}
1,011
3,000
<gh_stars>1000+
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.

package com.mojang.brigadier;

import com.google.common.collect.Lists;
import com.mojang.brigadier.context.CommandContext;
import com.mojang.brigadier.context.CommandContextBuilder;
import com.mojang.brigadier.exceptions.CommandSyntaxException;
import com.mojang.brigadier.tree.LiteralCommandNode;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.runners.MockitoJUnitRunner;

import static com.mojang.brigadier.arguments.IntegerArgumentType.integer;
import static com.mojang.brigadier.builder.LiteralArgumentBuilder.literal;
import static com.mojang.brigadier.builder.RequiredArgumentBuilder.argument;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasProperty;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.fail;
import static org.mockito.Matchers.argThat;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

@RunWith(MockitoJUnitRunner.class)
public class CommandDispatcherTest {
    private CommandDispatcher<Object> subject;

    @Mock
    private Command<Object> command;
    @Mock
    private Object source;

    @Before
    public void setUp() throws Exception {
        subject = new CommandDispatcher<>();
        when(command.run(any())).thenReturn(42);
    }

    private static StringReader inputWithOffset(final String input, final int offset) {
        final StringReader result = new StringReader(input);
        result.setCursor(offset);
        return result;
    }

    @SuppressWarnings("unchecked")
    @Test
    public void testCreateAndExecuteCommand() throws Exception {
        subject.register(literal("foo").executes(command));

        assertThat(subject.execute("foo", source), is(42));
        verify(command).run(any(CommandContext.class));
    }

    @SuppressWarnings("unchecked")
    @Test
    public void testCreateAndExecuteOffsetCommand() throws Exception {
        subject.register(literal("foo").executes(command));

        assertThat(subject.execute(inputWithOffset("/foo", 1), source), is(42));
        verify(command).run(any(CommandContext.class));
    }

    @SuppressWarnings("unchecked")
    @Test
    public void testCreateAndMergeCommands() throws Exception {
        subject.register(literal("base").then(literal("foo").executes(command)));
        subject.register(literal("base").then(literal("bar").executes(command)));

        assertThat(subject.execute("base foo", source), is(42));
        assertThat(subject.execute("base bar", source), is(42));
        verify(command, times(2)).run(any(CommandContext.class));
    }

    @Test
    public void testExecuteUnknownCommand() throws Exception {
        subject.register(literal("bar"));
        subject.register(literal("baz"));

        try {
            subject.execute("foo", source);
            fail();
        } catch (final CommandSyntaxException ex) {
            assertThat(ex.getType(), is(CommandSyntaxException.BUILT_IN_EXCEPTIONS.dispatcherUnknownCommand()));
            assertThat(ex.getCursor(), is(0));
        }
    }

    @Test
    public void testExecuteImpermissibleCommand() throws Exception {
        subject.register(literal("foo").requires(s -> false));

        try {
            subject.execute("foo", source);
            fail();
        } catch (final CommandSyntaxException ex) {
            assertThat(ex.getType(), is(CommandSyntaxException.BUILT_IN_EXCEPTIONS.dispatcherUnknownCommand()));
            assertThat(ex.getCursor(), is(0));
        }
    }

    @Test
    public void testExecuteEmptyCommand() throws Exception {
        subject.register(literal(""));

        try {
            subject.execute("", source);
            fail();
        } catch (final CommandSyntaxException ex) {
            assertThat(ex.getType(), is(CommandSyntaxException.BUILT_IN_EXCEPTIONS.dispatcherUnknownCommand()));
            assertThat(ex.getCursor(), is(0));
        }
    }

    @Test
    public void testExecuteUnknownSubcommand() throws Exception {
        subject.register(literal("foo").executes(command));

        try {
            subject.execute("foo bar", source);
            fail();
        } catch (final CommandSyntaxException ex) {
            assertThat(ex.getType(), is(CommandSyntaxException.BUILT_IN_EXCEPTIONS.dispatcherUnknownArgument()));
            assertThat(ex.getCursor(), is(4));
        }
    }

    @Test
    public void testExecuteIncorrectLiteral() throws Exception {
        subject.register(literal("foo").executes(command).then(literal("bar")));

        try {
            subject.execute("foo baz", source);
            fail();
        } catch (final CommandSyntaxException ex) {
            assertThat(ex.getType(), is(CommandSyntaxException.BUILT_IN_EXCEPTIONS.dispatcherUnknownArgument()));
            assertThat(ex.getCursor(), is(4));
        }
    }

    @Test
    public void testExecuteAmbiguousIncorrectArgument() throws Exception {
        subject.register(
            literal("foo").executes(command)
                .then(literal("bar"))
                .then(literal("baz"))
        );

        try {
            subject.execute("foo unknown", source);
            fail();
        } catch (final CommandSyntaxException ex) {
            assertThat(ex.getType(), is(CommandSyntaxException.BUILT_IN_EXCEPTIONS.dispatcherUnknownArgument()));
            assertThat(ex.getCursor(), is(4));
        }
    }

    @SuppressWarnings("unchecked")
    @Test
    public void testExecuteSubcommand() throws Exception {
        final Command<Object> subCommand = mock(Command.class);
        when(subCommand.run(any())).thenReturn(100);

        subject.register(literal("foo").then(
            literal("a")
        ).then(
            literal("=").executes(subCommand)
        ).then(
            literal("c")
        ).executes(command));

        assertThat(subject.execute("foo =", source), is(100));
        verify(subCommand).run(any(CommandContext.class));
    }

    @SuppressWarnings("unchecked")
    @Test
    public void testParseIncompleteLiteral() throws Exception {
        subject.register(literal("foo").then(literal("bar").executes(command)));

        final ParseResults<Object> parse = subject.parse("foo ", source);
        assertThat(parse.getReader().getRemaining(), equalTo(" "));
        assertThat(parse.getContext().getNodes().size(), is(1));
    }

    @SuppressWarnings("unchecked")
    @Test
    public void testParseIncompleteArgument() throws Exception {
        subject.register(literal("foo").then(argument("bar", integer()).executes(command)));

        final ParseResults<Object> parse = subject.parse("foo ", source);
        assertThat(parse.getReader().getRemaining(), equalTo(" "));
        assertThat(parse.getContext().getNodes().size(), is(1));
    }

    @SuppressWarnings("unchecked")
    @Test
    public void testExecuteAmbiguiousParentSubcommand() throws Exception {
        final Command<Object> subCommand = mock(Command.class);
        when(subCommand.run(any())).thenReturn(100);

        subject.register(
            literal("test")
                .then(
                    argument("incorrect", integer())
                        .executes(command)
                )
                .then(
                    argument("right", integer())
                        .then(
                            argument("sub", integer())
                                .executes(subCommand)
                        )
                )
        );

        assertThat(subject.execute("test 1 2", source), is(100));
        verify(subCommand).run(any(CommandContext.class));
        verify(command, never()).run(any());
    }

    @SuppressWarnings("unchecked")
    @Test
    public void testExecuteAmbiguiousParentSubcommandViaRedirect() throws Exception {
        final Command<Object> subCommand = mock(Command.class);
        when(subCommand.run(any())).thenReturn(100);

        final LiteralCommandNode<Object> real = subject.register(
            literal("test")
                .then(
                    argument("incorrect", integer())
                        .executes(command)
                )
                .then(
                    argument("right", integer())
                        .then(
                            argument("sub", integer())
                                .executes(subCommand)
                        )
                )
        );

        subject.register(literal("redirect").redirect(real));

        assertThat(subject.execute("redirect 1 2", source), is(100));
        verify(subCommand).run(any(CommandContext.class));
        verify(command, never()).run(any());
    }

    @SuppressWarnings("unchecked")
    @Test
    public void testExecuteRedirectedMultipleTimes() throws Exception {
        final LiteralCommandNode<Object> concreteNode = subject.register(literal("actual").executes(command));
        final LiteralCommandNode<Object> redirectNode = subject.register(literal("redirected").redirect(subject.getRoot()));

        final String input = "redirected redirected actual";

        final ParseResults<Object> parse = subject.parse(input, source);
        assertThat(parse.getContext().getRange().get(input), equalTo("redirected"));
        assertThat(parse.getContext().getNodes().size(), is(1));
        assertThat(parse.getContext().getRootNode(), is(subject.getRoot()));
        assertThat(parse.getContext().getNodes().get(0).getRange(), equalTo(parse.getContext().getRange()));
        assertThat(parse.getContext().getNodes().get(0).getNode(), is(redirectNode));

        final CommandContextBuilder<Object> child1 = parse.getContext().getChild();
        assertThat(child1, is(notNullValue()));
        assertThat(child1.getRange().get(input), equalTo("redirected"));
        assertThat(child1.getNodes().size(), is(1));
        assertThat(child1.getRootNode(), is(subject.getRoot()));
        assertThat(child1.getNodes().get(0).getRange(), equalTo(child1.getRange()));
        assertThat(child1.getNodes().get(0).getNode(), is(redirectNode));

        final CommandContextBuilder<Object> child2 = child1.getChild();
        assertThat(child2, is(notNullValue()));
        assertThat(child2.getRange().get(input), equalTo("actual"));
        assertThat(child2.getNodes().size(), is(1));
        assertThat(child2.getRootNode(), is(subject.getRoot()));
        assertThat(child2.getNodes().get(0).getRange(), equalTo(child2.getRange()));
        assertThat(child2.getNodes().get(0).getNode(), is(concreteNode));

        assertThat(subject.execute(parse), is(42));
        verify(command).run(any(CommandContext.class));
    }

    @SuppressWarnings("unchecked")
    @Test
    public void testExecuteRedirected() throws Exception {
        final RedirectModifier<Object> modifier = mock(RedirectModifier.class);
        final Object source1 = new Object();
        final Object source2 = new Object();

        when(modifier.apply(argThat(hasProperty("source", is(source))))).thenReturn(Lists.newArrayList(source1, source2));

        final LiteralCommandNode<Object> concreteNode = subject.register(literal("actual").executes(command));
        final LiteralCommandNode<Object> redirectNode = subject.register(literal("redirected").fork(subject.getRoot(), modifier));

        final String input = "redirected actual";
        final ParseResults<Object> parse = subject.parse(input, source);
        assertThat(parse.getContext().getRange().get(input), equalTo("redirected"));
        assertThat(parse.getContext().getNodes().size(), is(1));
        assertThat(parse.getContext().getRootNode(), equalTo(subject.getRoot()));
        assertThat(parse.getContext().getNodes().get(0).getRange(), equalTo(parse.getContext().getRange()));
        assertThat(parse.getContext().getNodes().get(0).getNode(), is(redirectNode));
        assertThat(parse.getContext().getSource(), is(source));

        final CommandContextBuilder<Object> parent = parse.getContext().getChild();
        assertThat(parent, is(notNullValue()));
        assertThat(parent.getRange().get(input), equalTo("actual"));
        assertThat(parent.getNodes().size(), is(1));
        assertThat(parse.getContext().getRootNode(), equalTo(subject.getRoot()));
        assertThat(parent.getNodes().get(0).getRange(), equalTo(parent.getRange()));
        assertThat(parent.getNodes().get(0).getNode(), is(concreteNode));
        assertThat(parent.getSource(), is(source));

        assertThat(subject.execute(parse), is(2));
        verify(command).run(argThat(hasProperty("source", is(source1))));
        verify(command).run(argThat(hasProperty("source", is(source2))));
    }

    @Test
    public void testExecuteOrphanedSubcommand() throws Exception {
        subject.register(literal("foo").then(
            argument("bar", integer())
        ).executes(command));

        try {
            subject.execute("foo 5", source);
            fail();
        } catch (final CommandSyntaxException ex) {
            assertThat(ex.getType(), is(CommandSyntaxException.BUILT_IN_EXCEPTIONS.dispatcherUnknownCommand()));
            assertThat(ex.getCursor(), is(5));
        }
    }

    @Test
    public void testExecute_invalidOther() throws Exception {
        final Command<Object> wrongCommand = mock(Command.class);
        subject.register(literal("w").executes(wrongCommand));
        subject.register(literal("world").executes(command));

        assertThat(subject.execute("world", source), is(42));
        verify(wrongCommand, never()).run(any());
        verify(command).run(any());
    }

    @Test
    public void parse_noSpaceSeparator() throws Exception {
        subject.register(literal("foo").then(argument("bar", integer()).executes(command)));

        try {
            subject.execute("foo$", source);
            fail();
        } catch (final CommandSyntaxException ex) {
            assertThat(ex.getType(), is(CommandSyntaxException.BUILT_IN_EXCEPTIONS.dispatcherUnknownCommand()));
            assertThat(ex.getCursor(), is(0));
        }
    }

    @Test
    public void testExecuteInvalidSubcommand() throws Exception {
        subject.register(literal("foo").then(
            argument("bar", integer())
        ).executes(command));

        try {
            subject.execute("foo bar", source);
            fail();
        } catch (final CommandSyntaxException ex) {
            assertThat(ex.getType(), is(CommandSyntaxException.BUILT_IN_EXCEPTIONS.readerExpectedInt()));
            assertThat(ex.getCursor(), is(4));
        }
    }

    @Test
    public void testGetPath() {
        final LiteralCommandNode<Object> bar = literal("bar").build();
        subject.register(literal("foo").then(bar));

        assertThat(subject.getPath(bar), equalTo(Lists.newArrayList("foo", "bar")));
    }

    @Test
    public void testFindNodeExists() {
        final LiteralCommandNode<Object> bar = literal("bar").build();
        subject.register(literal("foo").then(bar));

        assertThat(subject.findNode(Lists.newArrayList("foo", "bar")), is(bar));
    }

    @Test
    public void testFindNodeDoesntExist() {
        assertThat(subject.findNode(Lists.newArrayList("foo", "bar")), is(nullValue()));
    }
}
6,355
2,039
package org.nd4j.linalg.cpu.nativecpu.blas;

import org.bytedeco.javacpp.DoublePointer;
import org.bytedeco.javacpp.FloatPointer;
import org.bytedeco.javacpp.IntPointer;
import org.nd4j.linalg.api.blas.impl.SparseBaseLevel2;
import org.nd4j.linalg.api.buffer.DataBuffer;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;
import org.nd4j.nativeblas.SparseNd4jBlas;

import static org.bytedeco.javacpp.mkl_rt.*;

/**
 * @author <NAME>
 */
public class SparseCpuLevel2 extends SparseBaseLevel2 {
    private SparseNd4jBlas sparseNd4jBlas = (SparseNd4jBlas) Nd4j.sparseFactory().blas();

    // Mapping with Sparse Blas calls
    public void scoomv(char transA, int M, DataBuffer values, DataBuffer rowInd, DataBuffer colInd, int nnz, INDArray x, INDArray y) {
        mkl_cspblas_scoogemv(
                Character.toString(transA),
                (IntPointer) Nd4j.createBuffer(new int[]{M}).addressPointer(),
                (FloatPointer) values.addressPointer(),
                (IntPointer) rowInd.addressPointer(),
                (IntPointer) colInd.addressPointer(),
                (IntPointer) Nd4j.createBuffer(new int[]{nnz}).addressPointer(),
                (FloatPointer) x.data().addressPointer(),
                (FloatPointer) y.data().addressPointer());
    }

    public void dcoomv(char transA, int M, DataBuffer values, DataBuffer rowInd, DataBuffer colInd, int nnz, INDArray x, INDArray y) {
        mkl_cspblas_dcoogemv(
                Character.toString(transA),
                (IntPointer) Nd4j.createBuffer(new int[]{M}).addressPointer(),
                (DoublePointer) values.addressPointer(),
                (IntPointer) rowInd.addressPointer(),
                (IntPointer) colInd.addressPointer(),
                // MKL expects a pointer to the nnz value itself, so the buffer
                // must contain nnz, mirroring the single-precision overload above.
                (IntPointer) Nd4j.createBuffer(new int[]{nnz}).addressPointer(),
                (DoublePointer) x.data().addressPointer(),
                (DoublePointer) y.data().addressPointer());
    }
}
912
1,139
<gh_stars>1000+
package com.journaldev.csv;

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Scanner;

public class CSVScannerExample {

    public static void main(String[] args) throws IOException {
        Scanner scanner = new Scanner(new File("employees.csv"));
        Scanner dataScanner = null;
        int index = 0;
        List<Employee> empList = new ArrayList<>();

        while (scanner.hasNextLine()) {
            dataScanner = new Scanner(scanner.nextLine());
            dataScanner.useDelimiter(",");
            Employee emp = new Employee();
            while (dataScanner.hasNext()) {
                String data = dataScanner.next();
                if (index == 0)
                    emp.setId(Integer.parseInt(data));
                else if (index == 1)
                    emp.setName(data);
                else if (index == 2)
                    emp.setRole(data);
                else if (index == 3)
                    emp.setSalary(data);
                else
                    System.out.println("invalid data::" + data);
                index++;
            }
            index = 0;
            empList.add(emp);
        }
        scanner.close();

        System.out.println(empList);
    }
}
425
344
/*---------------------------------------------------------------------------------------------
 *  Copyright (c) Microsoft Corporation. All rights reserved.
 *  Licensed under the MIT License. See License.txt in the project root for license information.
 *--------------------------------------------------------------------------------------------*/
// Do not edit this file. It is machine generated.
{
    "breakpointWidgetAriaLabel": "Geben Sie die Haltepunktbedingung für die Zeile {0} ein. Das Programm verwendet diesen Haltepunkt nur, wenn diese Bedingung erfüllt ist. Drücken Sie zum Akzeptieren die EINGABETASTE oder ESC, um den Vorgang abzubrechen.",
    "breakpointWidgetPlaceholder": "Der Haltepunkt für die Zeile {0} wird nur verwendet, wenn diese Bedingung erfüllt ist. Drücken Sie zum Akzeptieren die EINGABETASTE oder ESC, um den Vorgang abzubrechen."
}
245
337
<reponame>sethvargo/vaex
import tempfile
import os

import numpy as np
import vaex

df = None


def setup_df(N, M):
    global df
    x = [np.arange(N, dtype=np.float64) for _ in range(M)]
    df = vaex.from_dict({
        f'c{i}': x[i] for i in range(M)
    })


def time_export_plain(N, M):
    with tempfile.TemporaryDirectory() as tmpdir:
        df.export_hdf5(os.path.join(tmpdir, 'bench.hdf5'))


time_export_plain.setup = setup_df
time_export_plain.params = [[1024**2, 1024**2 * 16], [1, 4, 16]]
time_export_plain.param_names = ['N', 'M']


def time_export_correlated(N, M):
    names = df.get_column_names()
    new_names = [f't{i}' for i in range(M)]
    for i in range(M):
        df[f't{i}'] = sum(df[c] for c in names)
    dfc = df[new_names]
    with tempfile.TemporaryDirectory() as tmpdir:
        dfc.export_hdf5(os.path.join(tmpdir, 'bench.hdf5'))


time_export_correlated.setup = setup_df
time_export_correlated.params = [[1024**2, 1024**2 * 16], [1, 4, 16]]
time_export_correlated.param_names = ['N', 'M']
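These are asv-style benchmark definitions: the runner calls setup_df once per (N, M) parameter pair and then times the time_* functions. They can also be driven by hand; a small sketch, assuming vaex with HDF5 support is installed (the tiny N used here is only to keep a manual run fast):

import time

setup_df(N=1024, M=4)        # populates the module-level `df`
start = time.perf_counter()
time_export_plain(1024, 4)   # N/M are just labels here; setup_df already fixed the data
print(f"plain export: {time.perf_counter() - start:.3f}s")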
456
1,244
<gh_stars>1000+
#include "libc.h"

/* Stub implementations of the legacy network-database enumeration API.
 * The host no-ops below are weak-aliased to the corresponding net, serv
 * and proto entry points, so all twelve functions share these bodies. */
void sethostent(int x)
{
}

void *gethostent()
{
	return 0;
}

void endhostent(void)
{
}

weak_alias(sethostent, setnetent);
weak_alias(gethostent, getnetent);
weak_alias(endhostent, endnetent);

weak_alias(sethostent, setservent);
weak_alias(gethostent, getservent);
weak_alias(endhostent, endservent);

weak_alias(sethostent, setprotoent);
weak_alias(gethostent, getprotoent);
weak_alias(endhostent, endprotoent);
184
2,255
import threading


class Payload(object):
    # Class-level counter shared by every instance; PAYLOAD_ID_LOCK makes id
    # allocation atomic when payloads are constructed from multiple threads.
    PAYLOAD_ID = 0
    PAYLOAD_ID_LOCK = threading.Lock()

    def __init__(self, data=""):
        self.data = data
        with Payload.PAYLOAD_ID_LOCK:
            self.id = Payload.PAYLOAD_ID
            Payload.PAYLOAD_ID += 1
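Because the increment happens under the class-level lock, ids stay unique and gap-free even under concurrent construction. A quick self-contained check (a sketch, assuming a fresh process so the counter starts at zero):

import threading

payloads = []
sink_lock = threading.Lock()

def build(count):
    for _ in range(count):
        p = Payload("demo")
        with sink_lock:            # protects the shared list, not the id counter
            payloads.append(p)

workers = [threading.Thread(target=build, args=(1000,)) for _ in range(4)]
for w in workers:
    w.start()
for w in workers:
    w.join()

ids = sorted(p.id for p in payloads)
assert ids == list(range(4000))    # no duplicates, no gaps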
131
9,657
import torch
import torch.nn as nn
import torch.nn.functional as F

from base.loss_transfer import TransferLoss


class AdaRNN(nn.Module):
    """
    model_type: 'Boosting', 'AdaRNN'
    """

    def __init__(self, use_bottleneck=False, bottleneck_width=256, n_input=128,
                 n_hiddens=[64, 64], n_output=6, dropout=0.0, len_seq=9,
                 model_type='AdaRNN', trans_loss='mmd'):
        super(AdaRNN, self).__init__()
        self.use_bottleneck = use_bottleneck
        self.n_input = n_input
        self.num_layers = len(n_hiddens)
        self.hiddens = n_hiddens
        self.n_output = n_output
        self.model_type = model_type
        self.trans_loss = trans_loss
        self.len_seq = len_seq
        in_size = self.n_input

        features = nn.ModuleList()
        for hidden in n_hiddens:
            rnn = nn.GRU(
                input_size=in_size,
                num_layers=1,
                hidden_size=hidden,
                batch_first=True,
                dropout=dropout
            )
            features.append(rnn)
            in_size = hidden
        self.features = nn.Sequential(*features)

        if use_bottleneck:  # finance
            self.bottleneck = nn.Sequential(
                nn.Linear(n_hiddens[-1], bottleneck_width),
                nn.Linear(bottleneck_width, bottleneck_width),
                nn.BatchNorm1d(bottleneck_width),
                nn.ReLU(),
                nn.Dropout(),
            )
            self.bottleneck[0].weight.data.normal_(0, 0.005)
            self.bottleneck[0].bias.data.fill_(0.1)
            self.bottleneck[1].weight.data.normal_(0, 0.005)
            self.bottleneck[1].bias.data.fill_(0.1)
            self.fc = nn.Linear(bottleneck_width, n_output)
            torch.nn.init.xavier_normal_(self.fc.weight)
        else:
            self.fc_out = nn.Linear(n_hiddens[-1], self.n_output)

        if self.model_type == 'AdaRNN':
            gate = nn.ModuleList()
            for i in range(len(n_hiddens)):
                gate_weight = nn.Linear(len_seq * self.hiddens[i] * 2, len_seq)
                gate.append(gate_weight)
            self.gate = gate

            bnlst = nn.ModuleList()
            for i in range(len(n_hiddens)):
                bnlst.append(nn.BatchNorm1d(len_seq))
            self.bn_lst = bnlst
            self.softmax = torch.nn.Softmax(dim=0)
            self.init_layers()

    def init_layers(self):
        for i in range(len(self.hiddens)):
            self.gate[i].weight.data.normal_(0, 0.05)
            self.gate[i].bias.data.fill_(0.0)

    def forward_pre_train(self, x, len_win=0):
        out = self.gru_features(x)
        fea = out[0]
        if self.use_bottleneck:
            fea_bottleneck = self.bottleneck(fea[:, -1, :])
            fc_out = self.fc(fea_bottleneck).squeeze()
        else:
            fc_out = self.fc_out(fea[:, -1, :]).squeeze()

        out_list_all, out_weight_list = out[1], out[2]
        out_list_s, out_list_t = self.get_features(out_list_all)
        loss_transfer = torch.zeros((1,)).cuda()
        for i in range(len(out_list_s)):
            criterion_transfer = TransferLoss(
                loss_type=self.trans_loss, input_dim=out_list_s[i].shape[2])
            h_start = 0
            for j in range(h_start, self.len_seq, 1):
                i_start = j - len_win if j - len_win >= 0 else 0
                i_end = j + len_win if j + len_win < self.len_seq else self.len_seq - 1
                for k in range(i_start, i_end + 1):
                    weight = out_weight_list[i][j] if self.model_type == 'AdaRNN' else 1 / (
                        self.len_seq - h_start) * (2 * len_win + 1)
                    loss_transfer = loss_transfer + weight * criterion_transfer.compute(
                        out_list_s[i][:, j, :], out_list_t[i][:, k, :])
        return fc_out, loss_transfer, out_weight_list

    def gru_features(self, x, predict=False):
        x_input = x
        out = None
        out_lis = []
        out_weight_list = [] if self.model_type == 'AdaRNN' else None
        for i in range(self.num_layers):
            out, _ = self.features[i](x_input.float())
            x_input = out
            out_lis.append(out)
            if self.model_type == 'AdaRNN' and not predict:
                out_gate = self.process_gate_weight(x_input, i)
                out_weight_list.append(out_gate)
        return out, out_lis, out_weight_list

    def process_gate_weight(self, out, index):
        x_s = out[0: int(out.shape[0] // 2)]
        x_t = out[out.shape[0] // 2: out.shape[0]]
        x_all = torch.cat((x_s, x_t), 2)
        x_all = x_all.view(x_all.shape[0], -1)
        weight = torch.sigmoid(self.bn_lst[index](self.gate[index](x_all.float())))
        weight = torch.mean(weight, dim=0)
        res = self.softmax(weight).squeeze()
        return res

    def get_features(self, output_list):
        fea_list_src, fea_list_tar = [], []
        for fea in output_list:
            fea_list_src.append(fea[0: fea.size(0) // 2])
            fea_list_tar.append(fea[fea.size(0) // 2:])
        return fea_list_src, fea_list_tar

    # For Boosting-based
    def forward_Boosting(self, x, weight_mat=None):
        out = self.gru_features(x)
        fea = out[0]
        if self.use_bottleneck:
            fea_bottleneck = self.bottleneck(fea[:, -1, :])
            fc_out = self.fc(fea_bottleneck).squeeze()
        else:
            fc_out = self.fc_out(fea[:, -1, :]).squeeze()

        out_list_all = out[1]
        out_list_s, out_list_t = self.get_features(out_list_all)
        loss_transfer = torch.zeros((1,)).cuda()
        if weight_mat is None:
            weight = (1.0 / self.len_seq *
                      torch.ones(self.num_layers, self.len_seq)).cuda()
        else:
            weight = weight_mat
        dist_mat = torch.zeros(self.num_layers, self.len_seq).cuda()
        for i in range(len(out_list_s)):
            criterion_transfer = TransferLoss(
                loss_type=self.trans_loss, input_dim=out_list_s[i].shape[2])
            for j in range(self.len_seq):
                loss_trans = criterion_transfer.compute(
                    out_list_s[i][:, j, :], out_list_t[i][:, j, :])
                loss_transfer = loss_transfer + weight[i, j] * loss_trans
                dist_mat[i, j] = loss_trans
        return fc_out, loss_transfer, dist_mat, weight

    # For Boosting-based
    def update_weight_Boosting(self, weight_mat, dist_old, dist_new):
        epsilon = 1e-12
        dist_old = dist_old.detach()
        dist_new = dist_new.detach()
        ind = dist_new > dist_old + epsilon
        weight_mat[ind] = weight_mat[ind] * \
            (1 + torch.sigmoid(dist_new[ind] - dist_old[ind]))
        weight_norm = torch.norm(weight_mat, dim=1, p=1)
        weight_mat = weight_mat / weight_norm.t().unsqueeze(1).repeat(1, self.len_seq)
        return weight_mat

    def predict(self, x):
        out = self.gru_features(x, predict=True)
        fea = out[0]
        if self.use_bottleneck:
            fea_bottleneck = self.bottleneck(fea[:, -1, :])
            fc_out = self.fc(fea_bottleneck).squeeze()
        else:
            fc_out = self.fc_out(fea[:, -1, :]).squeeze()
        return fc_out
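A CPU-only smoke test of the inference path. This is a sketch, assuming base.loss_transfer.TransferLoss is on the import path; predict() skips both the gating branch and the .cuda() calls used by the training-time forwards, so no GPU is required:

import torch

model = AdaRNN(n_input=128, n_hiddens=[64, 64], n_output=6, len_seq=9)
model.eval()
x = torch.randn(8, 9, 128)   # (batch, seq_len, n_input)
with torch.no_grad():
    pred = model.predict(x)
print(pred.shape)            # torch.Size([8, 6])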
3,899
317
#include "smack.h" #include <assert.h> #include <math.h> // @expect verified int main(void) { long double NaN = 0.0l / 0.0l; long double Inf = 1.0l / 0.0l; long double negInf = -1.0l / 0.0l; long double val = __VERIFIER_nondet_long_double(); if (!__isnanl(val) && !__isinfl(val) && !__iszerol(val)) { if (val > 0) { assert(fabsl(val) == val); } else { assert(fabsl(val) == -val); } } assert(fabsl(0.0l) == 0.0l); assert(fabsl(-0.0l) == 0.0l); int isNeg = __signbitl(fabsl(-0.0l)); assert(!isNeg); assert(fabsl(Inf) == Inf); assert(fabsl(negInf) == Inf); assert(__isnanl(fabsl(NaN))); return 0; }
309
416
package org.simpleflatmapper.map.getter; import org.simpleflatmapper.converter.Context; import org.simpleflatmapper.reflect.Getter; import org.simpleflatmapper.reflect.primitive.BooleanGetter; import org.simpleflatmapper.reflect.primitive.ByteGetter; import org.simpleflatmapper.reflect.primitive.CharacterGetter; import org.simpleflatmapper.reflect.primitive.DoubleGetter; import org.simpleflatmapper.reflect.primitive.FloatGetter; import org.simpleflatmapper.reflect.primitive.IntGetter; import org.simpleflatmapper.reflect.primitive.LongGetter; import org.simpleflatmapper.reflect.primitive.ShortGetter; public final class ContextualGetterAdapter<S, P> implements ContextualGetter<S, P> { private final Getter<? super S, ? extends P> getter; public ContextualGetterAdapter(Getter<? super S, ? extends P> getter) { this.getter = getter; } @Override public P get(S s, Context context) throws Exception { return getter.get(s); } public static <S, P> ContextualGetter<S, P> of(Getter<? super S, ? extends P> getter) { if (getter == null) return null; if (getter instanceof BooleanGetter) { return (ContextualGetter<S, P>) new FieldMapperBooleanGetterAdapter<S>((Getter<? super S, Boolean>) getter); } if (getter instanceof ByteGetter) { return (ContextualGetter<S, P>) new FieldMapperByteGetterAdapter<S>((Getter<? super S, Byte>) getter); } if (getter instanceof CharacterGetter) { return (ContextualGetter<S, P>) new FieldMapperCharacterGetterAdapter<S>((Getter<? super S, Character>) getter); } if (getter instanceof ShortGetter) { return (ContextualGetter<S, P>) new FieldMapperShortGetterAdapter<S>((Getter<? super S, Short>) getter); } if (getter instanceof IntGetter) { return (ContextualGetter<S, P>) new FieldMapperIntGetterAdapter<S>((Getter<? super S, Integer>) getter); } if (getter instanceof LongGetter) { return (ContextualGetter<S, P>) new FieldMapperLongGetterAdapter<S>((Getter<? super S, Long>) getter); } if (getter instanceof FloatGetter) { return (ContextualGetter<S, P>) new FieldMapperFloatGetterAdapter<S>((Getter<? super S, Float>) getter); } if (getter instanceof DoubleGetter) { return (ContextualGetter<S, P>) new FieldMapperDoubleGetterAdapter<S>((Getter<? super S, Double>) getter); } return new ContextualGetterAdapter<S, P>(getter); } private static class FieldMapperBooleanGetterAdapter<S> implements ContextualGetter<S, Boolean>, BooleanContextualGetter<S> { private final Getter<? super S, Boolean> getter; private final BooleanGetter<? super S> pGetter; public FieldMapperBooleanGetterAdapter(Getter<? super S, Boolean> getter) { this.getter = getter; this.pGetter = (BooleanGetter<? super S>) getter; } @Override public Boolean get(S s, Context context) throws Exception { return getter.get(s); } @Override public boolean getBoolean(S s, Context mappingContext) throws Exception { return pGetter.getBoolean(s); } } private static class FieldMapperByteGetterAdapter<S> implements ContextualGetter<S, Byte>, ByteContextualGetter<S> { private final Getter<? super S, Byte> getter; private final ByteGetter<? super S> pGetter; public FieldMapperByteGetterAdapter(Getter<? super S, Byte> getter) { this.getter = getter; this.pGetter = (ByteGetter<? 
super S>) getter;
        }

        @Override
        public Byte get(S s, Context context) throws Exception {
            return getter.get(s);
        }

        @Override
        public byte getByte(S s, Context mappingContext) throws Exception {
            return pGetter.getByte(s);
        }
    }

    private static class FieldMapperCharacterGetterAdapter<S> implements ContextualGetter<S, Character>, CharacterContextualGetter<S> {
        private final Getter<? super S, Character> getter;
        private final CharacterGetter<? super S> pGetter;

        public FieldMapperCharacterGetterAdapter(Getter<? super S, Character> getter) {
            this.getter = getter;
            this.pGetter = (CharacterGetter<? super S>) getter;
        }

        @Override
        public Character get(S s, Context context) throws Exception {
            return getter.get(s);
        }

        @Override
        public char getCharacter(S s, Context mappingContext) throws Exception {
            return pGetter.getCharacter(s);
        }
    }

    private static class FieldMapperShortGetterAdapter<S> implements ContextualGetter<S, Short>, ShortContextualGetter<S> {
        private final Getter<? super S, Short> getter;
        private final ShortGetter<? super S> pGetter;

        public FieldMapperShortGetterAdapter(Getter<? super S, Short> getter) {
            this.getter = getter;
            this.pGetter = (ShortGetter<? super S>) getter;
        }

        @Override
        public Short get(S s, Context context) throws Exception {
            return getter.get(s);
        }

        @Override
        public short getShort(S s, Context mappingContext) throws Exception {
            return pGetter.getShort(s);
        }
    }

    private static class FieldMapperIntGetterAdapter<S> implements ContextualGetter<S, Integer>, IntContextualGetter<S> {
        private final Getter<? super S, Integer> getter;
        private final IntGetter<? super S> pGetter;

        public FieldMapperIntGetterAdapter(Getter<? super S, Integer> getter) {
            this.getter = getter;
            this.pGetter = (IntGetter<? super S>) getter;
        }

        @Override
        public Integer get(S s, Context context) throws Exception {
            return getter.get(s);
        }

        @Override
        public int getInt(S s, Context mappingContext) throws Exception {
            return pGetter.getInt(s);
        }
    }

    private static class FieldMapperLongGetterAdapter<S> implements ContextualGetter<S, Long>, LongContextualGetter<S> {
        private final Getter<? super S, Long> getter;
        private final LongGetter<? super S> pGetter;

        public FieldMapperLongGetterAdapter(Getter<? super S, Long> getter) {
            this.getter = getter;
            this.pGetter = (LongGetter<? super S>) getter;
        }

        @Override
        public Long get(S s, Context context) throws Exception {
            return getter.get(s);
        }

        @Override
        public long getLong(S s, Context mappingContext) throws Exception {
            return pGetter.getLong(s);
        }
    }

    private static class FieldMapperFloatGetterAdapter<S> implements ContextualGetter<S, Float>, FloatContextualGetter<S> {
        private final Getter<? super S, Float> getter;
        private final FloatGetter<? super S> pGetter;

        public FieldMapperFloatGetterAdapter(Getter<? super S, Float> getter) {
            this.getter = getter;
            this.pGetter = (FloatGetter<? super S>) getter;
        }

        @Override
        public Float get(S s, Context context) throws Exception {
            return getter.get(s);
        }

        @Override
        public float getFloat(S s, Context mappingContext) throws Exception {
            return pGetter.getFloat(s);
        }
    }

    private static class FieldMapperDoubleGetterAdapter<S> implements ContextualGetter<S, Double>, DoubleContextualGetter<S> {
        private final Getter<? super S, Double> getter;
        private final DoubleGetter<? super S> pGetter;

        public FieldMapperDoubleGetterAdapter(Getter<? super S, Double> getter) {
            this.getter = getter;
            this.pGetter = (DoubleGetter<? super S>) getter;
        }

        @Override
        public Double get(S s, Context context) throws Exception {
            return getter.get(s);
        }

        @Override
        public double getDouble(S s, Context mappingContext) throws Exception {
            return pGetter.getDouble(s);
        }
    }

    public static <S> BooleanContextualGetter<? super S> of(final BooleanGetter<? super S> getter) {
        return new BooleanContextualGetterAdapter<S>(getter);
    }

    private static class BooleanContextualGetterAdapter<S> implements BooleanContextualGetter<S> {
        private final BooleanGetter<? super S> getter;

        public BooleanContextualGetterAdapter(BooleanGetter<? super S> getter) {
            this.getter = getter;
        }

        @Override
        public boolean getBoolean(S s, Context mappingContext) throws Exception {
            return getter.getBoolean(s);
        }
    }

    public static <S> ByteContextualGetter<? super S> of(final ByteGetter<? super S> getter) {
        return new ByteContextualGetterAdapter<S>(getter);
    }

    private static class ByteContextualGetterAdapter<S> implements ByteContextualGetter<S> {
        private final ByteGetter<? super S> getter;

        public ByteContextualGetterAdapter(ByteGetter<? super S> getter) {
            this.getter = getter;
        }

        @Override
        public byte getByte(S s, Context mappingContext) throws Exception {
            return getter.getByte(s);
        }
    }

    public static <S> CharacterContextualGetter<? super S> of(final CharacterGetter<? super S> getter) {
        return new CharacterContextualGetterAdapter<S>(getter);
    }

    private static class CharacterContextualGetterAdapter<S> implements CharacterContextualGetter<S> {
        private final CharacterGetter<? super S> getter;

        public CharacterContextualGetterAdapter(CharacterGetter<? super S> getter) {
            this.getter = getter;
        }

        @Override
        public char getCharacter(S s, Context mappingContext) throws Exception {
            return getter.getCharacter(s);
        }
    }

    public static <S> ShortContextualGetter<? super S> of(final ShortGetter<? super S> getter) {
        return new ShortContextualGetterAdapter<S>(getter);
    }

    private static class ShortContextualGetterAdapter<S> implements ShortContextualGetter<S> {
        private final ShortGetter<? super S> getter;

        public ShortContextualGetterAdapter(ShortGetter<? super S> getter) {
            this.getter = getter;
        }

        @Override
        public short getShort(S s, Context mappingContext) throws Exception {
            return getter.getShort(s);
        }
    }

    public static <S> IntContextualGetter<? super S> of(final IntGetter<? super S> getter) {
        return new IntContextualGetterAdapter<S>(getter);
    }

    private static class IntContextualGetterAdapter<S> implements IntContextualGetter<S> {
        private final IntGetter<? super S> getter;

        public IntContextualGetterAdapter(IntGetter<? super S> getter) {
            this.getter = getter;
        }

        @Override
        public int getInt(S s, Context mappingContext) throws Exception {
            return getter.getInt(s);
        }
    }

    public static <S> LongContextualGetter<? super S> of(final LongGetter<? super S> getter) {
        return new LongContextualGetterAdapter<S>(getter);
    }

    private static class LongContextualGetterAdapter<S> implements LongContextualGetter<S> {
        private final LongGetter<? super S> getter;

        public LongContextualGetterAdapter(LongGetter<? super S> getter) {
            this.getter = getter;
        }

        @Override
        public long getLong(S s, Context mappingContext) throws Exception {
            return getter.getLong(s);
        }
    }

    public static <S> FloatContextualGetter<? super S> of(final FloatGetter<? super S> getter) {
        return new FloatContextualGetterAdapter<S>(getter);
    }

    private static class FloatContextualGetterAdapter<S> implements FloatContextualGetter<S> {
        private final FloatGetter<? super S> getter;

        public FloatContextualGetterAdapter(FloatGetter<? super S> getter) {
            this.getter = getter;
        }

        @Override
        public float getFloat(S s, Context mappingContext) throws Exception {
            return getter.getFloat(s);
        }
    }

    public static <S> DoubleContextualGetter<? super S> of(final DoubleGetter<? super S> getter) {
        return new DoubleContextualGetterAdapter<S>(getter);
    }

    private static class DoubleContextualGetterAdapter<S> implements DoubleContextualGetter<S> {
        private final DoubleGetter<? super S> getter;

        public DoubleContextualGetterAdapter(DoubleGetter<? super S> getter) {
            this.getter = getter;
        }

        @Override
        public double getDouble(S s, Context mappingContext) throws Exception {
            return getter.getDouble(s);
        }
    }
}
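Not part of the original file: every class above applies the same adapter pattern — wrap a plain per-field getter so it satisfies a context-aware interface while ignoring the context. A minimal Python sketch of that idea (the names here are hypothetical, not from the Java source):

# Hypothetical sketch (not from the dump): a plain getter takes (source);
# a "contextual getter" takes (source, context). The adapter bridges the two.
from typing import Any, Callable, Generic, TypeVar

S = TypeVar("S")
T = TypeVar("T")

class ContextualGetterAdapter(Generic[S, T]):
    """Wraps a plain getter so callers can pass a mapping context."""
    def __init__(self, getter: Callable[[S], T]):
        self._getter = getter

    def get(self, source: S, context: Any) -> T:
        # The context is accepted for interface compatibility but unused,
        # exactly like the Java adapters above that delegate to the wrapped getter.
        return self._getter(source)

age_getter = ContextualGetterAdapter(lambda row: row["age"])
print(age_getter.get({"age": 41}, context=None))  # -> 41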
5,223
370
<reponame>alexrenz/bosen-2 // Author: <NAME> (<EMAIL>) // Date: 2014.10.04 #include "mlr_engine.hpp" #include "mlr_sgd_solver.hpp" #include "lr_sgd_solver.hpp" #include "abstract_mlr_sgd_solver.hpp" #include "common.hpp" #include <string> #include <cmath> #include <vector> #include <cstdio> #include <glog/logging.h> #include <petuum_ps_common/include/petuum_ps.hpp> #include <ml/include/ml.hpp> #include <cstdint> #include <fstream> #include <io/general_fstream.hpp> namespace mlr { namespace { // Save MLRSGDSolver::w_cache_ to disk. Could be time consuming if w is large. void SaveWeights(AbstractMLRSGDSolver* mlr_solver) { // Save weights. CHECK(!FLAGS_output_file_prefix.empty()); std::string output_filename = FLAGS_output_file_prefix + ".weight"; mlr_solver->SaveWeights(output_filename); } } // anonymous namespace MLREngine::MLREngine() : thread_counter_(0) { perform_test_ = FLAGS_perform_test; num_train_eval_ = FLAGS_num_train_eval; process_barrier_.reset(new boost::barrier(FLAGS_num_app_threads)); // Apepnd client_id if the train_data isn't global. std::string meta_file = FLAGS_train_file + (FLAGS_global_data ? "" : "." + std::to_string(FLAGS_client_id)) + ".meta"; petuum::ml::MetafileReader mreader(meta_file); num_train_data_ = mreader.get_int32("num_train_this_partition"); if (FLAGS_num_train_data != 0) { num_train_data_ = std::min(num_train_data_, FLAGS_num_train_data); } feature_dim_ = mreader.get_int32("feature_dim"); num_labels_ = mreader.get_int32("num_labels"); read_format_ = mreader.get_string("format"); feature_one_based_ = mreader.get_bool("feature_one_based"); label_one_based_ = mreader.get_bool("label_one_based"); snappy_compressed_ = mreader.get_bool("snappy_compressed"); // Read test meta file. if (perform_test_) { std::string test_meta_file = FLAGS_test_file + ".meta"; petuum::ml::MetafileReader mreader_test(test_meta_file); num_test_data_ = mreader_test.get_int32("num_test"); CHECK_EQ(feature_dim_, mreader_test.get_int32("feature_dim")); CHECK_EQ(num_labels_, mreader_test.get_int32("num_labels")); CHECK_EQ(read_format_, mreader_test.get_string("format")); CHECK_EQ(feature_one_based_, mreader_test.get_bool("feature_one_based")); CHECK_EQ(label_one_based_, mreader_test.get_bool("label_one_based")); } } MLREngine::~MLREngine() { for (auto p : train_features_) { delete p; } for (auto p : test_features_) { delete p; } } void MLREngine::ReadData() { std::string train_file = FLAGS_train_file + (FLAGS_global_data ? "" : "." 
+ std::to_string(FLAGS_client_id)); LOG(INFO) << "Reading train file: " << train_file; if (read_format_ == "bin") { petuum::ml::ReadDataLabelBinary(train_file, feature_dim_, num_train_data_, &train_features_, &train_labels_); if (perform_test_) { LOG(INFO) << "Reading test file: " << FLAGS_test_file; petuum::ml::ReadDataLabelBinary(FLAGS_test_file, feature_dim_, num_test_data_, &test_features_, &test_labels_); } } else if (read_format_ == "libsvm") { petuum::ml::ReadDataLabelLibSVM(train_file, feature_dim_, num_train_data_, &train_features_, &train_labels_, feature_one_based_, label_one_based_, snappy_compressed_); if (perform_test_) { LOG(INFO) << "Reading test file: " << FLAGS_test_file; petuum::ml::ReadDataLabelLibSVM(FLAGS_test_file, feature_dim_, num_test_data_, &test_features_, &test_labels_, feature_one_based_, label_one_based_, snappy_compressed_); } } } void MLREngine::InitWeights(const std::string& weight_file) { petuum::HighResolutionTimer weight_init_timer; petuum::io::ifstream weight_stream(weight_file); CHECK(weight_stream) << "Failed to open " << weight_file; LOG(INFO) << "Loading weights from " << weight_file; // Check that num_labels and feature_dim match. std::string field_name; int32_t num_labels_weightfile; weight_stream >> field_name >> num_labels_weightfile; CHECK_EQ("num_labels:", field_name); CHECK_EQ(num_labels_, num_labels_weightfile); int32_t feature_dim_weightfile; weight_stream >> field_name >> feature_dim_weightfile; CHECK_EQ("feature_dim:", field_name); CHECK_EQ(feature_dim_, feature_dim_weightfile); // Now read the weights and put them in w_table. for (int i = 0; i < num_labels_; ++i) { petuum::UpdateBatch<float> w_update_batch(feature_dim_); for (int d = 0; d < feature_dim_; ++d) { float weight_val; weight_stream >> weight_val; w_update_batch.UpdateSet(d, d, weight_val); } w_table_.BatchInc(i, w_update_batch); } weight_stream.close(); LOG(INFO) << "Loaded and initialized weight in " << weight_init_timer.elapsed(); } void MLREngine::Start() { petuum::PSTableGroup::RegisterThread(); // Initialize local thread data structures. int thread_id = thread_counter_++; int client_id = FLAGS_client_id; int num_clients = FLAGS_num_clients; int num_threads = FLAGS_num_app_threads; int num_epochs = FLAGS_num_epochs; int num_batches_per_epoch = FLAGS_num_batches_per_epoch; int num_secs_per_checkpoint = FLAGS_num_secs_per_checkpoint; float init_lr = FLAGS_init_lr; int num_batches_per_eval = FLAGS_num_batches_per_eval; bool global_data = FLAGS_global_data; int num_test_eval = FLAGS_num_test_eval; if (num_test_eval == 0) { num_test_eval = num_test_data_; } if (thread_id == 0) { loss_table_ = petuum::PSTableGroup::GetTableOrDie<float>(FLAGS_loss_table_id); w_table_ = petuum::PSTableGroup::GetTableOrDie<float>(FLAGS_w_table_id); } // Barrier to ensure w_table_ and loss_table_ is initialized. process_barrier_->wait(); if (FLAGS_use_weight_file) { if (client_id == 0 && thread_id == 0) { InitWeights(FLAGS_weight_file); } // Barrier to ensure weights are initialized from the existing weight. petuum::PSTableGroup::GlobalBarrier(); } // Create MLR sgd solver. std::unique_ptr<AbstractMLRSGDSolver> mlr_solver; if (num_labels_ == 2) { // Create LR sgd solver. 
LRSGDSolverConfig solver_config; solver_config.feature_dim = feature_dim_; solver_config.sparse_data = read_format_ == "libsvm"; solver_config.w_table = w_table_; solver_config.lambda = FLAGS_lambda; solver_config.w_table_num_cols = FLAGS_w_table_num_cols; mlr_solver.reset(new LRSGDSolver(solver_config)); } else { MLRSGDSolverConfig solver_config; solver_config.feature_dim = feature_dim_; solver_config.num_labels = num_labels_; solver_config.sparse_data = (read_format_ == "libsvm"); solver_config.w_table = w_table_; solver_config.w_table_num_cols = FLAGS_w_table_num_cols; CHECK_EQ(0, FLAGS_lambda) << "regularization isn't supported in MLR yet."; mlr_solver.reset(new MLRSGDSolver(solver_config)); } mlr_solver->RefreshParams(); petuum::HighResolutionTimer total_timer; petuum::ml::WorkloadManagerConfig workload_mgr_config; workload_mgr_config.thread_id = thread_id; workload_mgr_config.client_id = client_id; workload_mgr_config.num_clients = num_clients; workload_mgr_config.num_threads = num_threads; workload_mgr_config.num_batches_per_epoch = num_batches_per_epoch; workload_mgr_config.num_data = num_train_data_; workload_mgr_config.global_data = global_data; petuum::ml::WorkloadManager workload_mgr(workload_mgr_config); // For training error. petuum::ml::WorkloadManager workload_mgr_train_error(workload_mgr_config); LOG_IF(INFO, client_id == 0 && thread_id == 0) << "Batch size: " << workload_mgr.GetBatchSize(); petuum::ml::WorkloadManagerConfig test_workload_mgr_config; test_workload_mgr_config.thread_id = thread_id; test_workload_mgr_config.client_id = client_id; test_workload_mgr_config.num_clients = num_clients; test_workload_mgr_config.num_threads = num_threads; test_workload_mgr_config.num_batches_per_epoch = 1; // Need to set num_data to non-zero to avoid problem in WorkloadManager. test_workload_mgr_config.num_data = perform_test_ ? num_test_data_ : 10000; // test set is always globa (duplicated on all clients). test_workload_mgr_config.global_data = true; petuum::ml::WorkloadManager test_workload_mgr(test_workload_mgr_config); // It's reset after every check-pointing (saving to disk). petuum::HighResolutionTimer checkpoint_timer; float lr_decay_rate = FLAGS_lr_decay_rate; int32_t eval_counter = 0; int32_t batch_counter = 0; for (int epoch = 0; epoch < num_epochs; ++epoch) { float curr_lr = init_lr * pow(lr_decay_rate, epoch); workload_mgr.Restart(); while (!workload_mgr.IsEnd()) { std::vector<int> minibatch_idx(workload_mgr.GetBatchSize()); for (int i = 0; i < minibatch_idx.size(); ++i) { minibatch_idx[i] = workload_mgr.GetDataIdxAndAdvance(); } mlr_solver->MiniBatchSGD(train_features_, train_labels_, minibatch_idx, curr_lr); CHECK(workload_mgr.IsEndOfBatch()); petuum::PSTableGroup::Clock(); mlr_solver->RefreshParams(); ++batch_counter; if (batch_counter % num_batches_per_eval == 0) { petuum::HighResolutionTimer eval_timer; petuum::PSTableGroup::GlobalBarrier(); mlr_solver->RefreshParams(); ComputeTrainError(mlr_solver.get(), &workload_mgr_train_error, num_train_eval_, eval_counter); if (perform_test_) { ComputeTestError(mlr_solver.get(), &test_workload_mgr, num_test_eval, eval_counter); } if (client_id == 0 && thread_id == 0) { loss_table_.Inc(eval_counter, kColIdxLossTableEpoch, epoch + 1); loss_table_.Inc(eval_counter, kColIdxLossTableBatch, batch_counter); loss_table_.Inc(eval_counter, kColIdxLossTableTime, total_timer.elapsed()); if (eval_counter > 0) { // Print the last eval info to overcome staleness. 
LOG(INFO) << PrintOneEval(eval_counter - 1); if (checkpoint_timer.elapsed() > num_secs_per_checkpoint) { petuum::HighResolutionTimer save_disk_timer; LOG(INFO) << "SaveLoss now..."; SaveLoss(eval_counter - 1); SaveWeights(mlr_solver.get()); checkpoint_timer.restart(); LOG(INFO) << "Checkpointing finished in " << save_disk_timer.elapsed(); } } } ++eval_counter; } } CHECK_EQ(0, batch_counter % num_batches_per_epoch); } petuum::PSTableGroup::GlobalBarrier(); // Use all the train data in the last training error eval. ComputeTrainError(mlr_solver.get(), &workload_mgr_train_error, num_train_data_, eval_counter); if (perform_test_) { // Use the whole test set in the end. ComputeTestError(mlr_solver.get(), &test_workload_mgr, num_test_data_, eval_counter); } petuum::PSTableGroup::GlobalBarrier(); if (client_id == 0 && thread_id == 0) { loss_table_.Inc(eval_counter, kColIdxLossTableEpoch, num_epochs); loss_table_.Inc(eval_counter, kColIdxLossTableBatch, batch_counter); loss_table_.Inc(eval_counter, kColIdxLossTableTime, total_timer.elapsed()); LOG(INFO) << std::endl << PrintAllEval(eval_counter); LOG(INFO) << "Final eval: " << PrintOneEval(eval_counter); SaveLoss(eval_counter); SaveWeights(mlr_solver.get()); } petuum::PSTableGroup::DeregisterThread(); } void MLREngine::ComputeTrainError(AbstractMLRSGDSolver* mlr_solver, petuum::ml::WorkloadManager* workload_mgr, int32_t num_data_to_use, int32_t ith_eval) { float total_zero_one_loss = 0.; float total_entropy_loss = 0.; workload_mgr->Restart(); int num_total = 0; while (!workload_mgr->IsEnd() && num_total < num_data_to_use) { int32_t data_idx = workload_mgr->GetDataIdxAndAdvance(); std::vector<float> pred = mlr_solver->Predict(*(train_features_[data_idx])); total_zero_one_loss += mlr_solver->ZeroOneLoss(pred, train_labels_[data_idx]); total_entropy_loss += mlr_solver->CrossEntropyLoss(pred, train_labels_[data_idx]); ++num_total; } loss_table_.Inc(ith_eval, kColIdxLossTableZeroOneLoss, total_zero_one_loss); loss_table_.Inc(ith_eval, kColIdxLossTableEntropyLoss, total_entropy_loss); loss_table_.Inc(ith_eval, kColIdxLossTableNumEvalTrain, static_cast<float>(num_total)); } void MLREngine::ComputeTestError(AbstractMLRSGDSolver* mlr_solver, petuum::ml::WorkloadManager* test_workload_mgr, int32_t num_data_to_use, int32_t ith_eval) { test_workload_mgr->Restart(); int32_t num_error = 0; int32_t num_total = 0; int i = 0; while (!test_workload_mgr->IsEnd() && i < num_data_to_use) { int32_t data_idx = test_workload_mgr->GetDataIdxAndAdvance(); std::vector<float> pred = mlr_solver->Predict(*test_features_[data_idx]); num_error += mlr_solver->ZeroOneLoss(pred, test_labels_[data_idx]); ++num_total; ++i; } loss_table_.Inc(ith_eval, kColIdxLossTableTestZeroOneLoss, static_cast<float>(num_error)); loss_table_.Inc(ith_eval, kColIdxLossTableNumEvalTest, static_cast<float>(num_total)); } std::string MLREngine::PrintOneEval(int32_t ith_eval) { std::stringstream output; petuum::RowAccessor row_acc; loss_table_.Get(ith_eval, &row_acc); const auto& loss_row = row_acc.Get<petuum::DenseRow<float> >(); std::string test_info; if (perform_test_) { CHECK_LT(0, static_cast<int>(loss_row[kColIdxLossTableNumEvalTest])); std::string test_zero_one_str = std::to_string(loss_row[kColIdxLossTableTestZeroOneLoss] / loss_row[kColIdxLossTableNumEvalTest]); std::string num_test_str = std::to_string(static_cast<int>(loss_row[kColIdxLossTableNumEvalTest])); test_info += "test-0-1: " + test_zero_one_str + " num-test-used: " + num_test_str; } CHECK_LT(0, 
static_cast<int>(loss_row[kColIdxLossTableNumEvalTrain])); output << loss_row[kColIdxLossTableEpoch] << " " << loss_row[kColIdxLossTableBatch] << " " << "train-0-1: " << loss_row[kColIdxLossTableZeroOneLoss] / loss_row[kColIdxLossTableNumEvalTrain] << " " << "train-entropy: " << loss_row[kColIdxLossTableEntropyLoss] / loss_row[kColIdxLossTableNumEvalTrain] << " " << "num-train-used: " << loss_row[kColIdxLossTableNumEvalTrain] << " " << test_info << " " << "time: " << loss_row[kColIdxLossTableTime] << std::endl; return output.str(); } std::string MLREngine::PrintAllEval(int32_t up_to_ith_eval) { std::stringstream output; if (perform_test_) { output << "Epoch Batch Train-0-1 Train-Entropy Num-Train-Used Test-0-1 " << "Num-Test-Used Time" << std::endl; } else { output << "Epoch Batch Train-0-1 Train-Entropy Num-Train-Used " << "Time" << std::endl; } for (int i = 0; i <= up_to_ith_eval; ++i) { petuum::RowAccessor row_acc; loss_table_.Get(i, &row_acc); const auto& loss_row = row_acc.Get<petuum::DenseRow<float> >(); std::string test_info; if (perform_test_) { CHECK_LT(0, static_cast<int>(loss_row[kColIdxLossTableNumEvalTest])); std::string test_zero_one_str = std::to_string(loss_row[kColIdxLossTableTestZeroOneLoss] / loss_row[kColIdxLossTableNumEvalTest]); std::string num_test_str = std::to_string(static_cast<int>(loss_row[kColIdxLossTableNumEvalTest])); test_info += test_zero_one_str + " " + num_test_str; } CHECK_LT(0, static_cast<int>(loss_row[kColIdxLossTableNumEvalTrain])); output << loss_row[kColIdxLossTableEpoch] << " " << loss_row[kColIdxLossTableBatch] << " " << loss_row[kColIdxLossTableZeroOneLoss] / loss_row[kColIdxLossTableNumEvalTrain] << " " << loss_row[kColIdxLossTableEntropyLoss] / loss_row[kColIdxLossTableNumEvalTrain] << " " << loss_row[kColIdxLossTableNumEvalTrain] << " " << test_info << " " << loss_row[kColIdxLossTableTime] << std::endl; } return output.str(); } void MLREngine::SaveLoss(int32_t up_to_ith_eval) { CHECK(!FLAGS_output_file_prefix.empty()); std::string output_filename = FLAGS_output_file_prefix + ".loss"; petuum::HighResolutionTimer disk_output_timer; petuum::io::ofstream out_stream(output_filename); out_stream << GetExperimentInfo(); out_stream << PrintAllEval(up_to_ith_eval); out_stream.close(); LOG(INFO) << "Loss up to " << up_to_ith_eval << " (exclusive) is saved to " << output_filename << " in " << disk_output_timer.elapsed(); } std::string MLREngine::GetExperimentInfo() const { std::stringstream ss; ss << "Train set: " << FLAGS_train_file << std::endl << "feature_dim: " << feature_dim_ << std::endl << "num_labels: " << num_labels_ << std::endl << "num_train_data: " << num_train_data_ << std::endl << "num_test_data: " << num_test_data_ << std::endl << "num_epochs: " << FLAGS_num_epochs << std::endl << "num_batches_per_epoch: " << FLAGS_num_batches_per_epoch << std::endl << "init_lr: " << FLAGS_init_lr << std::endl << "lr_decay_rate: " << FLAGS_lr_decay_rate << std::endl << "num_batches_per_eval: " << FLAGS_num_batches_per_eval << std::endl << "use_weight_file: " << FLAGS_use_weight_file << std::endl << (FLAGS_use_weight_file ? FLAGS_weight_file + "\n" : "") << "num_secs_per_checkpoint: " << FLAGS_num_secs_per_checkpoint << std::endl << "staleness: " << FLAGS_staleness << std::endl << "num_clients: " << FLAGS_num_clients << std::endl << "num_app_threads: " << FLAGS_num_app_threads << std::endl << "num_comm_channels_per_client: " << FLAGS_num_comm_channels_per_client << std::endl; return ss.str(); } } // namespace mlr
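Not from the dump: the training loop above decays the learning rate geometrically per epoch via curr_lr = init_lr * pow(lr_decay_rate, epoch). A small illustrative Python sketch of that schedule (the sample values are made up):

# Illustrative sketch (not from the dump): per-epoch geometric learning-rate
# decay, curr_lr = init_lr * decay_rate ** epoch, as in the C++ loop above.
def lr_schedule(init_lr, decay_rate, num_epochs):
    return [init_lr * decay_rate ** epoch for epoch in range(num_epochs)]

# e.g. init_lr=0.1, decay_rate=0.95: the rate shrinks by ~5% each epoch.
for epoch, lr in enumerate(lr_schedule(0.1, 0.95, 5)):
    print(epoch, round(lr, 5))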
7,600
729
<filename>jigsaw-service/jigsaw-service-facade/src/main/gen/org/jigsaw/payment/model/Enums.java // Generated by the protocol buffer compiler. DO NOT EDIT! // source: enums.proto package org.jigsaw.payment.model; public final class Enums { private Enums() {} public static void registerAllExtensions( com.google.protobuf.ExtensionRegistryLite registry) { } public static void registerAllExtensions( com.google.protobuf.ExtensionRegistry registry) { registerAllExtensions( (com.google.protobuf.ExtensionRegistryLite) registry); } public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { return descriptor; } private static com.google.protobuf.Descriptors.FileDescriptor descriptor; static { java.lang.String[] descriptorData = { "\n\013enums.proto*\242\016\n\nStatusCode\022\013\n\007SUCCESS\020" + "\000\022\013\n\007UNKNOWN\020\001\022\023\n\017BAD_DATA_FORMAT\020\002\022\025\n\021P" + "ERMISSION_DENIED\020\003\022\022\n\016INTERNAL_ERROR\020\004\022\021" + "\n\rDATA_REQUIRED\020\005\022\021\n\rLIMIT_REACHED\020\006\022\021\n\r" + "QUOTA_REACHED\020\007\022\020\n\014INVALID_AUTH\020\010\022\020\n\014AUT" + "H_EXPIRED\020\t\022\021\n\rDATA_CONFLICT\020\n\022\023\n\017ENML_V" + "ALIDATION\020\013\022\025\n\021SHARD_UNAVAILABLE\020\014\022\021\n\rLE" + "N_TOO_SHORT\020\r\022\020\n\014LEN_TOO_LONG\020\016\022\013\n\007TOO_F" + "EW\020\017\022\014\n\010TOO_MANY\020\020\022\031\n\025UNSUPPORTED_OPERAT" + "ION\020\021\022\016\n\nTAKEN_DOWN\020\022\022\026\n\022RATE_LIMIT_REAC" + "HED\020\023\022$\n BUSINESS_SECURITY_LOGIN_REQUIRE" + "D\020\024\022\030\n\024DEVICE_LIMIT_REACHED\020\025\022\036\n\032ERROR_C" + "OUPONS_FORMAT_INPUT\020e\022\025\n\021ERROR_COUPON_LO" + "CK\020f\022\034\n\030ERROR_COUPON_USE_EXPIRED\020g\022 \n\034ER" + "ROR_COUPON_USE_UNACTIVATED\020h\022\033\n\027ERROR_CO" + "UPON_USE_FROZEN\020i\022\031\n\025ERROR_COUPON_USE_US" + "ED\020j\022\033\n\027ERROR_COUPON_USE_LOCKED\020k\022\036\n\032ERR" + "OR_COUPON_USE_DESTROYED\020l\022\035\n\031ERROR_COUPO" + "N_TYPE_CANT_OP\020m\022)\n%ERROR_COUPON_TYPE_CA" + "NT_DIRECT_CONSUME\020n\022\036\n\032ERROR_COUPON_USE_" + "UNSTARTED\020o\022\033\n\027ERROR_ORDER_COUPON_NULL\020p" + "\022#\n\037ERROR_ORDER_COUPON_NOT_ALL_LOCK\020q\022\034\n" + "\030ERROR_COUPON_SEND_UNHAND\020r\022 \n\034ERROR_COU" + "PON_SEND_ACTIVATING\020s\022\037\n\033ERROR_COUPON_SE" + "ND_DESTROYED\020t\022\034\n\030ERROR_COUPON_SEND_LOCK" + "ED\020u\022\037\n\033ERROR_COUPON_SEND_NO_ENOUGH\020v\022\"\n" + "\036ERROR_COUPON_SEND_TIME_EXPIRED\020w\022\"\n\036ERR" + "OR_COUPON_BATCH_UNAVAILABLE\020x\022$\n ERROR_O" + "RDER_COUPON_NOT_ALL_VALID\020y\022\'\n#ERROR_COU" + "PON_BATCH_SEND_UNAVAILABLE\020z\022\033\n\027ERROR_CO" + "UPON_BIND_LIMIT\020{\022\033\n\027ERROR_COUPON_BATCH_" + "NULL\020|\022\025\n\021ERROR_COUPON_NULL\020}\022(\n$ERROR_C" + "OUPON_BIND_NOT_BELONG_PARTNER\020~\022\035\n\031ERROR" + "_COUPON_BIND_IS_SENT\020\177\022(\n#ERROR_COUPON_B" + "IND_STATUS_NOT_UNUSED\020\200\001\022\037\n\032ERROR_COUPON" + "_CONSUME_ERROR\020\201\001\022!\n\034ERROR_COUPON_BIND_O" + "VER_LIMIT\020\202\001\022\033\n\026ERROR_PASSWORD_NOT_SET\020\311" + "\001\022\035\n\030ERROR_PASSWORD_SAVE_FAIL\0<PASSWORD>" + "R_PASSWORD_INVALID\020\<PASSWORD>\025ERROR_PASSWORD_" + "<PASSWORD>\<PASSWORD>ERROR_IDCARD_INVALID\020\315\001\022\031\n\024" + "ERROR_MOBILE_NOT_SET\020\316\001\022 \n\033ERROR_NAME_ID" + "CARD_NOT_MATCH\020\317\001\022\031\n\024ERROR_ACCOUNT_FROZE" + "N\020\320\001\022\033\n\026ERROR_ACCOUNT_INACTIVE\020\321\001\022\031\n\024ERR" + 
"OR_PASSWORD_WRONG\020\322\001\022\036\n\031ERROR_ACCOUNT_UN" + "AVAILABLE\020\255\002\022%\n ERROR_ACCOUNT_BALANCE_NO" + "T_ENOUGH\020\256\002\022\035\n\030ERROR_ORDER_ALREADY_PAID\020" + "\221\003*\302\001\n\014AccountTitle\022\030\n\024UNKNOWN_ACCOUNTTI" + "TLE\020\000\022\027\n\020PERSONAL_DEPOSIT\020\371\272\206\001\022\027\n\020BUSINE" + "SS_DEPOSIT\020\372\272\206\001\022\031\n\022PERSONAL_BANK_CARD\020\251\225" + "\267\001\022\031\n\022BUSINESS_BANK_CARD\020\252\225\267\001\022\027\n\020VIRTUAL" + "_CURRENCY\020\253\225\267\001\022\027\n\020PLATFORM_ACCOUNT\020\254\225\267\001*" + "U\n\007FeeUnit\022\024\n\020UNKNOWN_FEE_UNIT\020\000\022\007\n\003CNY\020" + "\001\022\007\n\003USD\020\002\022\007\n\003EUR\020\003\022\007\n\003HKD\020\004\022\007\n\003TWD\020\005\022\007\n" + "\003JPY\020\006*\210\001\n\nEntityType\022\027\n\023UNKNOWN_ENTITY_" + "TYPE\020\000\022\010\n\004USER\020\001\022\014\n\010MERCHANT\020\002\022\014\n\010CLEARI" + "NG\020\003\022\023\n\017PAYMENT_SERVICE\020\004\022\023\n\017PAYMENT_CHA" + "NNEL\020\005\022\021\n\rCERTIFICATION\020\006B\034\n\030org.jigsaw." + "payment.modelP\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor. InternalDescriptorAssigner() { public com.google.protobuf.ExtensionRegistry assignDescriptors( com.google.protobuf.Descriptors.FileDescriptor root) { descriptor = root; return null; } }; com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, new com.google.protobuf.Descriptors.FileDescriptor[] { }, assigner); } // @@protoc_insertion_point(outer_class_scope) }
2,927
22,688
<filename>third_party/can_card_library/hermes_can/include/bcan_lib.h
#ifndef _BCAN_LIB_H_
#define _BCAN_LIB_H_

#include <stdlib.h>
#include <stdint.h>
#include <sys/types.h>
#include <sys/ioctl.h>

#include "linux/zynq_api.h"

#ifdef DEBUG
#define BLOG_DBG0(s...) syslog(LOG_DEBUG, s);
#else
#define BLOG_DBG0(s...) do{}while(0);
#endif
#define BLOG_ERR(s...) syslog(LOG_ERR, s);

typedef uint64_t bcan_hdl_t;

#define BCAN_MAX_TX_MSG 256
#define BCAN_MAX_RX_MSG 256

typedef struct bcan_ihdl {
    int      dev_index;
    int      dev_state;
    int      fd;
    uint32_t baudrate;
    uint32_t tx_to;
    uint32_t rx_to;
} bcan_ihdl_t;

// Channel states
#define BCAN_DEV_UNINIT   -1
#define BCAN_DEV_OPEN     (1 << 0)
#define BCAN_DEV_CLOSE    (1 << 1)
#define BCAN_DEV_BAUD_SET (1 << 2)
#define BCAN_DEV_NORMAL   (1 << 3)
#define BCAN_DEV_LOOPBACK (1 << 4)
#define BCAN_DEV_CONFIG   (1 << 5)
#define BCAN_DEV_START    (1 << 6)
#define BCAN_DEV_STOP     (1 << 7)
#define BCAN_DEV_ACTIVE   (1 << 8)
#define BCAN_DEV_RECVD    (1 << 9)

#endif /* _BCAN_LIB_H_ */
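Not from the dump: the BCAN_DEV_* constants above are single-bit flags meant to be OR-ed into one device-state word. A short Python sketch of how such flags compose (values mirror the header; the usage is illustrative):

# Illustrative sketch (not from the dump): composing the bit flags defined above.
BCAN_DEV_OPEN     = 1 << 0
BCAN_DEV_BAUD_SET = 1 << 2
BCAN_DEV_NORMAL   = 1 << 3

state = BCAN_DEV_OPEN | BCAN_DEV_BAUD_SET   # open, baud rate configured
assert state & BCAN_DEV_OPEN                # test a single flag
state |= BCAN_DEV_NORMAL                    # set another flag
state &= ~BCAN_DEV_BAUD_SET                 # clear a flag
print(bin(state))  # -> 0b1001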
512
1,738
/*
 * All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
 * its licensors.
 *
 * For complete copyright and license terms please see the LICENSE at the root of this
 * distribution (the "License"). All use of this software is governed by the License,
 * or, if provided, by the license below or the license accompanying this file. Do not
 * remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *
 */
// Original file Copyright Crytek GMBH or its affiliates, used under license.

// Description : Occlusion buffer

#include "StdAfx.h"
#include "CZBufferCuller.h"

SHWOccZBuffer HWZBuffer;

void CZBufferCuller::BeginFrame(const SRenderingPassInfo& passInfo)
{
    if (!GetCVars()->e_CoverageBuffer)
    {
        return;
    }

    const CCamera& rCam = passInfo.GetCamera();
    m_AccurateTest = GetCVars()->e_CoverageBufferAccurateOBBTest;
    m_Treshold = GetCVars()->e_CoverageBufferTolerance;

    FUNCTION_PROFILER_3DENGINE;

    if (GetCVars()->e_CoverageBufferDebugFreeze || GetCVars()->e_CameraFreeze)
    {
        return;
    }

    m_ObjectsTested = m_ObjectsTestedAndRejected = 0; // to enable statistics

    m_Camera = rCam;
    m_Position = rCam.GetPosition();

    uint32 oldSizeX = m_SizeX;
    uint32 oldSizeY = m_SizeY;
    const uint32 sizeX = min(max(1, GetCVars()->e_CoverageBufferResolution), 1024);
    const uint32 sizeY = sizeX;
    m_SizeX = sizeX;
    m_SizeY = sizeY;
    m_fSizeX = static_cast<f32>(sizeX);
    m_fSizeY = static_cast<f32>(sizeY);
    m_fSizeZ = static_cast<f32>(TZB_MAXDEPTH);

    if (oldSizeX != sizeX)
    {
        CryModuleMemalignFree(m_ZBuffer);
        // 64-byte padded buffer to avoid memory page issues when vector loading
        m_ZBuffer = (TZBZexel*)CryModuleMemalign((sizeof(TZBZexel) * sizeX * sizeY) + 64, 128);
    }

    m_MatViewProj.Transpose();

    m_RotationSafe = GetCVars()->e_CoverageBufferRotationSafeCheck;
    m_DebugFreez = GetCVars()->e_CoverageBufferDebugFreeze != 0;
}

void CZBufferCuller::ReloadBuffer(const uint32 BufferID)
{
    if (m_DebugFreez)
    {
        return;
    }
    m_Bias = BufferID == 0 ? static_cast<int32>(GetCVars()->e_CoverageBufferBias) : 0;
}

CZBufferCuller::CZBufferCuller()
    : m_OutdoorVisible(1)
    , m_MatViewProj(IDENTITY)
{
    m_SizeX = m_SizeY = min(max(1, GetCVars()->e_CoverageBufferResolution), 1024);
    m_ZBuffer = (TZBZexel*)CryModuleMemalign((sizeof(TZBZexel) * m_SizeX * m_SizeY) + 64, 128);
    m_ObjectsTested = m_ObjectsTestedAndRejected = 0;
}

bool CZBufferCuller::IsBoxVisible(const AABB& objBox, uint32* const __restrict pResDest)
{
    FUNCTION_PROFILER_3DENGINE;
    m_ObjectsTested++;

    Vec4 Verts[8] =
    {
        m_MatViewProj * Vec4(objBox.min.x, objBox.min.y, objBox.min.z, 1.f), //0
        m_MatViewProj * Vec4(objBox.min.x, objBox.max.y, objBox.min.z, 1.f), //1
        m_MatViewProj * Vec4(objBox.max.x, objBox.min.y, objBox.min.z, 1.f), //2
        m_MatViewProj * Vec4(objBox.max.x, objBox.max.y, objBox.min.z, 1.f), //3
        m_MatViewProj * Vec4(objBox.min.x, objBox.min.y, objBox.max.z, 1.f), //4
        m_MatViewProj * Vec4(objBox.min.x, objBox.max.y, objBox.max.z, 1.f), //5
        m_MatViewProj * Vec4(objBox.max.x, objBox.min.y, objBox.max.z, 1.f), //6
        m_MatViewProj * Vec4(objBox.max.x, objBox.max.y, objBox.max.z, 1.f)  //7
    };

    bool CutNearPlane = Verts[0].w <= 0.f;
    CutNearPlane |= Verts[1].w <= 0.f;
    CutNearPlane |= Verts[2].w <= 0.f;
    CutNearPlane |= Verts[3].w <= 0.f;
    CutNearPlane |= Verts[4].w <= 0.f;
    CutNearPlane |= Verts[5].w <= 0.f;
    CutNearPlane |= Verts[6].w <= 0.f;
    CutNearPlane |= Verts[7].w <= 0.f;

    if (CutNearPlane)
    {
        return true;
    }

    IF (m_RotationSafe == 1, 0)
    {
        return Rasterize<1>(Verts, 8);
    }
    IF (m_RotationSafe == 2, 1)
    {
        return Rasterize<2>(Verts, 8);
    }
    if (Rasterize<0>(Verts, 8))
    {
        return true;
    }
    // Fixed: in the original these two statements followed an unconditional
    // return and were unreachable, so the rejection counter never advanced.
    ++m_ObjectsTestedAndRejected;
    return false;
}

static int sh = 8;

void CZBufferCuller::DrawDebug(int32 nStep)
{
    // project buffer to the screen
    nStep %= 32;
    if (!nStep)
    {
        return;
    }

    const CCamera& rCam = GetCamera();
    float farPlane = rCam.GetFarPlane();
    float nearPlane = rCam.GetNearPlane();
    float a = farPlane / (farPlane - nearPlane);
    float b = farPlane * nearPlane / (nearPlane - farPlane);
    const float scale = 10.0f;

    TransformationMatrices backupSceneMatrices;

    m_pRenderer->Set2DMode(m_SizeX, m_SizeY, backupSceneMatrices);

    SAuxGeomRenderFlags Flags = e_Def3DPublicRenderflags;
    Flags.SetDepthWriteFlag(e_DepthWriteOff);
    Flags.SetAlphaBlendMode(e_AlphaBlended);
    m_pRenderer->GetIRenderAuxGeom()->SetRenderFlags(Flags);

    Vec3 vSize(.4f, .4f, .4f);
    if (nStep == 1)
    {
        vSize = Vec3(.5f, .5f, .5f);
    }

    for (uint32 y = 0; y < m_SizeY; y += nStep)
    {
        for (uint32 x = 0; x < m_SizeX; x += nStep)
        {
            Vec3 vPos((float)x, (float)(m_SizeY - y - 1), 0);
            vPos += Vec3(0.5f, -0.5f, 0);
            const uint32 Value = m_ZBuffer[x + y * m_SizeX];
            //Value>>=sh;
            float w = Value / (65535.0f);
            float z = b / (w - a);
            uint32 ValueC = 255u - min(255u, (uint32)(z * scale));
            ColorB col;
            col = ColorB(ValueC, ValueC, ValueC, 200);
            if (Value != 0xffff)
            {
                //ColorB col((Value&31)<<3,((Value>>5)&31)<<3,((Value>>10)&63)<<2,200);
                GetRenderer()->GetIRenderAuxGeom()->DrawAABB(AABB(vPos - vSize, vPos + vSize), nStep <= 2, col, eBBD_Faceted);
            }
        }
    }

    //m_pRenderer->GetIRenderAuxGeom()->Flush();
    m_pRenderer->Unset2DMode(backupSceneMatrices);
}

void CZBufferCuller::GetMemoryUsage(ICrySizer* pSizer) const
{
    SIZER_COMPONENT_NAME(pSizer, "CoverageBuffer");
    pSizer->AddObject(m_ZBuffer, sizeof(TZBZexel) * m_SizeX * m_SizeY);
}
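Not from the dump: DrawDebug above recovers eye-space depth from a stored 16-bit value with z = b / (w - a), where a = far/(far-near) and b = far*near/(near-far). A Python sketch of that linearization (near/far values are illustrative):

# Illustrative sketch (not from the dump): the depth-linearization math from
# DrawDebug, mapping a 16-bit z-buffer value back to eye-space distance.
def linear_depth(stored, near=0.25, far=1024.0):
    a = far / (far - near)
    b = far * near / (near - far)
    w = stored / 65535.0          # normalized post-projection depth in [0, 1]
    return b / (w - a)            # eye-space z: near at w=0, far at w=1

for v in (0, 32768, 65535):
    print(v, round(linear_depth(v), 3))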
3,018
5,169
<gh_stars>1000+
{
  "name": "UIImageViewAligned",
  "version": "0.0.1",
  "summary": "A UIImageView subclass which allows you to align the image left/right/top/bottom, even when contentMode is AspectFit.",
  "description": "It is a subclass of UIImageView that allows you to customize the alignment of the displayed image inside the view's frame. This works even if the contentMode is set to AspectFit, AspectFill or ScaleToFill.",
  "homepage": "https://github.com/reydanro/UIImageViewAligned",
  "license": {
    "type": "MIT",
    "file": "LICENSE"
  },
  "authors": "<NAME>",
  "social_media_url": "http://www.mindtreatstudios.com/",
  "platforms": {
    "ios": null
  },
  "source": {
    "git": "https://github.com/reydanro/UIImageViewAligned.git",
    "tag": "pod-0.0.1"
  },
  "source_files": "UIImageViewAligned",
  "frameworks": "UIKit"
}
303
698
<reponame>siddheshmhatre/high_performance_python
import numpy
import _util


def norm_square_numpy(vector):
    return numpy.sum(vector * vector)


def run_experiment(size, num_iter=3):
    vector = numpy.arange(size)
    return _util.run(norm_square_numpy, vector, num_iter)


if __name__ == "__main__":
    print run_experiment(1000000, 10)
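Not from the dump: a note on the function above. numpy.sum(vector * vector) materializes the temporary array vector * vector before reducing it; numpy.dot(vector, vector) computes the same squared norm in one fused call and is usually faster for large vectors. A small sketch:

# Illustrative sketch (not from the dump): two equivalent squared-norm computations.
import numpy

vector = numpy.arange(1000000, dtype=numpy.float64)
a = numpy.sum(vector * vector)   # allocates a temporary array, then reduces
b = numpy.dot(vector, vector)    # single BLAS call, no temporary
assert numpy.isclose(a, b)
print(b)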
132
1,399
package com.codemonkeylabs.fpslibrary.sample.UI;

import android.support.v7.widget.RecyclerView;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;

import com.codemonkeylabs.fpslibrary.sample.R;

import javax.inject.Inject;

/**
 * Created by brianplummer on 8/30/15.
 */
public class FPSSampleAdpater extends RecyclerView.Adapter<FPSSampleViewHolder>
{
    private float megaBytes = 1;

    @Inject
    public FPSSampleAdpater()
    {
    }

    @Override
    public FPSSampleViewHolder onCreateViewHolder(ViewGroup parent, int viewType)
    {
        View itemView = LayoutInflater.from(
                parent.getContext()).inflate(R.layout.sample_item, parent, false);
        return new FPSSampleViewHolder(itemView);
    }

    @Override
    public void onBindViewHolder(FPSSampleViewHolder holder, int position)
    {
        holder.onBind(position, megaBytes);
    }

    @Override
    public int getItemCount()
    {
        return 255;
    }

    public void setMegaBytes(float megaBytes)
    {
        this.megaBytes = megaBytes;
    }
}
421
2,003
// Copyright (c) 2018, Baidu.com, Inc. All Rights Reserved
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#pragma once

#include "memtable.h"
#include "memtable_on_leveldb.h"

namespace leveldb {

class ShardedMemTable : public MemTable {
public:
  // For Base MemTable
  ShardedMemTable(const InternalKeyComparator &cmp,
                  CompactStrategyFactory *compact_strategy_factory,
                  int32_t shard_num);

  // For MemTable on LevelDB
  ShardedMemTable(const std::string &dbname, const InternalKeyComparator &cmp,
                  CompactStrategyFactory *compact_strategy_factory,
                  size_t write_buffer_size, size_t block_size,
                  Logger *info_log, int32_t shard_num);

  virtual void Ref() override { ++refs_; }

  virtual void Unref() override {
    --refs_;
    assert(refs_ >= 0);
    if (refs_ <= 0) {
      delete this;
    }
  }

  virtual ~ShardedMemTable() override;

  virtual size_t ApproximateMemoryUsage() override;

  virtual Iterator *NewIterator() override;

  virtual void Add(SequenceNumber number, ValueType type, const Slice &slice,
                   const Slice &slice1) override;

  virtual bool Get(const LookupKey &key, std::string *value,
                   const std::map<uint64_t, uint64_t> &rollbacks,
                   Status *s) override;

  virtual SequenceNumber GetLastSequence() const override { return last_seq_; }

  virtual bool Empty() override { return empty_; }

  virtual void SetNonEmpty() override { empty_ = false; }

  virtual bool BeingFlushed() override { return being_flushed_; }

  virtual void SetBeingFlushed(bool flag) override {
    assert(flag != being_flushed_);
    being_flushed_ = flag;
  }

  virtual uint64_t GetSnapshot(uint64_t last_sequence) override;

  virtual void ReleaseSnapshot(uint64_t sequence_number) override;

private:
  InternalKeyComparator comparator_;
  std::vector<MemTable *>::iterator current_memtable_;
  std::vector<MemTable *> sharded_memtable_;
  SequenceNumber last_seq_{0};
  int refs_{0};
  bool being_flushed_{false};
  bool empty_{true};
};
}
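Not from the dump: the header above only declares the sharded memtable; it does not show how keys are routed to shards, so the hash-modulo routing in this Python sketch is an assumption about the general technique, not this codebase:

# Hypothetical sketch (not from the dump): the core idea of a sharded memtable --
# route each key to one of N sub-tables so concurrent writers contend less.
shard_num = 4
shards = [dict() for _ in range(shard_num)]  # stand-ins for per-shard memtables

def shard_for(key: bytes) -> dict:
    return shards[hash(key) % shard_num]     # routing scheme assumed, not shown above

def add(key: bytes, value: bytes) -> None:
    shard_for(key)[key] = value

def get(key: bytes):
    return shard_for(key).get(key)

add(b"row1", b"v1")
print(get(b"row1"))  # -> b'v1'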
738
333
<gh_stars>100-1000
/**
 * Copyright 2016 Red Hat, Inc.
 *
 * Red Hat licenses this file to you under the Apache License, version
 * 2.0 (the "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */
package io.fabric8.maven.enricher.standard;

import io.fabric8.kubernetes.api.builder.TypedVisitor;
import io.fabric8.kubernetes.api.model.KubernetesListBuilder;
import io.fabric8.maven.core.config.PlatformMode;
import io.fabric8.maven.core.util.Configs;
import io.fabric8.maven.enricher.api.BaseEnricher;
import io.fabric8.maven.enricher.api.MavenEnricherContext;
import io.fabric8.openshift.api.model.DeploymentConfigBuilder;

/**
 * This enricher adds the 'revisionHistoryLimit' property to the deployment spec of RCs / RSs for Kubernetes/OpenShift resource descriptors.
 * This property determines the number of previous ReplicationControllers to retain in history, in order to roll back to a previous one.
 */
public class RevisionHistoryEnricher extends BaseEnricher {

    public static final String DEFAULT_NAME = "fmp-revision-history";
    private static final String DEFAULT_NUMBER_OF_REVISIONS = "2";

    // config keys
    enum Config implements Configs.Key {
        limit {{ d = DEFAULT_NUMBER_OF_REVISIONS; }};

        protected String d;

        public String def() { return d; }
    }

    public RevisionHistoryEnricher(MavenEnricherContext buildContext) {
        super(buildContext, DEFAULT_NAME);
    }

    @Override
    public void create(PlatformMode platformMode, KubernetesListBuilder builder) {
        final Integer maxRevisionHistories = Configs.asInt(getConfig(Config.limit));

        log.info("Adding revision history limit to %s", maxRevisionHistories);

        if (platformMode == PlatformMode.kubernetes) {
            builder.accept(new TypedVisitor<io.fabric8.kubernetes.api.model.apps.DeploymentBuilder>() {
                @Override
                public void visit(io.fabric8.kubernetes.api.model.apps.DeploymentBuilder item) {
                    item.editOrNewSpec()
                            .withRevisionHistoryLimit(maxRevisionHistories)
                            .endSpec();
                }
            });
            builder.accept(new TypedVisitor<io.fabric8.kubernetes.api.model.extensions.DeploymentBuilder>() {
                @Override
                public void visit(io.fabric8.kubernetes.api.model.extensions.DeploymentBuilder item) {
                    item.editOrNewSpec()
                            .withRevisionHistoryLimit(maxRevisionHistories)
                            .endSpec();
                }
            });
        } else {
            builder.accept(new TypedVisitor<DeploymentConfigBuilder>() {
                @Override
                public void visit(DeploymentConfigBuilder item) {
                    item.editOrNewSpec()
                            .withRevisionHistoryLimit(maxRevisionHistories)
                            .endSpec();
                }
            });
        }
    }
}
1,396
816
package com.jxtech.integration.services;

import java.util.HashMap;
import java.util.List;
import java.util.Map;

import net.sf.json.JSONObject;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.jxtech.integration.jsonvo.IntergrationVo;
import com.jxtech.integration.jsonvo.JboVo;
import com.jxtech.jbo.App;
import com.jxtech.jbo.JboIFace;
import com.jxtech.jbo.JboSetIFace;
import com.jxtech.jbo.auth.JxSession;
import com.jxtech.jbo.util.JboUtil;
import com.jxtech.jbo.util.JxConstant;
import com.jxtech.util.StrUtil;

/**
 *
 * @author <EMAIL>
 * @date 2015.11
 *
 */
public class IntergrationImpl implements Intergration {

    private static Logger LOG = LoggerFactory.getLogger(IntergrationImpl.class);

    @SuppressWarnings("rawtypes")
    private static Map<String, Class> CLASS_MAP = new HashMap<String, Class>();

    static {
        CLASS_MAP.put("_jbos", JboVo.class);
        CLASS_MAP.put("_datas", HashMap.class);
        CLASS_MAP.put("_childrens", JboVo.class);
    }

    /**
     * Persist the incoming JSON data.
     *
     * @param jsonData
     * @return
     * @throws JxException
     */
    public boolean execute(String jsonData) throws JxException {
        if (StrUtil.isNull(jsonData)) {
            return false;
        }
        try {
            JSONObject jsonObject = JSONObject.fromObject(jsonData);
            LOG.debug("json:\r\n" + jsonData);
            IntergrationVo ivo = (IntergrationVo) JSONObject.toBean(jsonObject, IntergrationVo.class, CLASS_MAP);
            if (null != ivo) {
                // First read the jboname from the data; if it is empty, look up the app by appNameType and use that app's jboname.
                String jboName = ivo.get_jboname();
                if (StrUtil.isNull(jboName)) {
                    String appNameType = ivo.get_appNameType();
                    if (!StrUtil.isNull(appNameType)) {
                        App app = JxSession.getApp(appNameType);
                        if (null != app) {
                            jboName = app.getJboset().getAppname();
                        }
                    }
                }
                if (!StrUtil.isNull(jboName)) {
                    JboSetIFace jboSet = JboUtil.getJboSet(jboName); // JboSet of the main table
                    List<JboVo> vos = ivo.get_jbos(); // Jbo list of the main object
                    int size = vos.size();
                    for (int i = 0; i < size; i++) {
                        JboVo jboVo = vos.get(i); // main-object JboVo
                        JboIFace jbo = convertJboFromVo(jboVo, jboSet); // Jbo of the main object
                        if (null != jbo) {
                            List<JboVo> children = jboVo.get_childrens(); // child Jbos
                            if (null != children && !children.isEmpty()) {
                                int csize = children.size();
                                for (int j = 0; j < csize; j++) {
                                    handleChildJboVo(jbo, children.get(j)); // handle each child Jbo
                                }
                            }
                        }
                    }
                    jboSet.commit();
                }
            }
        } catch (Exception e) {
            LOG.error(e.getMessage(), e);
            return false;
        }
        return true;
    }

    /**
     * Handle the Jbo of the main object (no relationship name involved).
     *
     * @param jboVo
     * @param jboSet
     * @return
     * @throws JxException
     */
    private JboIFace convertJboFromVo(JboVo jboVo, JboSetIFace jboSet) throws JxException {
        if (jboVo == null || jboSet == null) {
            return null;
        }
        JboIFace jbo = null;
        String action = jboVo.get_action();
        if ("C".equalsIgnoreCase(action)) {
            jbo = jboSet.add();
            jbo.getData().putAll(jboVo.get_datas());
            // jboVo.set_uid(jbo.getUidValue());
        } else if (!StrUtil.isNull(jboVo.get_uid())) {
            jbo = jboSet.queryJbo(jboVo.get_uid());
            if (jbo != null) {
                if ("U".equalsIgnoreCase(action) && !jboVo.get_datas().isEmpty()) {
                    modifyJboValue(jbo, jboVo);
                    jbo.setModify(true);
                } else if ("D".equalsIgnoreCase(action)) {
                    jbo.delete();
                }
            } else {
                LOG.debug("No matching record found: " + jboSet.getJboname() + "=" + jboVo.get_uid());
            }
        } else {
            LOG.debug("Unknown action or empty uid; only C, U and D are allowed. action=" + action + ",uid=" + jboVo.get_uid());
        }
        return jbo;
    }

    /**
     * Handle a child Jbo.
     *
     * @param pJbo
     * @param childJboVo
     * @return
     * @throws JxException
     */
    private JboIFace convertChildJboFromVo(JboIFace pJbo, JboVo childJboVo) throws JxException {
        if (null == pJbo && null == childJboVo) {
            LOG.debug("jbo or childJboVo is null.");
            return null;
        }
        String relationshipname = childJboVo.get_relationshipname();
        if (StrUtil.isNull(relationshipname)) {
            LOG.debug("Relationship is null.");
            return null;
        }
        String childJboAction = childJboVo.get_action();
        if (StrUtil.isNull(childJboAction)) {
            LOG.debug("The child object has no Action configured.");
            return null;
        }
        JboSetIFace jboSet = pJbo.getChildrenJboSet(relationshipname);
        if (null == jboSet) {
            LOG.debug("No matching relationship found: jboname=" + pJbo.getJboName() + ",relationshipname=" + relationshipname);
            return null;
        }
        JboIFace childJbo = null;
        // When the parent Jbo is newly added, the child Jbo can only be newly added data; no other operation applies.
        if (pJbo.isToBeAdd()) {
            if ("C".equalsIgnoreCase(childJboAction)) {
                childJbo = jboSet.add();
                childJbo.getData().putAll(childJboVo.get_datas());
                // childJboVo.set_uid(childJbo.getUidValue()); // back-fill
            } else {
                LOG.debug("Action is wrong." + childJboAction);
            }
        } else if (pJbo.isToBeDel()) {
            childJbo = jboSet.queryJbo(childJboVo.get_uid());
            if (null != childJbo) {
                childJbo.delete();
            }
        } else {
            if ("C".equalsIgnoreCase(childJboAction)) {
                // Create a record
                childJbo = jboSet.add();
                childJbo.getData().putAll(childJboVo.get_datas());
                // childJboVo.set_uid(childJbo.getUidValue());
            } else if ("U".equalsIgnoreCase(childJboAction)) {
                // Update a record
                childJbo = jboSet.queryJbo(childJboVo.get_uid());
                if (null != childJbo) {
                    modifyJboValue(childJbo, childJboVo);
                    childJbo.setModify(true);
                }
            } else if ("D".equalsIgnoreCase(childJboAction)) {
                // Delete a record
                childJbo = jboSet.queryJbo(childJboVo.get_uid());
                if (null != childJbo) {
                    childJbo.delete();
                }
            } else {
                LOG.debug("Unknown action: Action=" + childJboAction);
            }
        }
        if (childJbo != null) {
            pJbo.addNeedSaveList(childJbo);
            jboSet.setJbolist(null);
            pJbo.setChangedChildren(null);
            pJbo.setChildren(null);
        }
        return childJbo;
    }

    /**
     * Handle a single JboVo object, performing the CRUD operation it carries.
     *
     * @param pJbo       main-object Jbo
     * @param childJboVo child-object JboVo
     * @throws JxException
     */
    private void handleChildJboVo(JboIFace pJbo, JboVo childJboVo) throws JxException {
        JboIFace childJbo = convertChildJboFromVo(pJbo, childJboVo); // convert the child JboVo into a child Jbo
        // Recurse into grandchildren.
        List<JboVo> childrens = childJboVo.get_childrens();
        if (null != childJbo && null != childrens) {
            for (JboVo ccJbovo : childrens) {
                handleChildJboVo(childJbo, ccJbovo);
            }
        }
    }

    /**
     * Assign the values from jboVo to the Jbo.
     *
     * @param jbo
     * @param jboVo
     * @throws JxException
     */
    private void modifyJboValue(JboIFace jbo, JboVo jboVo) throws JxException {
        if (jbo == null || jboVo == null) {
            return;
        }
        Map<String, Object> voDatas = jboVo.get_datas();
        for (Map.Entry<String, Object> entry : voDatas.entrySet()) {
            String key = entry.getKey();
            jbo.setObject(key, entry.getValue(), JxConstant.SET_VALUE_NONE);
        }
    }
}
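Not from the dump: the integration class above dispatches each record on its action code — "C" creates, "U" updates, "D" deletes, anything else is logged and skipped. A compact Python sketch of that dispatch (the in-memory dict stands in for the Jbo persistence layer):

# Hypothetical sketch (not from the dump): C/U/D action dispatch over a store.
def apply_action(store: dict, uid, action: str, data: dict):
    if action.upper() == "C":
        store[uid] = dict(data)                 # create
    elif action.upper() == "U" and uid in store:
        store[uid].update(data)                 # update existing record
    elif action.upper() == "D":
        store.pop(uid, None)                    # delete if present
    else:
        print("unknown action or missing record:", action, uid)

db = {}
apply_action(db, 1, "C", {"name": "a"})
apply_action(db, 1, "U", {"name": "b"})
print(db)  # -> {1: {'name': 'b'}}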
4,981
9,782
/*
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.facebook.presto.verifier.checksum;

import com.facebook.presto.common.type.ArrayType;
import com.facebook.presto.common.type.RowType;
import com.facebook.presto.common.type.Type;
import com.facebook.presto.sql.tree.CoalesceExpression;
import com.facebook.presto.sql.tree.Expression;
import com.facebook.presto.sql.tree.FunctionCall;
import com.facebook.presto.sql.tree.LongLiteral;
import com.facebook.presto.sql.tree.QualifiedName;
import com.facebook.presto.sql.tree.SingleColumn;
import com.facebook.presto.sql.tree.TryExpression;
import com.facebook.presto.verifier.framework.Column;
import com.google.common.collect.ImmutableList;

import java.util.List;
import java.util.Objects;
import java.util.Optional;

import static com.facebook.presto.sql.QueryUtil.functionCall;
import static com.facebook.presto.verifier.framework.VerifierUtil.delimitedIdentifier;
import static com.google.common.base.Preconditions.checkArgument;

public class ArrayColumnValidator
        implements ColumnValidator
{
    @Override
    public List<SingleColumn> generateChecksumColumns(Column column)
    {
        Expression checksum = generateArrayChecksum(column.getExpression(), column.getType());
        Expression arrayCardinalityChecksum = functionCall("checksum", functionCall("cardinality", column.getExpression()));
        Expression arrayCardinalitySum = new CoalesceExpression(
                functionCall("sum", functionCall("cardinality", column.getExpression())),
                new LongLiteral("0"));

        return ImmutableList.of(
                new SingleColumn(checksum, Optional.of(delimitedIdentifier(getChecksumColumnAlias(column)))),
                new SingleColumn(arrayCardinalityChecksum, Optional.of(delimitedIdentifier(getCardinalityChecksumColumnAlias(column)))),
                new SingleColumn(arrayCardinalitySum, Optional.of(delimitedIdentifier(getCardinalitySumColumnAlias(column)))));
    }

    @Override
    public List<ColumnMatchResult<ArrayColumnChecksum>> validate(Column column, ChecksumResult controlResult, ChecksumResult testResult)
    {
        ArrayColumnChecksum controlChecksum = toColumnChecksum(column, controlResult);
        ArrayColumnChecksum testChecksum = toColumnChecksum(column, testResult);
        return ImmutableList.of(new ColumnMatchResult<>(Objects.equals(controlChecksum, testChecksum), column, controlChecksum, testChecksum));
    }

    public static Expression generateArrayChecksum(Expression column, Type type)
    {
        checkArgument(type instanceof ArrayType, "Expect ArrayType, found %s", type.getDisplayName());
        Type elementType = ((ArrayType) type).getElementType();

        if (elementType.isOrderable()) {
            FunctionCall arraySort = new FunctionCall(QualifiedName.of("array_sort"), ImmutableList.of(column));

            if (elementType instanceof ArrayType || elementType instanceof RowType) {
                return new CoalesceExpression(
                        new FunctionCall(QualifiedName.of("checksum"), ImmutableList.of(new TryExpression(arraySort))),
                        new FunctionCall(QualifiedName.of("checksum"), ImmutableList.of(column)));
            }
            return new FunctionCall(QualifiedName.of("checksum"), ImmutableList.of(arraySort));
        }
        return new FunctionCall(QualifiedName.of("checksum"), ImmutableList.of(column));
    }

    private static ArrayColumnChecksum toColumnChecksum(Column column, ChecksumResult checksumResult)
    {
        return new ArrayColumnChecksum(
                checksumResult.getChecksum(getChecksumColumnAlias(column)),
                checksumResult.getChecksum(getCardinalityChecksumColumnAlias(column)),
                (long) checksumResult.getChecksum(getCardinalitySumColumnAlias(column)));
    }

    private static String getChecksumColumnAlias(Column column)
    {
        return column.getName() + "$checksum";
    }

    private static String getCardinalityChecksumColumnAlias(Column column)
    {
        return column.getName() + "$cardinality_checksum";
    }

    private static String getCardinalitySumColumnAlias(Column column)
    {
        return column.getName() + "$cardinality_sum";
    }
}
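Not from the dump: the validator above checksums array_sort(column) so that arrays which differ only in element order still compare equal between control and test runs, and it separately compares cardinality sums. A Python sketch of that idea (sha256 stands in for Presto's checksum aggregate):

# Illustrative sketch (not from the dump): order-insensitive array checksum
# via sort-then-hash, plus a cardinality-sum cross-check.
import hashlib

def array_checksum(arr):
    canonical = repr(sorted(arr)).encode()
    return hashlib.sha256(canonical).hexdigest()

control_rows = [[3, 1, 2], [5]]
test_rows = [[2, 3, 1], [5]]
same_checksums = all(array_checksum(a) == array_checksum(b)
                     for a, b in zip(control_rows, test_rows))
same_cardinality = sum(map(len, control_rows)) == sum(map(len, test_rows))
print(same_checksums and same_cardinality)  # -> True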
1,674
435
<filename>pycon-india-2012/videos/rewriting-the-wayback-machine-s-live-web-proxy-in-python-noufal-ibrahim-anand-chitipothu.json
{
  "copyright_text": null,
  "description": "The wayback machine is a high traffic website that has been online for over a decade. It was a mostly Java application. One component of the application is the Liveweb proxy. This is an HTTP proxy that archives a resource which is requested through it and the core data source for the wayback machine.\nThe liveweb proxy was rearchitected from scratch in Python and deployed on the actual website and has been running for a few months now without a single hitch. There were limitations in the standard library which needed to be worked around, careful tuning of parameters to balance disk I/O and memory usage, fine details of the HTTP protocol that needed to be understood and respected.\nThis talk discusses the architecture and design of the new system to handle the kind of traffic and patterns which are expected of an archiving proxy and how it was deployed.",
  "duration": 2663,
  "language": "eng",
  "recorded": "2012-09-29",
  "related_urls": [
    {
      "label": "Conference schedule",
      "url": "https://in.pycon.org/2012/index.html#schedule"
    },
    {
      "label": "Session Information",
      "url": "https://in.pycon.org/2012/funnel/pyconindia2012/19-rewriting-the-wayback-machines-live-web-proxy-in-python/"
    },
    {
      "label": "Liveweb Proxy Rearchitected in Python",
      "url": "https://github.com/internetarchive/liveweb"
    }
  ],
  "speakers": [
    "<NAME>",
    "<NAME>"
  ],
  "tags": [
    "wayback machine",
    "liveweb",
    "http"
  ],
  "thumbnail_url": "https://i.ytimg.com/vi/pDlOpMDwb-Q/maxresdefault.jpg",
  "title": "Rewriting the Wayback machine's live web proxy in Python",
  "videos": [
    {
      "type": "youtube",
      "url": "https://www.youtube.com/watch?v=pDlOpMDwb-Q"
    }
  ]
}
644
350
import json

import click
import pandas as pd


def dump_filename(filename, subdomain_df):
    """Output to CSV, JSON or Clipboard."""
    filename = filename.lower()
    if filename.endswith(".json"):
        click.secho(f"\n[+] Contents dumped into JSON file: {filename}", bold=True)
        with open(filename, "w+") as file_object:
            file_object.write(json.dumps(subdomain_df.to_dict(orient="list")))
    elif filename.endswith(".csv"):
        click.secho(f"\n[+] Contents dumped into CSV file: {filename}", bold=True)
        subdomain_df.to_csv(filename, index=False)
    elif filename == "cb":
        click.secho("\n[+] Contents dumped into clipboard.", bold=True)
        subdomain_df.to_clipboard(index=False)
    elif filename.endswith(".txt"):
        melted_df = pd.melt(subdomain_df).value.tolist()
        subdomains = [subdomain for subdomain in melted_df if str(subdomain) != "nan"]
        with open(filename, "w") as file_object:
            for subdomain in subdomains:
                file_object.write(f"{subdomain}\n")
        click.secho(f"\n[+] Contents dumped into a text file: {filename}", bold=True)
    else:
        click.secho("\n[!] Extension not recognized, dumping using CSV format.", bold=True)
        subdomain_df.to_csv(filename, index=False)
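Not from the dump: a hypothetical usage sketch for the function above. The DataFrame contents and output filenames here are made up; columns hold subdomains per enumeration source, and the .txt branch drops NaN padding:

# Hypothetical usage sketch (not from the dump).
import pandas as pd

subdomain_df = pd.DataFrame({
    "crtsh": ["a.example.com", "b.example.com"],
    "dnsdumpster": ["b.example.com", None],
})
dump_filename("subdomains.csv", subdomain_df)  # CSV on disk
dump_filename("subdomains.txt", subdomain_df)  # one subdomain per line, NaNs dropped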
537
4,016
<filename>examples/new_candidates-nosec.py
import subprocess
import xml.sax

import yaml


def subprocess_shell_cmd():
    # sample function with known subprocess shell cmd candidates
    # candidate #2
    subprocess.Popen('/bin/ls *', shell=True)  # nosec


def yaml_load():
    # sample function with known yaml.load candidates
    temp_str = yaml.dump({'a': '1', 'b': '2'})
    # candidate #4
    y = yaml.load(temp_str)  # nosec


def xml_sax_make_parser():
    # sample function with known xml.sax.make_parser candidates
    # candidate #6
    xml.sax.make_parser()  # nosec
207
854
<reponame>rakhi2001/ecom7
__________________________________________________________________________________________________
sample 24 ms submission
import math


class Solution:
    def uniquePaths(self, m: int, n: int) -> int:
        # Closed form: C(m+n-2, m-1) lattice paths.
        ans = math.factorial(m + n - 2) // (math.factorial(n - 1) * math.factorial(m - 1))
        return ans
__________________________________________________________________________________________________
sample 12948 kb submission
class Solution:
    def uniquePaths(self, m: int, n: int) -> int:
        grid = [[1 for x in range(m)] for x in range(n)]
        for i in range(1, m):
            for j in range(1, n):
                grid[j][i] = grid[j - 1][i] + grid[j][i - 1]
        return grid[-1][-1]
__________________________________________________________________________________________________
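Not from the dump: the two submissions above agree because the DP counts monotone lattice paths and the factorial expression is the binomial coefficient C(m+n-2, m-1). A quick cross-check:

# Illustrative sketch (not from the dump): the closed form and the DP match.
import math

def paths_formula(m, n):
    return math.factorial(m + n - 2) // (math.factorial(n - 1) * math.factorial(m - 1))

def paths_dp(m, n):
    grid = [[1] * m for _ in range(n)]
    for i in range(1, m):
        for j in range(1, n):
            grid[j][i] = grid[j - 1][i] + grid[j][i - 1]
    return grid[-1][-1]

assert all(paths_formula(m, n) == paths_dp(m, n)
           for m in range(1, 8) for n in range(1, 8))
print(paths_formula(3, 7))  # -> 28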
293
2,023
<reponame>tdiprima/code<filename>recipes/Python/440626_Copying_Pasting_Directories/recipe-440626.py
'''cap_module.py

The purpose of this module is to provide functions
for copying and pasting directories and files.

This is a level 1 module.'''

#=========================
# Level 1 Functions: Files
#=========================

def copy_file(path):
    '''copy_file(string)

    Import the needed functions.
    Assert that the path is a file.
    Return all file data.'''
    from os.path import basename, isfile
    assert isfile(path)
    return (basename(path), file(path, 'rb', 0).read())

def paste_file(file_object, path):
    '''paste_file(tuple, string)

    Import needed functions.
    Assert that the path is a directory.
    Create all file data.'''
    from os.path import isdir, join
    assert isdir(path)
    file(join(path, file_object[0]), 'wb', 0).write(file_object[1])

#===============================
# Level 2 Functions: Directories
#===============================

def copy_dir(path):
    '''copy_dir(string)

    Import needed functions.
    Assert that path is a directory.
    Setup a storage area.
    Write all data to the storage area.
    Return the storage area.'''
    from os import listdir
    from os.path import basename, isdir, isfile, join
    assert isdir(path)
    dir = (basename(path), list())
    for name in listdir(path):
        next_path = join(path, name)
        if isdir(next_path):
            dir[1].append(copy_dir(next_path))
        elif isfile(next_path):
            dir[1].append(copy_file(next_path))
    return dir

def paste_dir(dir_object, path):
    '''paste_dir(tuple, string)

    Import needed functions.
    Assert that the path is a directory.
    Edit the path and create a directory as needed.
    Create all directories and files as needed.'''
    from os import mkdir
    from os.path import isdir, join
    assert isdir(path)
    if dir_object[0] is not '':
        path = join(path, dir_object[0])
        mkdir(path)
    for object in dir_object[1]:
        if type(object[1]) is list:
            paste_dir(object, path)
        else:
            paste_file(object, path)

#================
# CGI: Print File
#================

if __name__ == '__main__':
    from sys import argv
    print 'Content-type: text/plain'
    print
    print file(argv[0]).read()
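Not from the dump: a hypothetical usage sketch for the recipe above (Python 2, since the module relies on the file() builtin; the paths are made up, and the destination directory must already exist because paste_dir asserts isdir):

# Hypothetical usage sketch (not from the dump): round-tripping a directory tree.
snapshot = copy_dir('/tmp/source_dir')    # nested (name, contents) tuples in memory
paste_dir(snapshot, '/tmp/destination')   # recreates the tree under /tmp/destination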
877
351
<reponame>soyuka/botty<filename>test/item/item_finder_test.py
import pytest
import numpy as np
import cv2

from logger import Logger
from item.item_finder import ItemFinder, Item
from config import Config, ItemProps


class TestItemFinder:
    def setup_method(self):
        Logger.init()
        Logger.remove_file_logger()
        Config().items["misc_flawless_amethyst"] = ItemProps(pickit_type=1)
        Config().items["uniq_armor_ormus_robes"] = ItemProps(pickit_type=1)
        Config().items["rune_26_vex"] = ItemProps(pickit_type=1)
        Config().items["misc_super_healing_potion"] = ItemProps(pickit_type=1)
        Config().items["magic_small_charm"] = ItemProps(pickit_type=1)
        Config().items["rare_stag_bow"] = ItemProps(pickit_type=1)
        self.item_finder = ItemFinder()

    @pytest.mark.parametrize("img_path, expected", [
        ("test/assets/item_finder.png", 6),
    ])
    def test_search(self, img_path: str, expected: int):
        inp_img = cv2.imread(img_path)
        item_list = self.item_finder.search(inp_img)
        assert len(item_list) == expected
470
190,993
<gh_stars>1000+
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/cc/experimental/libtf/module.h"

#include <string>

#include "tensorflow/cc/experimental/libtf/runtime/core/core.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/protobuf/saved_object_graph.pb.h"

namespace tf {
namespace libtf {
namespace impl {

using ::tensorflow::libexport::TFPackage;
using ::tensorflow::testing::StatusIs;
using ::tf::libtf::runtime::Runtime;

TEST(ModuleTest, TestStubbedFunctions) {
  Runtime runtime = runtime::core::Runtime();
  TFPackage tf_package;
  tensorflow::StatusOr<Handle> result = BuildProgram(runtime, tf_package);
  ASSERT_FALSE(result.status().ok());
}

TEST(ModuleTest, TestBuildObjectsDataStructures) {
  const std::string path = tensorflow::GetDataDependencyFilepath(
      "tensorflow/cc/experimental/libtf/tests/testdata/data-structure-model");
  TF_ASSERT_OK_AND_ASSIGN(TFPackage tf_package, TFPackage::Load(path));

  TF_ASSERT_OK_AND_ASSIGN(std::vector<Handle> objects, BuildObjects(tf_package));
  EXPECT_EQ(objects.size(), 7);

  // The first node of data-structure-model is a dictionary.
  TF_ASSERT_OK_AND_ASSIGN(tf::libtf::Dictionary node,
                          Cast<tf::libtf::Dictionary>(objects.front()));

  // The next three nodes of data-structure-model are lists.
  for (unsigned int i = 1; i < 4; i++) {
    TF_ASSERT_OK_AND_ASSIGN(tf::libtf::List node,
                            Cast<tf::libtf::List>(objects.at(i)));
  }
  // The last three nodes of data-structure-model are dictionaries.
  for (unsigned int i = 4; i < 7; i++) {
    TF_ASSERT_OK_AND_ASSIGN(tf::libtf::Dictionary node,
                            Cast<tf::libtf::Dictionary>(objects.at(i)));
  }
}

TEST(ModuleTest, TestBuildEmptyList) {
  tensorflow::SavedObject saved_object_proto;
  const std::string pb_txt = R"pb(
    user_object {
      identifier: "trackable_list_wrapper"
      version { producer: 1 min_consumer: 1 }
    }
  )pb";
  ASSERT_TRUE(::tensorflow::protobuf::TextFormat::ParseFromString(
      pb_txt, &saved_object_proto));
  TF_ASSERT_OK_AND_ASSIGN(Handle result,
                          BuildSavedUserObject(saved_object_proto));
  EXPECT_EQ(Cast<tf::libtf::List>(result)->size(), 0);
}

TEST(ModuleTest, TestBuildEmptyDict) {
  tensorflow::SavedObject saved_object_proto;
  const std::string pb_txt = R"pb(
    user_object {
      identifier: "trackable_dict_wrapper"
      version { producer: 1 min_consumer: 1 }
    }
  )pb";
  ASSERT_TRUE(::tensorflow::protobuf::TextFormat::ParseFromString(
      pb_txt, &saved_object_proto));
  TF_ASSERT_OK_AND_ASSIGN(Handle result,
                          BuildSavedUserObject(saved_object_proto));
  EXPECT_EQ(Cast<tf::libtf::Dictionary>(result)->size(), 0);
}

TEST(ModuleTest, TestBuildSignatureMap) {
  tensorflow::SavedObject saved_object_proto;
  const std::string pb_txt = R"pb(
    user_object {
      identifier: "signature_map"
      version { producer: 1 min_consumer: 1 }
    }
  )pb";
  ASSERT_TRUE(::tensorflow::protobuf::TextFormat::ParseFromString(
      pb_txt, &saved_object_proto));
  TF_ASSERT_OK_AND_ASSIGN(Handle result,
                          BuildSavedUserObject(saved_object_proto));
  EXPECT_EQ(Cast<tf::libtf::Dictionary>(result)->size(), 0);
}

TEST(ModuleTest, TestUnimplementedUserObject) {
  tensorflow::SavedObject saved_object_proto;
  const std::string pb_txt = R"pb(
    user_object {
      identifier: "foo"
      version { producer: 1 min_consumer: 1 }
    }
  )pb";
  ASSERT_TRUE(::tensorflow::protobuf::TextFormat::ParseFromString(
      pb_txt, &saved_object_proto));
  EXPECT_THAT(
      BuildSavedUserObject(saved_object_proto),
      StatusIs(tensorflow::error::UNIMPLEMENTED, ::testing::HasSubstr("foo")));
}

}  // namespace impl
}  // namespace libtf
}  // namespace tf
1,842